sys/kern/vfs_subr.c
1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
35 */
36
37 /*
38 * External virtual filesystem routines
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD: releng/6.2/sys/kern/vfs_subr.c 164870 2006-12-04 08:47:53Z pjd $");
43
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/dirent.h>
53 #include <sys/event.h>
54 #include <sys/eventhandler.h>
55 #include <sys/extattr.h>
56 #include <sys/file.h>
57 #include <sys/fcntl.h>
58 #include <sys/kdb.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
61 #include <sys/mac.h>
62 #include <sys/malloc.h>
63 #include <sys/mount.h>
64 #include <sys/namei.h>
65 #include <sys/reboot.h>
66 #include <sys/sleepqueue.h>
67 #include <sys/stat.h>
68 #include <sys/sysctl.h>
69 #include <sys/syslog.h>
70 #include <sys/vmmeter.h>
71 #include <sys/vnode.h>
72
73 #include <machine/stdarg.h>
74
75 #include <vm/vm.h>
76 #include <vm/vm_object.h>
77 #include <vm/vm_extern.h>
78 #include <vm/pmap.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_page.h>
81 #include <vm/vm_kern.h>
82 #include <vm/uma.h>
83
84 #ifdef DDB
85 #include <ddb/ddb.h>
86 #endif
87
88 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
89
90 static void delmntque(struct vnode *vp);
91 static void insmntque(struct vnode *vp, struct mount *mp);
92 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
93 int slpflag, int slptimeo);
94 static void syncer_shutdown(void *arg, int howto);
95 static int vtryrecycle(struct vnode *vp);
96 static void vbusy(struct vnode *vp);
97 static void vdropl(struct vnode *vp);
98 static void vinactive(struct vnode *, struct thread *);
99 static void v_incr_usecount(struct vnode *);
100 static void v_decr_usecount(struct vnode *);
101 static void v_decr_useonly(struct vnode *);
102 static void v_upgrade_usecount(struct vnode *);
103 static void vfree(struct vnode *);
104 static void vnlru_free(int);
105 static void vdestroy(struct vnode *);
106 static void vgonel(struct vnode *);
107 static void vfs_knllock(void *arg);
108 static void vfs_knlunlock(void *arg);
109 static int vfs_knllocked(void *arg);
110
111
112 /*
113 * Enable Giant pushdown based on whether or not the vm is mpsafe in this
114 * build. Without mpsafevm the buffer cache can not run Giant free.
115 */
116 #if defined(__alpha__) || defined(__amd64__) || defined(__i386__) || \
117 defined(__sparc64__)
118 int mpsafe_vfs = 1;
119 #else
120 int mpsafe_vfs;
121 #endif
122 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
123 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
124 "MPSAFE VFS");
125
126 /*
127 * Number of vnodes in existence. Increased whenever getnewvnode()
128 * allocates a new vnode and decreased by vdestroy(), which is called on
129 * a VI_DOOMED vnode.
130 */
131 static unsigned long numvnodes;
132
133 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
134
135 /*
136 * Conversion tables for conversion from vnode types to inode formats
137 * and back.
138 */
139 enum vtype iftovt_tab[16] = {
140 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
141 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
142 };
143 int vttoif_tab[10] = {
144 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
145 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
146 };
147
148 /*
149 * List of vnodes that are ready for recycling.
150 */
151 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
152
153 /*
154 * Free vnode target. Free vnodes may simply be files which have been stat'd
155 * but not read. This is somewhat common, and a small cache of such files
156 * should be kept to avoid recreation costs.
157 */
158 static u_long wantfreevnodes;
159 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
160 /* Number of vnodes in the free list. */
161 static u_long freevnodes;
162 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
163
164 /*
165 * Various variables used for debugging the new implementation of
166 * reassignbuf().
167 * XXX these are probably of (very) limited utility now.
168 */
169 static int reassignbufcalls;
170 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
171
172 /*
173 * Cache for the mount type id assigned to NFS. This is used for
174 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
175 */
176 int nfs_mount_type = -1;
177
178 /* To keep more than one thread at a time from running vfs_getnewfsid */
179 static struct mtx mntid_mtx;
180
181 /*
182 * Lock for any access to the following:
183 * vnode_free_list
184 * numvnodes
185 * freevnodes
186 */
187 static struct mtx vnode_free_list_mtx;
188
189 /* Publicly exported FS */
190 struct nfs_public nfs_pub;
191
192 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
193 static uma_zone_t vnode_zone;
194 static uma_zone_t vnodepoll_zone;
195
196 /* Set to 1 to print out reclaim of active vnodes */
197 int prtactive;
198
199 /*
200 * The workitem queue.
201 *
202 * It is useful to delay writes of file data and filesystem metadata
203 * for tens of seconds so that quickly created and deleted files need
204 * not waste disk bandwidth being created and removed. To realize this,
205 * we append vnodes to a "workitem" queue. When running with a soft
206 * updates implementation, most pending metadata dependencies should
207 * not wait for more than a few seconds. Thus, writes to block-device
208 * vnodes (filesystem metadata) are delayed only about half as long as
209 * file data. Similarly, directory updates are more critical, so they are
210 * delayed only about a third as long as file data. Thus, there are
211 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
212 * one each second (driven off the filesystem syncer process). The
213 * syncer_delayno variable indicates the next queue that is to be processed.
214 * Items that need to be processed soon are placed in this queue:
215 *
216 * syncer_workitem_pending[syncer_delayno]
217 *
218 * A delay of fifteen seconds is done by placing the request fifteen
219 * entries later in the queue:
220 *
221 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
222 *
223 */
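/*
 * Worked example (editorial note, not part of the original source):
 * vntblinit() below sizes the table with hashinit(syncer_maxdelay, ...),
 * which rounds to a power of two, so the default SYNCER_MAXDELAY of 32
 * yields syncer_mask == 31 and syncer_maxdelay == 32.  If syncer_delayno
 * is currently 20, a fifteen second delay selects slot
 * (20 + 15) & 31 == 3, i.e. the index simply wraps around the ring.
 */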
224 static int syncer_delayno;
225 static long syncer_mask;
226 LIST_HEAD(synclist, bufobj);
227 static struct synclist *syncer_workitem_pending;
228 /*
229 * The sync_mtx protects:
230 * bo->bo_synclist
231 * sync_vnode_count
232 * syncer_delayno
233 * syncer_state
234 * syncer_workitem_pending
235 * syncer_worklist_len
236 * rushjob
237 */
238 static struct mtx sync_mtx;
239
240 #define SYNCER_MAXDELAY 32
241 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
242 static int syncdelay = 30; /* max time to delay syncing data */
243 static int filedelay = 30; /* time to delay syncing files */
244 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
245 static int dirdelay = 29; /* time to delay syncing directories */
246 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
247 static int metadelay = 28; /* time to delay syncing metadata */
248 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
249 static int rushjob; /* number of slots to run ASAP */
250 static int stat_rush_requests; /* number of times I/O speeded up */
251 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
252
253 /*
254 * When shutting down the syncer, run it at four times normal speed.
255 */
256 #define SYNCER_SHUTDOWN_SPEEDUP 4
257 static int sync_vnode_count;
258 static int syncer_worklist_len;
259 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
260 syncer_state;
261
262 /*
263 * Number of vnodes we want to exist at any one time. This is mostly used
264 * to size hash tables in vnode-related code. It is normally not used in
265 * getnewvnode(), as wantfreevnodes is normally nonzero.
266 *
267 * XXX desiredvnodes is historical cruft and should not exist.
268 */
269 int desiredvnodes;
270 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
271 &desiredvnodes, 0, "Maximum number of vnodes");
272 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
273 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
274 static int vnlru_nowhere;
275 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
276 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
277
278 /*
279 * Macros to control when a vnode is freed and recycled. All require
280 * the vnode interlock.
281 */
282 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
283 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
284 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
285
286
287 /*
288 * Initialize the vnode management data structures.
289 */
290 #ifndef MAXVNODES_MAX
291 #define MAXVNODES_MAX 100000
292 #endif
293 static void
294 vntblinit(void *dummy __unused)
295 {
296
297 /*
298 * Desiredvnodes is a function of the physical memory size and
299 * the kernel's heap size. Specifically, desiredvnodes scales
300 * in proportion to the physical memory size until two fifths
301 * of the kernel's heap size is consumed by vnodes and vm
302 * objects.
303 */
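	/*
	 * Purely illustrative numbers (editorial note, not from the source):
	 * on a machine with 262144 4 KB pages (1 GB) and maxproc of 6164, the
	 * first term is 6164 + 262144 / 4 = 71700.  If vm_kmem_size were
	 * 320 MB and sizeof(struct vm_object) + sizeof(struct vnode) came to
	 * roughly 400 bytes, the second term would be about 335000, so the
	 * min() picks 71700, which is below MAXVNODES_MAX and is used
	 * unchanged.
	 */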
304 desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
305 (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
306 if (desiredvnodes > MAXVNODES_MAX) {
307 if (bootverbose)
308 printf("Reducing kern.maxvnodes %d -> %d\n",
309 desiredvnodes, MAXVNODES_MAX);
310 desiredvnodes = MAXVNODES_MAX;
311 }
312 wantfreevnodes = desiredvnodes / 4;
313 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
314 TAILQ_INIT(&vnode_free_list);
315 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
316 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
317 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
318 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
319 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
320 /*
321 * Initialize the filesystem syncer.
322 */
323 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
324 &syncer_mask);
325 syncer_maxdelay = syncer_mask + 1;
326 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
327 }
328 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
329
330
331 /*
332 * Mark a mount point as busy. Used to synchronize access and to delay
333 * unmounting. Interlock is not released on failure.
334 */
335 int
336 vfs_busy(mp, flags, interlkp, td)
337 struct mount *mp;
338 int flags;
339 struct mtx *interlkp;
340 struct thread *td;
341 {
342 int lkflags;
343
344 MNT_ILOCK(mp);
345 MNT_REF(mp);
346 if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
347 if (flags & LK_NOWAIT) {
348 MNT_REL(mp);
349 MNT_IUNLOCK(mp);
350 return (ENOENT);
351 }
352 if (interlkp)
353 mtx_unlock(interlkp);
354 mp->mnt_kern_flag |= MNTK_MWAIT;
355 /*
356 * Since all busy locks are shared except the exclusive
357 * lock granted when unmounting, the only place that a
358 * wakeup needs to be done is at the release of the
359 * exclusive lock at the end of dounmount.
360 */
361 msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
362 MNT_REL(mp);
363 MNT_IUNLOCK(mp);
364 if (interlkp)
365 mtx_lock(interlkp);
366 return (ENOENT);
367 }
368 if (interlkp)
369 mtx_unlock(interlkp);
370 lkflags = LK_SHARED | LK_INTERLOCK;
371 if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
372 panic("vfs_busy: unexpected lock failure");
373 return (0);
374 }
375
376 /*
377 * Free a busy filesystem.
378 */
379 void
380 vfs_unbusy(mp, td)
381 struct mount *mp;
382 struct thread *td;
383 {
384
385 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
386 vfs_rel(mp);
387 }
388
389 /*
390 * Lookup a mount point by filesystem identifier.
391 */
392 struct mount *
393 vfs_getvfs(fsid)
394 fsid_t *fsid;
395 {
396 struct mount *mp;
397
398 mtx_lock(&mountlist_mtx);
399 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
400 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
401 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
402 vfs_ref(mp);
403 mtx_unlock(&mountlist_mtx);
404 return (mp);
405 }
406 }
407 mtx_unlock(&mountlist_mtx);
408 return ((struct mount *) 0);
409 }
410
411 /*
412 * Check if a user can access privileged mount options.
413 */
414 int
415 vfs_suser(struct mount *mp, struct thread *td)
416 {
417 int error;
418
419 if ((mp->mnt_flag & MNT_USER) == 0 ||
420 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
421 if ((error = suser(td)) != 0)
422 return (error);
423 }
424 return (0);
425 }
426
427 /*
428 * Get a new unique fsid. Try to make its val[0] unique, since this value
429 * will be used to create fake device numbers for stat(). Also try (but
430 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
431 * support 16-bit device numbers. We end up with unique val[0]'s for the
432 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
433 *
434 * Keep in mind that several mounts may be running in parallel. Starting
435 * the search one past where the previous search terminated is both a
436 * micro-optimization and a defense against returning the same fsid to
437 * different mounts.
438 */
439 void
440 vfs_getnewfsid(mp)
441 struct mount *mp;
442 {
443 static u_int16_t mntid_base;
444 struct mount *nmp;
445 fsid_t tfsid;
446 int mtype;
447
448 mtx_lock(&mntid_mtx);
449 mtype = mp->mnt_vfc->vfc_typenum;
450 tfsid.val[1] = mtype;
451 mtype = (mtype & 0xFF) << 24;
452 for (;;) {
453 tfsid.val[0] = makedev(255,
454 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
455 mntid_base++;
456 if ((nmp = vfs_getvfs(&tfsid)) == NULL)
457 break;
458 vfs_rel(nmp);
459 }
460 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
461 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
462 mtx_unlock(&mntid_mtx);
463 }
464
465 /*
466 * Knob to control the precision of file timestamps:
467 *
468 * 0 = seconds only; nanoseconds zeroed.
469 * 1 = seconds and nanoseconds, accurate within 1/HZ.
470 * 2 = seconds and nanoseconds, truncated to microseconds.
471 * >=3 = seconds and nanoseconds, maximum precision.
472 */
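/*
 * Usage sketch (editorial note): the knob is exported below as the
 * read-write sysctl vfs.timestamp_precision, so an administrator could,
 * for example, request full-precision timestamps at run time with
 * "sysctl vfs.timestamp_precision=3", or make the setting persistent via
 * /etc/sysctl.conf.
 */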
473 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
474
475 static int timestamp_precision = TSP_SEC;
476 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
477     &timestamp_precision, 0, "");
478
479 /*
480 * Get a current timestamp.
481 */
482 void
483 vfs_timestamp(tsp)
484 struct timespec *tsp;
485 {
486 struct timeval tv;
487
488 switch (timestamp_precision) {
489 case TSP_SEC:
490 tsp->tv_sec = time_second;
491 tsp->tv_nsec = 0;
492 break;
493 case TSP_HZ:
494 getnanotime(tsp);
495 break;
496 case TSP_USEC:
497 microtime(&tv);
498 TIMEVAL_TO_TIMESPEC(&tv, tsp);
499 break;
500 case TSP_NSEC:
501 default:
502 nanotime(tsp);
503 break;
504 }
505 }
506
507 /*
508 * Set vnode attributes to VNOVAL
509 */
510 void
511 vattr_null(vap)
512 struct vattr *vap;
513 {
514
515 vap->va_type = VNON;
516 vap->va_size = VNOVAL;
517 vap->va_bytes = VNOVAL;
518 vap->va_mode = VNOVAL;
519 vap->va_nlink = VNOVAL;
520 vap->va_uid = VNOVAL;
521 vap->va_gid = VNOVAL;
522 vap->va_fsid = VNOVAL;
523 vap->va_fileid = VNOVAL;
524 vap->va_blocksize = VNOVAL;
525 vap->va_rdev = VNOVAL;
526 vap->va_atime.tv_sec = VNOVAL;
527 vap->va_atime.tv_nsec = VNOVAL;
528 vap->va_mtime.tv_sec = VNOVAL;
529 vap->va_mtime.tv_nsec = VNOVAL;
530 vap->va_ctime.tv_sec = VNOVAL;
531 vap->va_ctime.tv_nsec = VNOVAL;
532 vap->va_birthtime.tv_sec = VNOVAL;
533 vap->va_birthtime.tv_nsec = VNOVAL;
534 vap->va_flags = VNOVAL;
535 vap->va_gen = VNOVAL;
536 vap->va_vaflags = 0;
537 }
538
539 /*
540 * This routine is called when we have too many vnodes. It attempts
541 * to free <count> vnodes and will potentially free vnodes that still
542 * have VM backing store (VM backing store is typically the cause
543 * of a vnode blowout so we want to do this). Therefore, this operation
544 * is not considered cheap.
545 *
546 * A number of conditions may prevent a vnode from being reclaimed.
547 * The buffer cache may have references on the vnode, a directory
548 * vnode may still have references due to the namei cache representing
549 * underlying files, or the vnode may be in active use. It is not
550 * desirable to reuse such vnodes. These conditions may cause the
551 * number of vnodes to reach some minimum value regardless of what
552 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
553 */
554 static int
555 vlrureclaim(struct mount *mp)
556 {
557 struct thread *td;
558 struct vnode *vp;
559 int done;
560 int trigger;
561 int usevnodes;
562 int count;
563
564 /*
565 * Calculate the trigger point; don't allow user
566 * screwups to blow us up. This prevents us from
567 * recycling vnodes with lots of resident pages. We
568 * aren't trying to free memory, we are trying to
569 * free vnodes.
570 */
571 usevnodes = desiredvnodes;
572 if (usevnodes <= 0)
573 usevnodes = 1;
574 trigger = cnt.v_page_count * 2 / usevnodes;
575 done = 0;
576 td = curthread;
577 vn_start_write(NULL, &mp, V_WAIT);
578 MNT_ILOCK(mp);
579 count = mp->mnt_nvnodelistsize / 10 + 1;
580 while (count != 0) {
581 vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
582 while (vp != NULL && vp->v_type == VMARKER)
583 vp = TAILQ_NEXT(vp, v_nmntvnodes);
584 if (vp == NULL)
585 break;
586 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
587 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
588 --count;
589 if (!VI_TRYLOCK(vp))
590 goto next_iter;
591 /*
592 * If it's been deconstructed already, it's still
593 * referenced, or it exceeds the trigger, skip it.
594 */
595 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
596 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
597 vp->v_object->resident_page_count > trigger)) {
598 VI_UNLOCK(vp);
599 goto next_iter;
600 }
601 MNT_IUNLOCK(mp);
602 vholdl(vp);
603 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT, td)) {
604 vdrop(vp);
605 goto next_iter_mntunlocked;
606 }
607 VI_LOCK(vp);
608 /*
609 * v_usecount may have been bumped after VOP_LOCK() dropped
610 * the vnode interlock and before it was locked again.
611 *
612 * It is not necessary to recheck VI_DOOMED because it can
613 * only be set by another thread that holds both the vnode
614 * lock and vnode interlock. If another thread has the
615 * vnode lock before we get to VOP_LOCK() and obtains the
616 * vnode interlock after VOP_LOCK() drops the vnode
617 * interlock, the other thread will be unable to drop the
618 * vnode lock before our VOP_LOCK() call fails.
619 */
620 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
621 (vp->v_object != NULL &&
622 vp->v_object->resident_page_count > trigger)) {
623 VOP_UNLOCK(vp, LK_INTERLOCK, td);
624 goto next_iter_mntunlocked;
625 }
626 KASSERT((vp->v_iflag & VI_DOOMED) == 0,
627 ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
628 vgonel(vp);
629 VOP_UNLOCK(vp, 0, td);
630 vdropl(vp);
631 done++;
632 next_iter_mntunlocked:
633 if ((count % 256) != 0)
634 goto relock_mnt;
635 goto yield;
636 next_iter:
637 if ((count % 256) != 0)
638 continue;
639 MNT_IUNLOCK(mp);
640 yield:
641 uio_yield();
642 relock_mnt:
643 MNT_ILOCK(mp);
644 }
645 MNT_IUNLOCK(mp);
646 vn_finished_write(mp);
647 return done;
648 }
649
650 /*
651 * Attempt to keep the free list at wantfreevnodes length.
652 */
653 static void
654 vnlru_free(int count)
655 {
656 struct vnode *vp;
657 int vfslocked;
658
659 mtx_assert(&vnode_free_list_mtx, MA_OWNED);
660 for (; count > 0; count--) {
661 vp = TAILQ_FIRST(&vnode_free_list);
662 /*
663 * The list can be modified while the free_list_mtx
664 * has been dropped and vp could be NULL here.
665 */
666 if (!vp)
667 break;
668 VNASSERT(vp->v_op != NULL, vp,
669 ("vnlru_free: vnode already reclaimed."));
670 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
671 /*
672 * Don't recycle if we can't get the interlock.
673 */
674 if (!VI_TRYLOCK(vp)) {
675 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
676 continue;
677 }
678 VNASSERT(VCANRECYCLE(vp), vp,
679 ("vp inconsistent on freelist"));
680 freevnodes--;
681 vp->v_iflag &= ~VI_FREE;
682 vholdl(vp);
683 mtx_unlock(&vnode_free_list_mtx);
684 VI_UNLOCK(vp);
685 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
686 vtryrecycle(vp);
687 VFS_UNLOCK_GIANT(vfslocked);
688 /*
689 * If the recycle succeeded, this vdrop will actually free
690 * the vnode. If not, it will simply place it back on
691 * the free list.
692 */
693 vdrop(vp);
694 mtx_lock(&vnode_free_list_mtx);
695 }
696 }
697 /*
698 * Attempt to recycle vnodes in a context that is always safe to block.
699 * Calling vlrureclaim() from the bowels of filesystem code has some
700 * interesting deadlock problems.
701 */
702 static struct proc *vnlruproc;
703 static int vnlruproc_sig;
704
705 static void
706 vnlru_proc(void)
707 {
708 struct mount *mp, *nmp;
709 int done;
710 struct proc *p = vnlruproc;
711 struct thread *td = FIRST_THREAD_IN_PROC(p);
712
713 mtx_lock(&Giant);
714
715 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
716 SHUTDOWN_PRI_FIRST);
717
718 for (;;) {
719 kthread_suspend_check(p);
720 mtx_lock(&vnode_free_list_mtx);
721 if (freevnodes > wantfreevnodes)
722 vnlru_free(freevnodes - wantfreevnodes);
723 if (numvnodes <= desiredvnodes * 9 / 10) {
724 vnlruproc_sig = 0;
725 wakeup(&vnlruproc_sig);
726 msleep(vnlruproc, &vnode_free_list_mtx,
727 PVFS|PDROP, "vlruwt", hz);
728 continue;
729 }
730 mtx_unlock(&vnode_free_list_mtx);
731 done = 0;
732 mtx_lock(&mountlist_mtx);
733 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
734 int vfsunlocked;
735 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
736 nmp = TAILQ_NEXT(mp, mnt_list);
737 continue;
738 }
739 if (!VFS_NEEDSGIANT(mp)) {
740 mtx_unlock(&Giant);
741 vfsunlocked = 1;
742 } else
743 vfsunlocked = 0;
744 done += vlrureclaim(mp);
745 if (vfsunlocked)
746 mtx_lock(&Giant);
747 mtx_lock(&mountlist_mtx);
748 nmp = TAILQ_NEXT(mp, mnt_list);
749 vfs_unbusy(mp, td);
750 }
751 mtx_unlock(&mountlist_mtx);
752 if (done == 0) {
753 #if 0
754 /* These messages are temporary debugging aids */
755 if (vnlru_nowhere < 5)
756 printf("vnlru process getting nowhere..\n");
757 else if (vnlru_nowhere == 5)
758 printf("vnlru process messages stopped.\n");
759 #endif
760 vnlru_nowhere++;
761 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
762 } else
763 uio_yield();
764 }
765 }
766
767 static struct kproc_desc vnlru_kp = {
768 "vnlru",
769 vnlru_proc,
770 &vnlruproc
771 };
772 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
773
774 /*
775 * Routines having to do with the management of the vnode table.
776 */
777
778 static void
779 vdestroy(struct vnode *vp)
780 {
781 struct bufobj *bo;
782
783 CTR1(KTR_VFS, "vdestroy vp %p", vp);
784 mtx_lock(&vnode_free_list_mtx);
785 numvnodes--;
786 mtx_unlock(&vnode_free_list_mtx);
787 bo = &vp->v_bufobj;
788 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
789 ("cleaned vnode still on the free list."));
790 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
791 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
792 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
793 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
794 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
795 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
796 VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
797 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
798 VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
799 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
800 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
801 VI_UNLOCK(vp);
802 #ifdef MAC
803 mac_destroy_vnode(vp);
804 #endif
805 if (vp->v_pollinfo != NULL) {
806 knlist_destroy(&vp->v_pollinfo->vpi_selinfo.si_note);
807 mtx_destroy(&vp->v_pollinfo->vpi_lock);
808 uma_zfree(vnodepoll_zone, vp->v_pollinfo);
809 }
810 #ifdef INVARIANTS
811 /* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
812 vp->v_op = NULL;
813 #endif
814 lockdestroy(vp->v_vnlock);
815 mtx_destroy(&vp->v_interlock);
816 uma_zfree(vnode_zone, vp);
817 }
818
819 /*
820 * Try to recycle a freed vnode. We abort if anyone picks up a reference
821 * before we actually vgone(). This function must be called with the vnode
822 * held to prevent the vnode from being returned to the free list midway
823 * through vgone().
824 */
825 static int
826 vtryrecycle(struct vnode *vp)
827 {
828 struct thread *td = curthread;
829 struct mount *vnmp;
830
831 CTR1(KTR_VFS, "vtryrecycle: trying vp %p", vp);
832 VNASSERT(vp->v_holdcnt, vp,
833 ("vtryrecycle: Recycling vp %p without a reference.", vp));
834 /*
835 * This vnode may be found and locked via some other list; if so, we
836 * can't recycle it yet.
837 */
838 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
839 return (EWOULDBLOCK);
840 /*
841 * Don't recycle if its filesystem is being suspended.
842 */
843 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
844 VOP_UNLOCK(vp, 0, td);
845 return (EBUSY);
846 }
847 /*
848 * If we got this far, we need to acquire the interlock and see if
849 * anyone picked up this vnode from another list. If not, we will
850 * mark it with DOOMED via vgonel() so that anyone who does find it
851 * will skip over it.
852 */
853 VI_LOCK(vp);
854 if (vp->v_usecount) {
855 VOP_UNLOCK(vp, LK_INTERLOCK, td);
856 vn_finished_write(vnmp);
857 return (EBUSY);
858 }
859 if ((vp->v_iflag & VI_DOOMED) == 0)
860 vgonel(vp);
861 VOP_UNLOCK(vp, LK_INTERLOCK, td);
862 vn_finished_write(vnmp);
863 CTR1(KTR_VFS, "vtryrecycle: recycled vp %p", vp);
864 return (0);
865 }
866
867 /*
868 * Return the next vnode from the free list.
869 */
870 int
871 getnewvnode(tag, mp, vops, vpp)
872 const char *tag;
873 struct mount *mp;
874 struct vop_vector *vops;
875 struct vnode **vpp;
876 {
877 struct vnode *vp = NULL;
878 struct bufobj *bo;
879
880 mtx_lock(&vnode_free_list_mtx);
881 /*
882 * Lend our context to reclaim vnodes if they've exceeded the max.
883 */
884 if (freevnodes > wantfreevnodes)
885 vnlru_free(1);
886 /*
887 * Wait for available vnodes.
888 */
889 if (numvnodes > desiredvnodes) {
890 if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
891 /*
892 * The filesystem is being suspended; we cannot risk a
893 * deadlock here, so allocate a new vnode anyway.
894 */
895 if (freevnodes > wantfreevnodes)
896 vnlru_free(freevnodes - wantfreevnodes);
897 goto alloc;
898 }
899 if (vnlruproc_sig == 0) {
900 vnlruproc_sig = 1; /* avoid unnecessary wakeups */
901 wakeup(vnlruproc);
902 }
903 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
904 "vlruwk", hz);
905 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
906 if (numvnodes > desiredvnodes) {
907 mtx_unlock(&vnode_free_list_mtx);
908 return (ENFILE);
909 }
910 #endif
911 }
912 alloc:
913 numvnodes++;
914 mtx_unlock(&vnode_free_list_mtx);
915 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
916 /*
917 * Setup locks.
918 */
919 vp->v_vnlock = &vp->v_lock;
920 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
921 /*
922 * By default, don't allow shared locks unless filesystems
923 * opt-in.
924 */
925 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
926 /*
927 * Initialize bufobj.
928 */
929 bo = &vp->v_bufobj;
930 bo->__bo_vnode = vp;
931 bo->bo_mtx = &vp->v_interlock;
932 bo->bo_ops = &buf_ops_bio;
933 bo->bo_private = vp;
934 TAILQ_INIT(&bo->bo_clean.bv_hd);
935 TAILQ_INIT(&bo->bo_dirty.bv_hd);
936 /*
937 * Initialize namecache.
938 */
939 LIST_INIT(&vp->v_cache_src);
940 TAILQ_INIT(&vp->v_cache_dst);
941 /*
942 * Finalize various vnode identity bits.
943 */
944 vp->v_type = VNON;
945 vp->v_tag = tag;
946 vp->v_op = vops;
947 v_incr_usecount(vp);
948 vp->v_data = 0;
949 #ifdef MAC
950 mac_init_vnode(vp);
951 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
952 mac_associate_vnode_singlelabel(mp, vp);
953 else if (mp == NULL)
954 printf("NULL mp in getnewvnode()\n");
955 #endif
956 if (mp != NULL) {
957 insmntque(vp, mp);
958 bo->bo_bsize = mp->mnt_stat.f_iosize;
959 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
960 vp->v_vflag |= VV_NOKNOTE;
961 }
962
963 CTR2(KTR_VFS, "getnewvnode: mp %p vp %p", mp, vp);
964 *vpp = vp;
965 return (0);
966 }
967
968 /*
969 * Delete from old mount point vnode list, if on one.
970 */
971 static void
972 delmntque(struct vnode *vp)
973 {
974 struct mount *mp;
975
976 mp = vp->v_mount;
977 if (mp == NULL)
978 return;
979 MNT_ILOCK(mp);
980 vp->v_mount = NULL;
981 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
982 ("bad mount point vnode list size"));
983 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
984 mp->mnt_nvnodelistsize--;
985 MNT_REL(mp);
986 MNT_IUNLOCK(mp);
987 }
988
989 /*
990 * Insert into list of vnodes for the new mount point, if available.
991 */
992 static void
993 insmntque(struct vnode *vp, struct mount *mp)
994 {
995
996 vp->v_mount = mp;
997 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
998 MNT_ILOCK(mp);
999 MNT_REF(mp);
1000 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1001 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1002 ("neg mount point vnode list size"));
1003 mp->mnt_nvnodelistsize++;
1004 MNT_IUNLOCK(mp);
1005 }
1006
1007 /*
1008 * Flush out and invalidate all buffers associated with a bufobj
1009 * Called with the underlying object locked.
1010 */
1011 int
1012 bufobj_invalbuf(struct bufobj *bo, int flags, struct thread *td, int slpflag, int slptimeo)
1013 {
1014 int error;
1015
1016 BO_LOCK(bo);
1017 if (flags & V_SAVE) {
1018 error = bufobj_wwait(bo, slpflag, slptimeo);
1019 if (error) {
1020 BO_UNLOCK(bo);
1021 return (error);
1022 }
1023 if (bo->bo_dirty.bv_cnt > 0) {
1024 BO_UNLOCK(bo);
1025 if ((error = BO_SYNC(bo, MNT_WAIT, td)) != 0)
1026 return (error);
1027 /*
1028 * XXX We could save a lock/unlock if this was only
1029 * enabled under INVARIANTS
1030 */
1031 BO_LOCK(bo);
1032 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1033 panic("vinvalbuf: dirty bufs");
1034 }
1035 }
1036 /*
1037 * If you alter this loop please notice that interlock is dropped and
1038 * reacquired in flushbuflist. Special care is needed to ensure that
1039 * no race conditions occur from this.
1040 */
1041 do {
1042 error = flushbuflist(&bo->bo_clean,
1043 flags, bo, slpflag, slptimeo);
1044 if (error == 0)
1045 error = flushbuflist(&bo->bo_dirty,
1046 flags, bo, slpflag, slptimeo);
1047 if (error != 0 && error != EAGAIN) {
1048 BO_UNLOCK(bo);
1049 return (error);
1050 }
1051 } while (error != 0);
1052
1053 /*
1054 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1055 * have write I/O in-progress but if there is a VM object then the
1056 * VM object can also have read-I/O in-progress.
1057 */
1058 do {
1059 bufobj_wwait(bo, 0, 0);
1060 BO_UNLOCK(bo);
1061 if (bo->bo_object != NULL) {
1062 VM_OBJECT_LOCK(bo->bo_object);
1063 vm_object_pip_wait(bo->bo_object, "bovlbx");
1064 VM_OBJECT_UNLOCK(bo->bo_object);
1065 }
1066 BO_LOCK(bo);
1067 } while (bo->bo_numoutput > 0);
1068 BO_UNLOCK(bo);
1069
1070 /*
1071 * Destroy the copy in the VM cache, too.
1072 */
1073 if (bo->bo_object != NULL) {
1074 VM_OBJECT_LOCK(bo->bo_object);
1075 vm_object_page_remove(bo->bo_object, 0, 0,
1076 (flags & V_SAVE) ? TRUE : FALSE);
1077 VM_OBJECT_UNLOCK(bo->bo_object);
1078 }
1079
1080 #ifdef INVARIANTS
1081 BO_LOCK(bo);
1082 if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1083 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1084 panic("vinvalbuf: flush failed");
1085 BO_UNLOCK(bo);
1086 #endif
1087 return (0);
1088 }
1089
1090 /*
1091 * Flush out and invalidate all buffers associated with a vnode.
1092 * Called with the underlying object locked.
1093 */
1094 int
1095 vinvalbuf(struct vnode *vp, int flags, struct thread *td, int slpflag, int slptimeo)
1096 {
1097
1098 CTR2(KTR_VFS, "vinvalbuf vp %p flags %d", vp, flags);
1099 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1100 return (bufobj_invalbuf(&vp->v_bufobj, flags, td, slpflag, slptimeo));
1101 }
1102
1103 /*
1104 * Flush out buffers on the specified list.
1105 *
1106 */
1107 static int
1108 flushbuflist(bufv, flags, bo, slpflag, slptimeo)
1109 struct bufv *bufv;
1110 int flags;
1111 struct bufobj *bo;
1112 int slpflag, slptimeo;
1113 {
1114 struct buf *bp, *nbp;
1115 int retval, error;
1116 daddr_t lblkno;
1117 b_xflags_t xflags;
1118
1119 ASSERT_BO_LOCKED(bo);
1120
1121 retval = 0;
1122 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1123 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1124 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1125 continue;
1126 }
1127 lblkno = 0;
1128 xflags = 0;
1129 if (nbp != NULL) {
1130 lblkno = nbp->b_lblkno;
1131 xflags = nbp->b_xflags &
1132 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
1133 }
1134 retval = EAGAIN;
1135 error = BUF_TIMELOCK(bp,
1136 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1137 "flushbuf", slpflag, slptimeo);
1138 if (error) {
1139 BO_LOCK(bo);
1140 return (error != ENOLCK ? error : EAGAIN);
1141 }
1142 KASSERT(bp->b_bufobj == bo,
1143 ("bp %p wrong b_bufobj %p should be %p",
1144 bp, bp->b_bufobj, bo));
1145 if (bp->b_bufobj != bo) { /* XXX: necessary ? */
1146 BUF_UNLOCK(bp);
1147 BO_LOCK(bo);
1148 return (EAGAIN);
1149 }
1150 /*
1151 * XXX Since there are no node locks for NFS, I
1152 * believe there is a slight chance that a delayed
1153 * write will occur while sleeping just above, so
1154 * check for it.
1155 */
1156 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1157 (flags & V_SAVE)) {
1158 bremfree(bp);
1159 bp->b_flags |= B_ASYNC;
1160 bwrite(bp);
1161 BO_LOCK(bo);
1162 return (EAGAIN); /* XXX: why not loop ? */
1163 }
1164 bremfree(bp);
1165 bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1166 bp->b_flags &= ~B_ASYNC;
1167 brelse(bp);
1168 BO_LOCK(bo);
1169 if (nbp != NULL &&
1170 (nbp->b_bufobj != bo ||
1171 nbp->b_lblkno != lblkno ||
1172 (nbp->b_xflags &
1173 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
1174 break; /* nbp invalid */
1175 }
1176 return (retval);
1177 }
1178
1179 /*
1180 * Truncate a file's buffers and pages to a specified length. This
1181 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1182 * sync activity.
1183 */
1184 int
1185 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td, off_t length, int blksize)
1186 {
1187 struct buf *bp, *nbp;
1188 int anyfreed;
1189 int trunclbn;
1190 struct bufobj *bo;
1191
1192 CTR2(KTR_VFS, "vtruncbuf vp %p length %jd", vp, length);
1193 /*
1194 * Round up to the *next* lbn.
1195 */
1196 trunclbn = (length + blksize - 1) / blksize;
1197
1198 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1199 restart:
1200 VI_LOCK(vp);
1201 bo = &vp->v_bufobj;
1202 anyfreed = 1;
1203 for (;anyfreed;) {
1204 anyfreed = 0;
1205 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1206 if (bp->b_lblkno < trunclbn)
1207 continue;
1208 if (BUF_LOCK(bp,
1209 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1210 VI_MTX(vp)) == ENOLCK)
1211 goto restart;
1212
1213 bremfree(bp);
1214 bp->b_flags |= (B_INVAL | B_RELBUF);
1215 bp->b_flags &= ~B_ASYNC;
1216 brelse(bp);
1217 anyfreed = 1;
1218
1219 if (nbp != NULL &&
1220 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1221 (nbp->b_vp != vp) ||
1222 (nbp->b_flags & B_DELWRI))) {
1223 goto restart;
1224 }
1225 VI_LOCK(vp);
1226 }
1227
1228 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1229 if (bp->b_lblkno < trunclbn)
1230 continue;
1231 if (BUF_LOCK(bp,
1232 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1233 VI_MTX(vp)) == ENOLCK)
1234 goto restart;
1235 bremfree(bp);
1236 bp->b_flags |= (B_INVAL | B_RELBUF);
1237 bp->b_flags &= ~B_ASYNC;
1238 brelse(bp);
1239 anyfreed = 1;
1240 if (nbp != NULL &&
1241 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1242 (nbp->b_vp != vp) ||
1243 (nbp->b_flags & B_DELWRI) == 0)) {
1244 goto restart;
1245 }
1246 VI_LOCK(vp);
1247 }
1248 }
1249
1250 if (length > 0) {
1251 restartsync:
1252 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1253 if (bp->b_lblkno > 0)
1254 continue;
1255 /*
1256 * Since we hold the vnode lock this should only
1257 * fail if we're racing with the buf daemon.
1258 */
1259 if (BUF_LOCK(bp,
1260 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1261 VI_MTX(vp)) == ENOLCK) {
1262 goto restart;
1263 }
1264 VNASSERT((bp->b_flags & B_DELWRI), vp,
1265 ("buf(%p) on dirty queue without DELWRI", bp));
1266
1267 bremfree(bp);
1268 bawrite(bp);
1269 VI_LOCK(vp);
1270 goto restartsync;
1271 }
1272 }
1273
1274 bufobj_wwait(bo, 0, 0);
1275 VI_UNLOCK(vp);
1276 vnode_pager_setsize(vp, length);
1277
1278 return (0);
1279 }
1280
1281 /*
1282 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1283 * a vnode.
1284 *
1285 * NOTE: We have to deal with the special case of a background bitmap
1286 * buffer, a situation where two buffers will have the same logical
1287 * block offset. We want (1) only the foreground buffer to be accessed
1288 * in a lookup and (2) must differentiate between the foreground and
1289 * background buffer in the splay tree algorithm because the splay
1290 * tree cannot normally handle multiple entities with the same 'index'.
1291 * We accomplish this by adding differentiating flags to the splay tree's
1292 * numerical domain.
1293 */
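/*
 * Illustrative scenario (editorial note): if a foreground buffer and its
 * background bitmap shadow both sit at lblkno 7, the shadow has
 * BX_BKGRDMARKER set while the foreground buffer does not, so the
 * comparisons below treat the foreground buffer as the smaller key.
 * gbincore() passes an xflags argument of 0, which means a lookup always
 * splays the foreground buffer, never the shadow, to the root.
 */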
1294 static
1295 struct buf *
1296 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1297 {
1298 struct buf dummy;
1299 struct buf *lefttreemax, *righttreemin, *y;
1300
1301 if (root == NULL)
1302 return (NULL);
1303 lefttreemax = righttreemin = &dummy;
1304 for (;;) {
1305 if (lblkno < root->b_lblkno ||
1306 (lblkno == root->b_lblkno &&
1307 (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1308 if ((y = root->b_left) == NULL)
1309 break;
1310 if (lblkno < y->b_lblkno) {
1311 /* Rotate right. */
1312 root->b_left = y->b_right;
1313 y->b_right = root;
1314 root = y;
1315 if ((y = root->b_left) == NULL)
1316 break;
1317 }
1318 /* Link into the new root's right tree. */
1319 righttreemin->b_left = root;
1320 righttreemin = root;
1321 } else if (lblkno > root->b_lblkno ||
1322 (lblkno == root->b_lblkno &&
1323 (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1324 if ((y = root->b_right) == NULL)
1325 break;
1326 if (lblkno > y->b_lblkno) {
1327 /* Rotate left. */
1328 root->b_right = y->b_left;
1329 y->b_left = root;
1330 root = y;
1331 if ((y = root->b_right) == NULL)
1332 break;
1333 }
1334 /* Link into the new root's left tree. */
1335 lefttreemax->b_right = root;
1336 lefttreemax = root;
1337 } else {
1338 break;
1339 }
1340 root = y;
1341 }
1342 /* Assemble the new root. */
1343 lefttreemax->b_right = root->b_left;
1344 righttreemin->b_left = root->b_right;
1345 root->b_left = dummy.b_right;
1346 root->b_right = dummy.b_left;
1347 return (root);
1348 }
1349
1350 static void
1351 buf_vlist_remove(struct buf *bp)
1352 {
1353 struct buf *root;
1354 struct bufv *bv;
1355
1356 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1357 ASSERT_BO_LOCKED(bp->b_bufobj);
1358 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1359 (BX_VNDIRTY|BX_VNCLEAN),
1360 ("buf_vlist_remove: Buf %p is on two lists", bp));
1361 if (bp->b_xflags & BX_VNDIRTY)
1362 bv = &bp->b_bufobj->bo_dirty;
1363 else
1364 bv = &bp->b_bufobj->bo_clean;
1365 if (bp != bv->bv_root) {
1366 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1367 KASSERT(root == bp, ("splay lookup failed in remove"));
1368 }
1369 if (bp->b_left == NULL) {
1370 root = bp->b_right;
1371 } else {
1372 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1373 root->b_right = bp->b_right;
1374 }
1375 bv->bv_root = root;
1376 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1377 bv->bv_cnt--;
1378 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1379 }
1380
1381 /*
1382 * Add the buffer to the sorted clean or dirty block list using a
1383 * splay tree algorithm.
1384 *
1385 * NOTE: xflags is passed as a constant, optimizing this inline function!
1386 */
1387 static void
1388 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1389 {
1390 struct buf *root;
1391 struct bufv *bv;
1392
1393 ASSERT_BO_LOCKED(bo);
1394 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1395 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1396 bp->b_xflags |= xflags;
1397 if (xflags & BX_VNDIRTY)
1398 bv = &bo->bo_dirty;
1399 else
1400 bv = &bo->bo_clean;
1401
1402 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1403 if (root == NULL) {
1404 bp->b_left = NULL;
1405 bp->b_right = NULL;
1406 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1407 } else if (bp->b_lblkno < root->b_lblkno ||
1408 (bp->b_lblkno == root->b_lblkno &&
1409 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1410 bp->b_left = root->b_left;
1411 bp->b_right = root;
1412 root->b_left = NULL;
1413 TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1414 } else {
1415 bp->b_right = root->b_right;
1416 bp->b_left = root;
1417 root->b_right = NULL;
1418 TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1419 }
1420 bv->bv_cnt++;
1421 bv->bv_root = bp;
1422 }
1423
1424 /*
1425 * Lookup a buffer using the splay tree. Note that we specifically avoid
1426 * shadow buffers used in background bitmap writes.
1427 *
1428 * This code isn't quite as efficient as it could be because we are maintaining
1429 * two sorted lists and do not know which list the block resides in.
1430 *
1431 * During a "make buildworld" the desired buffer is found at one of
1432 * the roots more than 60% of the time. Thus, checking both roots
1433 * before performing either splay eliminates unnecessary splays on the
1434 * first tree splayed.
1435 */
1436 struct buf *
1437 gbincore(struct bufobj *bo, daddr_t lblkno)
1438 {
1439 struct buf *bp;
1440
1441 ASSERT_BO_LOCKED(bo);
1442 if ((bp = bo->bo_clean.bv_root) != NULL &&
1443 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1444 return (bp);
1445 if ((bp = bo->bo_dirty.bv_root) != NULL &&
1446 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1447 return (bp);
1448 if ((bp = bo->bo_clean.bv_root) != NULL) {
1449 bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1450 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1451 return (bp);
1452 }
1453 if ((bp = bo->bo_dirty.bv_root) != NULL) {
1454 bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1455 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1456 return (bp);
1457 }
1458 return (NULL);
1459 }
1460
1461 /*
1462 * Associate a buffer with a vnode.
1463 */
1464 void
1465 bgetvp(struct vnode *vp, struct buf *bp)
1466 {
1467
1468 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1469
1470 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1471 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1472 ("bgetvp: bp already attached! %p", bp));
1473
1474 ASSERT_VI_LOCKED(vp, "bgetvp");
1475 vholdl(vp);
1476 bp->b_vp = vp;
1477 bp->b_bufobj = &vp->v_bufobj;
1478 /*
1479 * Insert onto list for new vnode.
1480 */
1481 buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
1482 }
1483
1484 /*
1485 * Disassociate a buffer from a vnode.
1486 */
1487 void
1488 brelvp(struct buf *bp)
1489 {
1490 struct bufobj *bo;
1491 struct vnode *vp;
1492
1493 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1494 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1495
1496 /*
1497 * Delete from old vnode list, if on one.
1498 */
1499 vp = bp->b_vp; /* XXX */
1500 bo = bp->b_bufobj;
1501 BO_LOCK(bo);
1502 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1503 buf_vlist_remove(bp);
1504 else
1505 panic("brelvp: Buffer %p not on queue.", bp);
1506 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1507 bo->bo_flag &= ~BO_ONWORKLST;
1508 mtx_lock(&sync_mtx);
1509 LIST_REMOVE(bo, bo_synclist);
1510 syncer_worklist_len--;
1511 mtx_unlock(&sync_mtx);
1512 }
1513 bp->b_vp = NULL;
1514 bp->b_bufobj = NULL;
1515 vdropl(vp);
1516 }
1517
1518 /*
1519 * Add an item to the syncer work queue.
1520 */
1521 static void
1522 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1523 {
1524 int slot;
1525
1526 ASSERT_BO_LOCKED(bo);
1527
1528 mtx_lock(&sync_mtx);
1529 if (bo->bo_flag & BO_ONWORKLST)
1530 LIST_REMOVE(bo, bo_synclist);
1531 else {
1532 bo->bo_flag |= BO_ONWORKLST;
1533 syncer_worklist_len++;
1534 }
1535
1536 if (delay > syncer_maxdelay - 2)
1537 delay = syncer_maxdelay - 2;
1538 slot = (syncer_delayno + delay) & syncer_mask;
1539
1540 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1541 mtx_unlock(&sync_mtx);
1542 }
1543
1544 static int
1545 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1546 {
1547 int error, len;
1548
1549 mtx_lock(&sync_mtx);
1550 len = syncer_worklist_len - sync_vnode_count;
1551 mtx_unlock(&sync_mtx);
1552 error = SYSCTL_OUT(req, &len, sizeof(len));
1553 return (error);
1554 }
1555
1556 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1557 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1558
1559 static struct proc *updateproc;
1560 static void sched_sync(void);
1561 static struct kproc_desc up_kp = {
1562 "syncer",
1563 sched_sync,
1564 &updateproc
1565 };
1566 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1567
1568 static int
1569 sync_vnode(struct bufobj *bo, struct thread *td)
1570 {
1571 struct vnode *vp;
1572 struct mount *mp;
1573
1574 vp = bo->__bo_vnode; /* XXX */
1575 if (VOP_ISLOCKED(vp, NULL) != 0)
1576 return (1);
1577 if (VI_TRYLOCK(vp) == 0)
1578 return (1);
1579 /*
1580 * We use vhold in case the vnode does not
1581 * successfully sync. vhold prevents the vnode from
1582 * going away when we unlock the sync_mtx so that
1583 * we can acquire the vnode interlock.
1584 */
1585 vholdl(vp);
1586 mtx_unlock(&sync_mtx);
1587 VI_UNLOCK(vp);
1588 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1589 vdrop(vp);
1590 mtx_lock(&sync_mtx);
1591 return (1);
1592 }
1593 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1594 (void) VOP_FSYNC(vp, MNT_LAZY, td);
1595 VOP_UNLOCK(vp, 0, td);
1596 vn_finished_write(mp);
1597 VI_LOCK(vp);
1598 if ((bo->bo_flag & BO_ONWORKLST) != 0) {
1599 /*
1600 * Put us back on the worklist. The worklist
1601 * routine will remove us from our current
1602 * position and then add us back in at a later
1603 * position.
1604 */
1605 vn_syncer_add_to_worklist(bo, syncdelay);
1606 }
1607 vdropl(vp);
1608 mtx_lock(&sync_mtx);
1609 return (0);
1610 }
1611
1612 /*
1613 * System filesystem synchronizer daemon.
1614 */
1615 static void
1616 sched_sync(void)
1617 {
1618 struct synclist *next;
1619 struct synclist *slp;
1620 struct bufobj *bo;
1621 long starttime;
1622 struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1623 static int dummychan;
1624 int last_work_seen;
1625 int net_worklist_len;
1626 int syncer_final_iter;
1627 int first_printf;
1628 int error;
1629
1630 mtx_lock(&Giant);
1631 last_work_seen = 0;
1632 syncer_final_iter = 0;
1633 first_printf = 1;
1634 syncer_state = SYNCER_RUNNING;
1635 starttime = time_second;
1636 td->td_pflags |= TDP_NORUNNINGBUF;
1637
1638 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1639 SHUTDOWN_PRI_LAST);
1640
1641 for (;;) {
1642 mtx_lock(&sync_mtx);
1643 if (syncer_state == SYNCER_FINAL_DELAY &&
1644 syncer_final_iter == 0) {
1645 mtx_unlock(&sync_mtx);
1646 kthread_suspend_check(td->td_proc);
1647 mtx_lock(&sync_mtx);
1648 }
1649 net_worklist_len = syncer_worklist_len - sync_vnode_count;
1650 if (syncer_state != SYNCER_RUNNING &&
1651 starttime != time_second) {
1652 if (first_printf) {
1653 printf("\nSyncing disks, vnodes remaining...");
1654 first_printf = 0;
1655 }
1656 printf("%d ", net_worklist_len);
1657 }
1658 starttime = time_second;
1659
1660 /*
1661 * Push files whose dirty time has expired. Be careful
1662 * of interrupt race on slp queue.
1663 *
1664 * Skip over empty worklist slots when shutting down.
1665 */
1666 do {
1667 slp = &syncer_workitem_pending[syncer_delayno];
1668 syncer_delayno += 1;
1669 if (syncer_delayno == syncer_maxdelay)
1670 syncer_delayno = 0;
1671 next = &syncer_workitem_pending[syncer_delayno];
1672 /*
1673 * If the worklist has wrapped since it
1674 * was emptied of all but syncer vnodes,
1675 * switch to the FINAL_DELAY state and run
1676 * for one more second.
1677 */
1678 if (syncer_state == SYNCER_SHUTTING_DOWN &&
1679 net_worklist_len == 0 &&
1680 last_work_seen == syncer_delayno) {
1681 syncer_state = SYNCER_FINAL_DELAY;
1682 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1683 }
1684 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1685 syncer_worklist_len > 0);
1686
1687 /*
1688 * Keep track of the last time there was anything
1689 * on the worklist other than syncer vnodes.
1690 * Return to the SHUTTING_DOWN state if any
1691 * new work appears.
1692 */
1693 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1694 last_work_seen = syncer_delayno;
1695 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1696 syncer_state = SYNCER_SHUTTING_DOWN;
1697 while ((bo = LIST_FIRST(slp)) != NULL) {
1698 error = sync_vnode(bo, td);
1699 if (error == 1) {
1700 LIST_REMOVE(bo, bo_synclist);
1701 LIST_INSERT_HEAD(next, bo, bo_synclist);
1702 continue;
1703 }
1704 }
1705 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1706 syncer_final_iter--;
1707 mtx_unlock(&sync_mtx);
1708 /*
1709 * The variable rushjob allows the kernel to speed up the
1710 * processing of the filesystem syncer process. A rushjob
1711 * value of N tells the filesystem syncer to process the next
1712 * N seconds worth of work on its queue ASAP. Currently rushjob
1713 * is used by the soft update code to speed up the filesystem
1714 * syncer process when the incore state is getting so far
1715 * ahead of the disk that the kernel memory pool is being
1716 * threatened with exhaustion.
1717 */
1718 mtx_lock(&sync_mtx);
1719 if (rushjob > 0) {
1720 rushjob -= 1;
1721 mtx_unlock(&sync_mtx);
1722 continue;
1723 }
1724 mtx_unlock(&sync_mtx);
1725 /*
1726 * Just sleep for a short period of time between
1727 * iterations when shutting down to allow some I/O
1728 * to happen.
1729 *
1730 * If it has taken us less than a second to process the
1731 * current work, then wait. Otherwise start right over
1732 * again. We can still lose time if any single round
1733 * takes more than two seconds, but it does not really
1734 * matter as we are just trying to generally pace the
1735 * filesystem activity.
1736 */
1737 if (syncer_state != SYNCER_RUNNING)
1738 tsleep(&dummychan, PPAUSE, "syncfnl",
1739 hz / SYNCER_SHUTDOWN_SPEEDUP);
1740 else if (time_second == starttime)
1741 tsleep(&lbolt, PPAUSE, "syncer", 0);
1742 }
1743 }
1744
1745 /*
1746 * Request the syncer daemon to speed up its work.
1747 * We never push it to speed up more than half of its
1748 * normal turn time, otherwise it could take over the cpu.
1749 */
1750 int
1751 speedup_syncer()
1752 {
1753 struct thread *td;
1754 int ret = 0;
1755
1756 td = FIRST_THREAD_IN_PROC(updateproc);
1757 sleepq_remove(td, &lbolt);
1758 mtx_lock(&sync_mtx);
1759 if (rushjob < syncdelay / 2) {
1760 rushjob += 1;
1761 stat_rush_requests += 1;
1762 ret = 1;
1763 }
1764 mtx_unlock(&sync_mtx);
1765 return (ret);
1766 }
1767
1768 /*
1769 * Tell the syncer to speed up its work and run through its work
1770 * list several times, then tell it to shut down.
1771 */
1772 static void
1773 syncer_shutdown(void *arg, int howto)
1774 {
1775 struct thread *td;
1776
1777 if (howto & RB_NOSYNC)
1778 return;
1779 td = FIRST_THREAD_IN_PROC(updateproc);
1780 sleepq_remove(td, &lbolt);
1781 mtx_lock(&sync_mtx);
1782 syncer_state = SYNCER_SHUTTING_DOWN;
1783 rushjob = 0;
1784 mtx_unlock(&sync_mtx);
1785 kproc_shutdown(arg, howto);
1786 }
1787
1788 /*
1789 * Reassign a buffer from one vnode to another.
1790 * Used to assign file specific control information
1791 * (indirect blocks) to the vnode to which they belong.
1792 */
1793 void
1794 reassignbuf(struct buf *bp)
1795 {
1796 struct vnode *vp;
1797 struct bufobj *bo;
1798 int delay;
1799 #ifdef INVARIANTS
1800 struct bufv *bv;
1801 #endif
1802
1803 vp = bp->b_vp;
1804 bo = bp->b_bufobj;
1805 ++reassignbufcalls;
1806
1807 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1808 bp, bp->b_vp, bp->b_flags);
1809 /*
1810 * B_PAGING flagged buffers cannot be reassigned because their vp
1811 * is not fully linked in.
1812 */
1813 if (bp->b_flags & B_PAGING)
1814 panic("cannot reassign paging buffer");
1815
1816 /*
1817 * Delete from old vnode list, if on one.
1818 */
1819 VI_LOCK(vp);
1820 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1821 buf_vlist_remove(bp);
1822 else
1823 panic("reassignbuf: Buffer %p not on queue.", bp);
1824 /*
1825 * If dirty, put on list of dirty buffers; otherwise insert onto list
1826 * of clean buffers.
1827 */
1828 if (bp->b_flags & B_DELWRI) {
1829 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1830 switch (vp->v_type) {
1831 case VDIR:
1832 delay = dirdelay;
1833 break;
1834 case VCHR:
1835 delay = metadelay;
1836 break;
1837 default:
1838 delay = filedelay;
1839 }
1840 vn_syncer_add_to_worklist(bo, delay);
1841 }
1842 buf_vlist_add(bp, bo, BX_VNDIRTY);
1843 } else {
1844 buf_vlist_add(bp, bo, BX_VNCLEAN);
1845
1846 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1847 mtx_lock(&sync_mtx);
1848 LIST_REMOVE(bo, bo_synclist);
1849 syncer_worklist_len--;
1850 mtx_unlock(&sync_mtx);
1851 bo->bo_flag &= ~BO_ONWORKLST;
1852 }
1853 }
1854 #ifdef INVARIANTS
1855 bv = &bo->bo_clean;
1856 bp = TAILQ_FIRST(&bv->bv_hd);
1857 KASSERT(bp == NULL || bp->b_bufobj == bo,
1858 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1859 bp = TAILQ_LAST(&bv->bv_hd, buflists);
1860 KASSERT(bp == NULL || bp->b_bufobj == bo,
1861 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1862 bv = &bo->bo_dirty;
1863 bp = TAILQ_FIRST(&bv->bv_hd);
1864 KASSERT(bp == NULL || bp->b_bufobj == bo,
1865 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1866 bp = TAILQ_LAST(&bv->bv_hd, buflists);
1867 KASSERT(bp == NULL || bp->b_bufobj == bo,
1868 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1869 #endif
1870 VI_UNLOCK(vp);
1871 }
1872
1873 /*
1874 * Increment the use and hold counts on the vnode, taking care to reference
1875 * the driver's usecount if this is a chardev. The vholdl() will remove
1876 * the vnode from the free list if it is presently free. Requires the
1877 * vnode interlock and returns with it held.
1878 */
1879 static void
1880 v_incr_usecount(struct vnode *vp)
1881 {
1882
1883 CTR3(KTR_VFS, "v_incr_usecount: vp %p holdcnt %d usecount %d\n",
1884 vp, vp->v_holdcnt, vp->v_usecount);
1885 vp->v_usecount++;
1886 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1887 dev_lock();
1888 vp->v_rdev->si_usecount++;
1889 dev_unlock();
1890 }
1891 vholdl(vp);
1892 }
1893
1894 /*
1895 * Turn a holdcnt into a use+holdcnt such that only one call to
1896 * v_decr_usecount is needed.
1897 */
1898 static void
1899 v_upgrade_usecount(struct vnode *vp)
1900 {
1901
1902 CTR3(KTR_VFS, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n",
1903 vp, vp->v_holdcnt, vp->v_usecount);
1904 vp->v_usecount++;
1905 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1906 dev_lock();
1907 vp->v_rdev->si_usecount++;
1908 dev_unlock();
1909 }
1910 }
1911
1912 /*
1913 * Decrement the vnode use and hold count along with the driver's usecount
1914 * if this is a chardev. The vdropl() below releases the vnode interlock
1915 * as it may free the vnode.
1916 */
1917 static void
1918 v_decr_usecount(struct vnode *vp)
1919 {
1920
1921 CTR3(KTR_VFS, "v_decr_usecount: vp %p holdcnt %d usecount %d\n",
1922 vp, vp->v_holdcnt, vp->v_usecount);
1923 ASSERT_VI_LOCKED(vp, __FUNCTION__);
1924 VNASSERT(vp->v_usecount > 0, vp,
1925 ("v_decr_usecount: negative usecount"));
1926 vp->v_usecount--;
1927 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1928 dev_lock();
1929 vp->v_rdev->si_usecount--;
1930 dev_unlock();
1931 }
1932 vdropl(vp);
1933 }
1934
1935 /*
1936 * Decrement only the use count and driver use count. This is intended to
1937 * be paired with a follow-on vdropl() to release the remaining hold count.
1938 * In this way we may vgone() a vnode with a 0 usecount without risk of
1939 * having it end up on a free list because the hold count is kept above 0.
1940 */
1941 static void
1942 v_decr_useonly(struct vnode *vp)
1943 {
1944
1945 CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n",
1946 vp, vp->v_holdcnt, vp->v_usecount);
1947 ASSERT_VI_LOCKED(vp, __FUNCTION__);
1948 VNASSERT(vp->v_usecount > 0, vp,
1949 ("v_decr_useonly: negative usecount"));
1950 vp->v_usecount--;
1951 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1952 dev_lock();
1953 vp->v_rdev->si_usecount--;
1954 dev_unlock();
1955 }
1956 }
1957
1958 /*
1959 * Grab a particular vnode from the free list, increment its
1960 * reference count and lock it. The vnode lock bit is set if the
1961 * vnode is being eliminated in vgone. The process is awakened
1962 * when the transition is completed, and an error returned to
1963 * indicate that the vnode is no longer usable (possibly having
1964 * been changed to a new filesystem type).
1965 */
1966 int
1967 vget(struct vnode *vp, int flags, struct thread *td)
1968 {
1969 int oweinact;
1970 int oldflags;
1971 int error;
1972
1973 error = 0;
1974 oldflags = flags;
1975 oweinact = 0;
1976 VFS_ASSERT_GIANT(vp->v_mount);
1977 if ((flags & LK_INTERLOCK) == 0)
1978 VI_LOCK(vp);
1979 /*
1980 * If the inactive call was deferred because vput() was called
1981 * with a shared lock, we have to do it here before another thread
1982 * gets a reference to data that should be dead.
1983 */
1984 if (vp->v_iflag & VI_OWEINACT) {
1985 if (flags & LK_NOWAIT) {
1986 VI_UNLOCK(vp);
1987 return (EBUSY);
1988 }
1989 flags &= ~LK_TYPE_MASK;
1990 flags |= LK_EXCLUSIVE;
1991 oweinact = 1;
1992 }
1993 vholdl(vp);
1994 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
1995 vdrop(vp);
1996 return (error);
1997 }
1998 VI_LOCK(vp);
1999 /* Upgrade our holdcnt to a usecount. */
2000 v_upgrade_usecount(vp);
2001 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2002 panic("vget: vn_lock failed to return ENOENT\n");
2003 if (oweinact) {
2004 if (vp->v_iflag & VI_OWEINACT)
2005 vinactive(vp, td);
2006 VI_UNLOCK(vp);
2007 if ((oldflags & LK_TYPE_MASK) == 0)
2008 VOP_UNLOCK(vp, 0, td);
2009 } else
2010 VI_UNLOCK(vp);
2011 return (0);
2012 }
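
/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * of vget() under this 6.x KPI already holds a pointer to "vp" from a name
 * or hash lookup and wants a referenced, exclusively locked vnode.  The
 * choice of lock flags, LK_RETRY and error handling is the caller's.
 *
 *	error = vget(vp, LK_EXCLUSIVE, curthread);
 *	if (error == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);		releases both the lock and the use count
 *	}
 */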
2013
2014 /*
2015 * Increase the reference count of a vnode.
2016 */
2017 void
2018 vref(struct vnode *vp)
2019 {
2020
2021 VI_LOCK(vp);
2022 v_incr_usecount(vp);
2023 VI_UNLOCK(vp);
2024 }
2025
2026 /*
2027 * Return reference count of a vnode.
2028 *
2029 * The results of this call are only guaranteed when some mechanism other
2030 * than the VI lock is used to stop other processes from gaining references
2031 * to the vnode. This may be the case if the caller holds the only reference.
2032 * This is also useful when stale data is acceptable as race conditions may
2033 * be accounted for by some other means.
2034 */
2035 int
2036 vrefcnt(struct vnode *vp)
2037 {
2038 int usecnt;
2039
2040 VI_LOCK(vp);
2041 usecnt = vp->v_usecount;
2042 VI_UNLOCK(vp);
2043
2044 return (usecnt);
2045 }
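
/*
 * Illustrative sketch (not part of the original file): because vrefcnt()
 * only returns a snapshot taken under the interlock, it is mostly useful
 * when the caller holds the sole reference and wants to know whether any
 * other user has appeared, e.g.:
 *
 *	if (vrefcnt(vp) == 1) {
 *		... we are the only user; exclusive cleanup is safe ...
 *	}
 */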
2046
2047
2048 /*
2049 * Vnode put/release.
2050 * If count drops to zero, call inactive routine and return to freelist.
2051 */
2052 void
2053 vrele(vp)
2054 struct vnode *vp;
2055 {
2056 struct thread *td = curthread; /* XXX */
2057
2058 KASSERT(vp != NULL, ("vrele: null vp"));
2059 VFS_ASSERT_GIANT(vp->v_mount);
2060
2061 VI_LOCK(vp);
2062
2063 /* Skip this v_writecount check if we're going to panic below. */
2064 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2065 ("vrele: missed vn_close"));
2066
2067 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2068 vp->v_usecount == 1)) {
2069 v_decr_usecount(vp);
2070 return;
2071 }
2072 if (vp->v_usecount != 1) {
2073 #ifdef DIAGNOSTIC
2074 vprint("vrele: negative ref count", vp);
2075 #endif
2076 VI_UNLOCK(vp);
2077 panic("vrele: negative ref cnt");
2078 }
2079 /*
2080 * We want to hold the vnode until the inactive finishes to
2081 * prevent vgone() races. We drop the use count here and the
2082 * hold count below when we're done.
2083 */
2084 v_decr_useonly(vp);
2085 /*
2086 * We must call VOP_INACTIVE with the node locked. Mark
2087 * as VI_DOINGINACT to avoid recursion.
2088 */
2089 vp->v_iflag |= VI_OWEINACT;
2090 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2091 VI_LOCK(vp);
2092 if (vp->v_usecount > 0)
2093 vp->v_iflag &= ~VI_OWEINACT;
2094 if (vp->v_iflag & VI_OWEINACT)
2095 vinactive(vp, td);
2096 VOP_UNLOCK(vp, 0, td);
2097 } else {
2098 VI_LOCK(vp);
2099 if (vp->v_usecount > 0)
2100 vp->v_iflag &= ~VI_OWEINACT;
2101 }
2102 vdropl(vp);
2103 }
2104
2105 /*
2106 * Release an already locked vnode. This gives the same effect as
2107 * unlock+vrele(), but takes less time and avoids releasing and
2108 * re-acquiring the lock (as vrele() acquires the lock internally).
2109 */
2110 void
2111 vput(vp)
2112 struct vnode *vp;
2113 {
2114 struct thread *td = curthread; /* XXX */
2115 int error;
2116
2117 KASSERT(vp != NULL, ("vput: null vp"));
2118 ASSERT_VOP_LOCKED(vp, "vput");
2119 VFS_ASSERT_GIANT(vp->v_mount);
2120 VI_LOCK(vp);
2121 /* Skip this v_writecount check if we're going to panic below. */
2122 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2123 ("vput: missed vn_close"));
2124 error = 0;
2125
2126 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2127 vp->v_usecount == 1)) {
2128 VOP_UNLOCK(vp, 0, td);
2129 v_decr_usecount(vp);
2130 return;
2131 }
2132
2133 if (vp->v_usecount != 1) {
2134 #ifdef DIAGNOSTIC
2135 vprint("vput: negative ref count", vp);
2136 #endif
2137 panic("vput: negative ref cnt");
2138 }
2139 /*
2140 * We want to hold the vnode until the inactive finishes to
2141 * prevent vgone() races. We drop the use count here and the
2142 * hold count below when we're done.
2143 */
2144 v_decr_useonly(vp);
2145 vp->v_iflag |= VI_OWEINACT;
2146 if (VOP_ISLOCKED(vp, NULL) != LK_EXCLUSIVE) {
2147 error = VOP_LOCK(vp, LK_EXCLUPGRADE|LK_INTERLOCK|LK_NOWAIT, td);
2148 VI_LOCK(vp);
2149 if (error) {
2150 if (vp->v_usecount > 0)
2151 vp->v_iflag &= ~VI_OWEINACT;
2152 goto done;
2153 }
2154 }
2155 if (vp->v_usecount > 0)
2156 vp->v_iflag &= ~VI_OWEINACT;
2157 if (vp->v_iflag & VI_OWEINACT)
2158 vinactive(vp, td);
2159 VOP_UNLOCK(vp, 0, td);
2160 done:
2161 vdropl(vp);
2162 }
2163
2164 /*
2165 * Somebody doesn't want the vnode recycled.
2166 */
2167 void
2168 vhold(struct vnode *vp)
2169 {
2170
2171 VI_LOCK(vp);
2172 vholdl(vp);
2173 VI_UNLOCK(vp);
2174 }
2175
2176 void
2177 vholdl(struct vnode *vp)
2178 {
2179
2180 vp->v_holdcnt++;
2181 if (VSHOULDBUSY(vp))
2182 vbusy(vp);
2183 }
2184
2185 /*
2186 * Note that there is one less who cares about this vnode. vdrop() is the
2187 * opposite of vhold().
2188 */
2189 void
2190 vdrop(struct vnode *vp)
2191 {
2192
2193 VI_LOCK(vp);
2194 vdropl(vp);
2195 }
2196
2197 /*
2198 * Drop the hold count of the vnode. If this is the last reference to
2199 * the vnode, we will free it if it has been vgone'd; otherwise it is
2200 * placed on the free list.
2201 */
2202 static void
2203 vdropl(struct vnode *vp)
2204 {
2205
2206 if (vp->v_holdcnt <= 0)
2207 panic("vdrop: holdcnt %d", vp->v_holdcnt);
2208 vp->v_holdcnt--;
2209 if (vp->v_holdcnt == 0) {
2210 if (vp->v_iflag & VI_DOOMED) {
2211 vdestroy(vp);
2212 return;
2213 } else
2214 vfree(vp);
2215 }
2216 VI_UNLOCK(vp);
2217 }
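
/*
 * Illustrative sketch (not part of the original file): vhold()/vdrop() pin
 * a vnode without granting a usecount, which is handy when the interlock
 * must be released across a blocking operation:
 *
 *	VI_LOCK(vp);
 *	vholdl(vp);		keep the vnode from being recycled
 *	VI_UNLOCK(vp);
 *	... sleep, or acquire other locks ...
 *	vdrop(vp);		re-takes the interlock and releases the hold
 */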
2218
2219 /*
2220 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2221 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
2222 * OWEINACT tracks whether a vnode missed a call to inactive due to a
2223 * failed lock upgrade.
2224 */
2225 static void
2226 vinactive(struct vnode *vp, struct thread *td)
2227 {
2228
2229 ASSERT_VOP_LOCKED(vp, "vinactive");
2230 ASSERT_VI_LOCKED(vp, "vinactive");
2231 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2232 ("vinactive: recursed on VI_DOINGINACT"));
2233 vp->v_iflag |= VI_DOINGINACT;
2234 vp->v_iflag &= ~VI_OWEINACT;
2235 VI_UNLOCK(vp);
2236 VOP_INACTIVE(vp, td);
2237 VI_LOCK(vp);
2238 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2239 ("vinactive: lost VI_DOINGINACT"));
2240 vp->v_iflag &= ~VI_DOINGINACT;
2241 }
2242
2243 /*
2244 * Remove any vnodes in the vnode table belonging to mount point mp.
2245 *
2246 * If FORCECLOSE is not specified, there should not be any active ones,
2247 * return error if any are found (nb: this is a user error, not a
2248 * system error). If FORCECLOSE is specified, detach any active vnodes
2249 * that are found.
2250 *
2251 * If WRITECLOSE is set, only flush out regular file vnodes open for
2252 * writing.
2253 *
2254 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2255 *
2256 * `rootrefs' specifies the base reference count for the root vnode
2257 * of this filesystem. The root vnode is considered busy if its
2258 * v_usecount exceeds this value. On a successful return, vflush()
2259 * will call vrele() on the root vnode exactly rootrefs times.
2260 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2261 * be zero.
2262 */
2263 #ifdef DIAGNOSTIC
2264 static int busyprt = 0; /* print out busy vnodes */
2265 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2266 #endif
2267
2268 int
2269 vflush(mp, rootrefs, flags, td)
2270 struct mount *mp;
2271 int rootrefs;
2272 int flags;
2273 struct thread *td;
2274 {
2275 struct vnode *vp, *mvp, *rootvp = NULL;
2276 struct vattr vattr;
2277 int busy = 0, error;
2278
2279 CTR1(KTR_VFS, "vflush: mp %p", mp);
2280 if (rootrefs > 0) {
2281 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2282 ("vflush: bad args"));
2283 /*
2284 * Get the filesystem root vnode. We can vput() it
2285 * immediately, since with rootrefs > 0, it won't go away.
2286 */
2287 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0)
2288 return (error);
2289 vput(rootvp);
2290
2291 }
2292 MNT_ILOCK(mp);
2293 loop:
2294 MNT_VNODE_FOREACH(vp, mp, mvp) {
2295
2296 VI_LOCK(vp);
2297 vholdl(vp);
2298 MNT_IUNLOCK(mp);
2299 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2300 if (error) {
2301 vdrop(vp);
2302 MNT_ILOCK(mp);
2303 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
2304 goto loop;
2305 }
2306 /*
2307 * Skip over vnodes marked VV_SYSTEM.
2308 */
2309 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2310 VOP_UNLOCK(vp, 0, td);
2311 vdrop(vp);
2312 MNT_ILOCK(mp);
2313 continue;
2314 }
2315 /*
2316 * If WRITECLOSE is set, flush out unlinked but still open
2317 * files (even if open only for reading) and regular file
2318 * vnodes open for writing.
2319 */
2320 if (flags & WRITECLOSE) {
2321 error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2322 VI_LOCK(vp);
2323
2324 if ((vp->v_type == VNON ||
2325 (error == 0 && vattr.va_nlink > 0)) &&
2326 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2327 VOP_UNLOCK(vp, 0, td);
2328 vdropl(vp);
2329 MNT_ILOCK(mp);
2330 continue;
2331 }
2332 } else
2333 VI_LOCK(vp);
2334 /*
2335 * With v_usecount == 0, all we need to do is clear out the
2336 * vnode data structures and we are done.
2337 *
2338 * If FORCECLOSE is set, forcibly close the vnode.
2339 */
2340 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2341 VNASSERT(vp->v_usecount == 0 ||
2342 (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2343 ("device VNODE %p is FORCECLOSED", vp));
2344 vgonel(vp);
2345 } else {
2346 busy++;
2347 #ifdef DIAGNOSTIC
2348 if (busyprt)
2349 vprint("vflush: busy vnode", vp);
2350 #endif
2351 }
2352 VOP_UNLOCK(vp, 0, td);
2353 vdropl(vp);
2354 MNT_ILOCK(mp);
2355 }
2356 MNT_IUNLOCK(mp);
2357 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2358 /*
2359 * If just the root vnode is busy, and if its refcount
2360 * is equal to `rootrefs', then go ahead and kill it.
2361 */
2362 VI_LOCK(rootvp);
2363 KASSERT(busy > 0, ("vflush: not busy"));
2364 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2365 ("vflush: usecount %d < rootrefs %d",
2366 rootvp->v_usecount, rootrefs));
2367 if (busy == 1 && rootvp->v_usecount == rootrefs) {
2368 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK, td);
2369 vgone(rootvp);
2370 VOP_UNLOCK(rootvp, 0, td);
2371 busy = 0;
2372 } else
2373 VI_UNLOCK(rootvp);
2374 }
2375 if (busy)
2376 return (EBUSY);
2377 for (; rootrefs > 0; rootrefs--)
2378 vrele(rootvp);
2379 return (0);
2380 }
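
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * unmount routine commonly passes its forced-unmount request straight
 * through to vflush().  rootrefs is 0 here on the assumption that the
 * filesystem keeps no private reference on its root vnode:
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *	if (error != 0)
 *		return (error);
 */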
2381
2382 /*
2383 * Recycle an unused vnode to the front of the free list.
2384 */
2385 int
2386 vrecycle(struct vnode *vp, struct thread *td)
2387 {
2388 int recycled;
2389
2390 ASSERT_VOP_LOCKED(vp, "vrecycle");
2391 recycled = 0;
2392 VI_LOCK(vp);
2393 if (vp->v_usecount == 0) {
2394 recycled = 1;
2395 vgonel(vp);
2396 }
2397 VI_UNLOCK(vp);
2398 return (recycled);
2399 }
2400
2401 /*
2402 * Eliminate all activity associated with a vnode
2403 * in preparation for reuse.
2404 */
2405 void
2406 vgone(struct vnode *vp)
2407 {
2408 VI_LOCK(vp);
2409 vgonel(vp);
2410 VI_UNLOCK(vp);
2411 }
2412
2413 /*
2414 * vgone, with the vp interlock held.
2415 */
2416 void
2417 vgonel(struct vnode *vp)
2418 {
2419 struct thread *td;
2420 int oweinact;
2421 int active;
2422 struct mount *mp;
2423
2424 CTR1(KTR_VFS, "vgonel: vp %p", vp);
2425 ASSERT_VOP_LOCKED(vp, "vgonel");
2426 ASSERT_VI_LOCKED(vp, "vgonel");
2427 #if 0
2428 /* XXX Need to fix ttyvp before I enable this. */
2429 VNASSERT(vp->v_holdcnt, vp,
2430 ("vgonel: vp %p has no reference.", vp));
2431 #endif
2432 td = curthread;
2433
2434 /*
2435 * Don't vgonel if we're already doomed.
2436 */
2437 if (vp->v_iflag & VI_DOOMED)
2438 return;
2439 vp->v_iflag |= VI_DOOMED;
2440 /*
2441 * Check to see if the vnode is in use. If so, we have to call
2442 * VOP_CLOSE() and VOP_INACTIVE().
2443 */
2444 active = vp->v_usecount;
2445 oweinact = (vp->v_iflag & VI_OWEINACT);
2446 VI_UNLOCK(vp);
2447 /*
2448 * Clean out any buffers associated with the vnode.
2449 * If the flush fails, just toss the buffers.
2450 */
2451 mp = NULL;
2452 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2453 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
2454 if (vinvalbuf(vp, V_SAVE, td, 0, 0) != 0)
2455 vinvalbuf(vp, 0, td, 0, 0);
2456
2457 /*
2458 * If purging an active vnode, it must be closed and
2459 * deactivated before being reclaimed.
2460 */
2461 if (active)
2462 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2463 if (oweinact || active) {
2464 VI_LOCK(vp);
2465 if ((vp->v_iflag & VI_DOINGINACT) == 0)
2466 vinactive(vp, td);
2467 VI_UNLOCK(vp);
2468 }
2469 /*
2470 * Reclaim the vnode.
2471 */
2472 if (VOP_RECLAIM(vp, td))
2473 panic("vgone: cannot reclaim");
2474 if (mp != NULL)
2475 vn_finished_secondary_write(mp);
2476 VNASSERT(vp->v_object == NULL, vp,
2477 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2478 /*
2479 * Delete from old mount point vnode list.
2480 */
2481 delmntque(vp);
2482 cache_purge(vp);
2483 /*
2484 * Done with purge, reset to the standard lock and invalidate
2485 * the vnode.
2486 */
2487 VI_LOCK(vp);
2488 vp->v_vnlock = &vp->v_lock;
2489 vp->v_op = &dead_vnodeops;
2490 vp->v_tag = "none";
2491 vp->v_type = VBAD;
2492 }
2493
2494 /*
2495 * Calculate the total number of references to a special device.
2496 */
2497 int
2498 vcount(vp)
2499 struct vnode *vp;
2500 {
2501 int count;
2502
2503 dev_lock();
2504 count = vp->v_rdev->si_usecount;
2505 dev_unlock();
2506 return (count);
2507 }
2508
2509 /*
2510 * Same as above, but using the struct cdev *as argument
2511 */
2512 int
2513 count_dev(dev)
2514 struct cdev *dev;
2515 {
2516 int count;
2517
2518 dev_lock();
2519 count = dev->si_usecount;
2520 dev_unlock();
2521 return(count);
2522 }
2523
2524 /*
2525 * Print out a description of a vnode.
2526 */
2527 static char *typename[] =
2528 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2529 "VMARKER"};
2530
2531 void
2532 vn_printf(struct vnode *vp, const char *fmt, ...)
2533 {
2534 va_list ap;
2535 char buf[96];
2536
2537 va_start(ap, fmt);
2538 vprintf(fmt, ap);
2539 va_end(ap);
2540 printf("%p: ", (void *)vp);
2541 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2542 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n",
2543 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2544 buf[0] = '\0';
2545 buf[1] = '\0';
2546 if (vp->v_vflag & VV_ROOT)
2547 strcat(buf, "|VV_ROOT");
2548 if (vp->v_vflag & VV_TEXT)
2549 strcat(buf, "|VV_TEXT");
2550 if (vp->v_vflag & VV_SYSTEM)
2551 strcat(buf, "|VV_SYSTEM");
2552 if (vp->v_iflag & VI_DOOMED)
2553 strcat(buf, "|VI_DOOMED");
2554 if (vp->v_iflag & VI_FREE)
2555 strcat(buf, "|VI_FREE");
2556 printf(" flags (%s)\n", buf + 1);
2557 if (mtx_owned(VI_MTX(vp)))
2558 printf(" VI_LOCKed");
2559 if (vp->v_object != NULL)
2560 printf(" v_object %p ref %d pages %d\n",
2561 vp->v_object, vp->v_object->ref_count,
2562 vp->v_object->resident_page_count);
2563 printf(" ");
2564 lockmgr_printinfo(vp->v_vnlock);
2565 printf("\n");
2566 if (vp->v_data != NULL)
2567 VOP_PRINT(vp);
2568 }
2569
2570 #ifdef DDB
2571 /*
2572 * List all of the locked vnodes in the system.
2573 * Called when debugging the kernel.
2574 */
2575 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2576 {
2577 struct mount *mp, *nmp;
2578 struct vnode *vp;
2579
2580 /*
2581 * Note: because this is DDB, we can't obey the locking semantics
2582 * for these structures, which means we could catch an inconsistent
2583 * state and dereference a nasty pointer. Not much to be done
2584 * about that.
2585 */
2586 printf("Locked vnodes\n");
2587 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2588 nmp = TAILQ_NEXT(mp, mnt_list);
2589 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2590 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp, NULL))
2591 vprint("", vp);
2592 }
2593 nmp = TAILQ_NEXT(mp, mnt_list);
2594 }
2595 }
2596
2597 /*
2598 * Show details about the given vnode.
2599 */
2600 DB_SHOW_COMMAND(vnode, db_show_vnode)
2601 {
2602 struct vnode *vp;
2603
2604 if (!have_addr)
2605 return;
2606 vp = (struct vnode *)addr;
2607 vn_printf(vp, "vnode ");
2608 }
2609 #endif /* DDB */
2610
2611 /*
2612 * Fill in a struct xvfsconf based on a struct vfsconf.
2613 */
2614 static void
2615 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2616 {
2617
2618 strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2619 xvfsp->vfc_typenum = vfsp->vfc_typenum;
2620 xvfsp->vfc_refcount = vfsp->vfc_refcount;
2621 xvfsp->vfc_flags = vfsp->vfc_flags;
2622 /*
2623 * These are unused in userland, we keep them
2624 * to not break binary compatibility.
2625 */
2626 xvfsp->vfc_vfsops = NULL;
2627 xvfsp->vfc_next = NULL;
2628 }
2629
2630 /*
2631 * Top level filesystem related information gathering.
2632 */
2633 static int
2634 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2635 {
2636 struct vfsconf *vfsp;
2637 struct xvfsconf xvfsp;
2638 int error;
2639
2640 error = 0;
2641 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2642 bzero(&xvfsp, sizeof(xvfsp));
2643 vfsconf2x(vfsp, &xvfsp);
2644 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2645 if (error)
2646 break;
2647 }
2648 return (error);
2649 }
2650
2651 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2652 "S,xvfsconf", "List of all configured filesystems");
2653
2654 #ifndef BURN_BRIDGES
2655 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2656
2657 static int
2658 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2659 {
2660 int *name = (int *)arg1 - 1; /* XXX */
2661 u_int namelen = arg2 + 1; /* XXX */
2662 struct vfsconf *vfsp;
2663 struct xvfsconf xvfsp;
2664
2665 printf("WARNING: userland calling deprecated sysctl, "
2666 "please rebuild world\n");
2667
2668 #if 1 || defined(COMPAT_PRELITE2)
2669 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2670 if (namelen == 1)
2671 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2672 #endif
2673
2674 switch (name[1]) {
2675 case VFS_MAXTYPENUM:
2676 if (namelen != 2)
2677 return (ENOTDIR);
2678 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2679 case VFS_CONF:
2680 if (namelen != 3)
2681 return (ENOTDIR); /* overloaded */
2682 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2683 if (vfsp->vfc_typenum == name[2])
2684 break;
2685 if (vfsp == NULL)
2686 return (EOPNOTSUPP);
2687 bzero(&xvfsp, sizeof(xvfsp));
2688 vfsconf2x(vfsp, &xvfsp);
2689 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2690 }
2691 return (EOPNOTSUPP);
2692 }
2693
2694 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
2695 vfs_sysctl, "Generic filesystem");
2696
2697 #if 1 || defined(COMPAT_PRELITE2)
2698
2699 static int
2700 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2701 {
2702 int error;
2703 struct vfsconf *vfsp;
2704 struct ovfsconf ovfs;
2705
2706 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2707 bzero(&ovfs, sizeof(ovfs));
2708 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
2709 strcpy(ovfs.vfc_name, vfsp->vfc_name);
2710 ovfs.vfc_index = vfsp->vfc_typenum;
2711 ovfs.vfc_refcount = vfsp->vfc_refcount;
2712 ovfs.vfc_flags = vfsp->vfc_flags;
2713 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2714 if (error)
2715 return error;
2716 }
2717 return 0;
2718 }
2719
2720 #endif /* 1 || COMPAT_PRELITE2 */
2721 #endif /* !BURN_BRIDGES */
2722
2723 #define KINFO_VNODESLOP 10
2724 #ifdef notyet
2725 /*
2726 * Dump vnode list (via sysctl).
2727 */
2728 /* ARGSUSED */
2729 static int
2730 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2731 {
2732 struct xvnode *xvn;
2733 struct thread *td = req->td;
2734 struct mount *mp;
2735 struct vnode *vp;
2736 int error, len, n;
2737
2738 /*
2739 * Stale numvnodes access is not fatal here.
2740 */
2741 req->lock = 0;
2742 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2743 if (!req->oldptr)
2744 /* Make an estimate */
2745 return (SYSCTL_OUT(req, 0, len));
2746
2747 error = sysctl_wire_old_buffer(req, 0);
2748 if (error != 0)
2749 return (error);
2750 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
2751 n = 0;
2752 mtx_lock(&mountlist_mtx);
2753 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2754 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2755 continue;
2756 MNT_ILOCK(mp);
2757 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2758 if (n == len)
2759 break;
2760 vref(vp);
2761 xvn[n].xv_size = sizeof *xvn;
2762 xvn[n].xv_vnode = vp;
2763 xvn[n].xv_id = 0; /* XXX compat */
2764 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
2765 XV_COPY(usecount);
2766 XV_COPY(writecount);
2767 XV_COPY(holdcnt);
2768 XV_COPY(mount);
2769 XV_COPY(numoutput);
2770 XV_COPY(type);
2771 #undef XV_COPY
2772 xvn[n].xv_flag = vp->v_vflag;
2773
2774 switch (vp->v_type) {
2775 case VREG:
2776 case VDIR:
2777 case VLNK:
2778 break;
2779 case VBLK:
2780 case VCHR:
2781 if (vp->v_rdev == NULL) {
2782 vrele(vp);
2783 continue;
2784 }
2785 xvn[n].xv_dev = dev2udev(vp->v_rdev);
2786 break;
2787 case VSOCK:
2788 xvn[n].xv_socket = vp->v_socket;
2789 break;
2790 case VFIFO:
2791 xvn[n].xv_fifo = vp->v_fifoinfo;
2792 break;
2793 case VNON:
2794 case VBAD:
2795 default:
2796 /* shouldn't happen? */
2797 vrele(vp);
2798 continue;
2799 }
2800 vrele(vp);
2801 ++n;
2802 }
2803 MNT_IUNLOCK(mp);
2804 mtx_lock(&mountlist_mtx);
2805 vfs_unbusy(mp, td);
2806 if (n == len)
2807 break;
2808 }
2809 mtx_unlock(&mountlist_mtx);
2810
2811 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
2812 free(xvn, M_TEMP);
2813 return (error);
2814 }
2815
2816 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2817 0, 0, sysctl_vnode, "S,xvnode", "");
2818 #endif
2819
2820 /*
2821 * Unmount all filesystems. The list is traversed in reverse order
2822 * of mounting to avoid dependencies.
2823 */
2824 void
2825 vfs_unmountall()
2826 {
2827 struct mount *mp;
2828 struct thread *td;
2829 int error;
2830
2831 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
2832 td = curthread;
2833 /*
2834 * Since this only runs when rebooting, it is not interlocked.
2835 */
2836 while(!TAILQ_EMPTY(&mountlist)) {
2837 mp = TAILQ_LAST(&mountlist, mntlist);
2838 error = dounmount(mp, MNT_FORCE, td);
2839 if (error) {
2840 TAILQ_REMOVE(&mountlist, mp, mnt_list);
2841 /*
2842 * XXX: Due to the way in which we mount the root
2843 * file system off of devfs, devfs will generate a
2844 * "busy" warning when we try to unmount it before
2845 * the root. Don't print a warning as a result in
2846 * order to avoid false positive errors that may
2847 * cause needless upset.
2848 */
2849 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
2850 printf("unmount of %s failed (",
2851 mp->mnt_stat.f_mntonname);
2852 if (error == EBUSY)
2853 printf("BUSY)\n");
2854 else
2855 printf("%d)\n", error);
2856 }
2857 } else {
2858 /* The unmount has removed mp from the mountlist */
2859 }
2860 }
2861 }
2862
2863 /*
2864 * Perform msync on all vnodes under a mount point.
2865 * The mount point must be locked.
2866 */
2867 void
2868 vfs_msync(struct mount *mp, int flags)
2869 {
2870 struct vnode *vp, *mvp;
2871 struct vm_object *obj;
2872
2873 MNT_ILOCK(mp);
2874 MNT_VNODE_FOREACH(vp, mp, mvp) {
2875 VI_LOCK(vp);
2876 if ((vp->v_iflag & VI_OBJDIRTY) &&
2877 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
2878 MNT_IUNLOCK(mp);
2879 if (!vget(vp,
2880 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
2881 curthread)) {
2882 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
2883 vput(vp);
2884 MNT_ILOCK(mp);
2885 continue;
2886 }
2887
2888 obj = vp->v_object;
2889 if (obj != NULL) {
2890 VM_OBJECT_LOCK(obj);
2891 vm_object_page_clean(obj, 0, 0,
2892 flags == MNT_WAIT ?
2893 OBJPC_SYNC : OBJPC_NOSYNC);
2894 VM_OBJECT_UNLOCK(obj);
2895 }
2896 vput(vp);
2897 }
2898 MNT_ILOCK(mp);
2899 } else
2900 VI_UNLOCK(vp);
2901 }
2902 MNT_IUNLOCK(mp);
2903 }
2904
2905 /*
2906 * Mark a vnode as free, putting it up for recycling.
2907 */
2908 static void
2909 vfree(struct vnode *vp)
2910 {
2911
2912 CTR1(KTR_VFS, "vfree vp %p", vp);
2913 ASSERT_VI_LOCKED(vp, "vfree");
2914 mtx_lock(&vnode_free_list_mtx);
2915 VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
2916 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
2917 VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
2918 VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
2919 ("vfree: Freeing doomed vnode"));
2920 if (vp->v_iflag & VI_AGE) {
2921 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2922 } else {
2923 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2924 }
2925 freevnodes++;
2926 vp->v_iflag &= ~VI_AGE;
2927 vp->v_iflag |= VI_FREE;
2928 mtx_unlock(&vnode_free_list_mtx);
2929 }
2930
2931 /*
2932 * Opposite of vfree() - mark a vnode as in use.
2933 */
2934 static void
2935 vbusy(struct vnode *vp)
2936 {
2937 CTR1(KTR_VFS, "vbusy vp %p", vp);
2938 ASSERT_VI_LOCKED(vp, "vbusy");
2939 VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
2940 VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
2941
2942 mtx_lock(&vnode_free_list_mtx);
2943 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2944 freevnodes--;
2945 vp->v_iflag &= ~(VI_FREE|VI_AGE);
2946 mtx_unlock(&vnode_free_list_mtx);
2947 }
2948
2949 /*
2950 * Initialize per-vnode helper structure to hold poll-related state.
2951 */
2952 void
2953 v_addpollinfo(struct vnode *vp)
2954 {
2955 struct vpollinfo *vi;
2956
2957 vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
2958 if (vp->v_pollinfo != NULL) {
2959 uma_zfree(vnodepoll_zone, vi);
2960 return;
2961 }
2962 vp->v_pollinfo = vi;
2963 mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
2964 knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note, vp, vfs_knllock,
2965 vfs_knlunlock, vfs_knllocked);
2966 }
2967
2968 /*
2969 * Record a process's interest in events which might happen to
2970 * a vnode. Because poll uses the historic select-style interface
2971 * internally, this routine serves as both the ``check for any
2972 * pending events'' and the ``record my interest in future events''
2973 * functions. (These are done together, while the lock is held,
2974 * to avoid race conditions.)
2975 */
2976 int
2977 vn_pollrecord(vp, td, events)
2978 struct vnode *vp;
2979 struct thread *td;
2980 short events;
2981 {
2982
2983 if (vp->v_pollinfo == NULL)
2984 v_addpollinfo(vp);
2985 mtx_lock(&vp->v_pollinfo->vpi_lock);
2986 if (vp->v_pollinfo->vpi_revents & events) {
2987 /*
2988 * This leaves events we are not interested
2989 * in available for the other process which
2990 * presumably had requested them
2991 * (otherwise they would never have been
2992 * recorded).
2993 */
2994 events &= vp->v_pollinfo->vpi_revents;
2995 vp->v_pollinfo->vpi_revents &= ~events;
2996
2997 mtx_unlock(&vp->v_pollinfo->vpi_lock);
2998 return events;
2999 }
3000 vp->v_pollinfo->vpi_events |= events;
3001 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3002 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3003 return 0;
3004 }
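
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_POLL implementation can defer all non-trivial event tracking to
 * vn_pollrecord(), roughly along the lines of the stock poll handler:
 *
 *	if (ap->a_events & ~POLLSTANDARD)
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
 */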
3005
3006 /*
3007 * Routine to create and manage a filesystem syncer vnode.
3008 */
3009 #define sync_close ((int (*)(struct vop_close_args *))nullop)
3010 static int sync_fsync(struct vop_fsync_args *);
3011 static int sync_inactive(struct vop_inactive_args *);
3012 static int sync_reclaim(struct vop_reclaim_args *);
3013
3014 static struct vop_vector sync_vnodeops = {
3015 .vop_bypass = VOP_EOPNOTSUPP,
3016 .vop_close = sync_close, /* close */
3017 .vop_fsync = sync_fsync, /* fsync */
3018 .vop_inactive = sync_inactive, /* inactive */
3019 .vop_reclaim = sync_reclaim, /* reclaim */
3020 .vop_lock = vop_stdlock, /* lock */
3021 .vop_unlock = vop_stdunlock, /* unlock */
3022 .vop_islocked = vop_stdislocked, /* islocked */
3023 };
3024
3025 /*
3026 * Create a new filesystem syncer vnode for the specified mount point.
3027 */
3028 int
3029 vfs_allocate_syncvnode(mp)
3030 struct mount *mp;
3031 {
3032 struct vnode *vp;
3033 static long start, incr, next;
3034 int error;
3035
3036 /* Allocate a new vnode */
3037 if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3038 mp->mnt_syncer = NULL;
3039 return (error);
3040 }
3041 vp->v_type = VNON;
3042 /*
3043 * Place the vnode onto the syncer worklist. We attempt to
3044 * scatter them about on the list so that they will go off
3045 * at evenly distributed times even if all the filesystems
3046 * are mounted at once.
3047 */
3048 next += incr;
3049 if (next == 0 || next > syncer_maxdelay) {
3050 start /= 2;
3051 incr /= 2;
3052 if (start == 0) {
3053 start = syncer_maxdelay / 2;
3054 incr = syncer_maxdelay;
3055 }
3056 next = start;
3057 }
3058 VI_LOCK(vp);
3059 vn_syncer_add_to_worklist(&vp->v_bufobj,
3060 syncdelay > 0 ? next % syncdelay : 0);
3061 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3062 mtx_lock(&sync_mtx);
3063 sync_vnode_count++;
3064 mtx_unlock(&sync_mtx);
3065 VI_UNLOCK(vp);
3066 mp->mnt_syncer = vp;
3067 return (0);
3068 }
3069
3070 /*
3071 * Do a lazy sync of the filesystem.
3072 */
3073 static int
3074 sync_fsync(ap)
3075 struct vop_fsync_args /* {
3076 struct vnode *a_vp;
3077 struct ucred *a_cred;
3078 int a_waitfor;
3079 struct thread *a_td;
3080 } */ *ap;
3081 {
3082 struct vnode *syncvp = ap->a_vp;
3083 struct mount *mp = syncvp->v_mount;
3084 struct thread *td = ap->a_td;
3085 int error, asyncflag;
3086 struct bufobj *bo;
3087
3088 /*
3089 * We only need to do something if this is a lazy evaluation.
3090 */
3091 if (ap->a_waitfor != MNT_LAZY)
3092 return (0);
3093
3094 /*
3095 * Move ourselves to the back of the sync list.
3096 */
3097 bo = &syncvp->v_bufobj;
3098 BO_LOCK(bo);
3099 vn_syncer_add_to_worklist(bo, syncdelay);
3100 BO_UNLOCK(bo);
3101
3102 /*
3103 * Walk the list of vnodes pushing all that are dirty and
3104 * not already on the sync list.
3105 */
3106 mtx_lock(&mountlist_mtx);
3107 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3108 mtx_unlock(&mountlist_mtx);
3109 return (0);
3110 }
3111 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3112 vfs_unbusy(mp, td);
3113 return (0);
3114 }
3115 MNT_ILOCK(mp);
3116 asyncflag = mp->mnt_flag & MNT_ASYNC;
3117 mp->mnt_flag &= ~MNT_ASYNC;
3118 MNT_IUNLOCK(mp);
3119 vfs_msync(mp, MNT_NOWAIT);
3120 error = VFS_SYNC(mp, MNT_LAZY, td);
3121 MNT_ILOCK(mp);
3122 if (asyncflag)
3123 mp->mnt_flag |= MNT_ASYNC;
3124 MNT_IUNLOCK(mp);
3125 vn_finished_write(mp);
3126 vfs_unbusy(mp, td);
3127 return (error);
3128 }
3129
3130 /*
3131 * The syncer vnode is no longer referenced.
3132 */
3133 static int
3134 sync_inactive(ap)
3135 struct vop_inactive_args /* {
3136 struct vnode *a_vp;
3137 struct thread *a_td;
3138 } */ *ap;
3139 {
3140
3141 vgone(ap->a_vp);
3142 return (0);
3143 }
3144
3145 /*
3146 * The syncer vnode is no longer needed and is being decommissioned.
3147 *
3148 * Modifications to the worklist must be protected by sync_mtx.
3149 */
3150 static int
3151 sync_reclaim(ap)
3152 struct vop_reclaim_args /* {
3153 struct vnode *a_vp;
3154 } */ *ap;
3155 {
3156 struct vnode *vp = ap->a_vp;
3157 struct bufobj *bo;
3158
3159 VI_LOCK(vp);
3160 bo = &vp->v_bufobj;
3161 vp->v_mount->mnt_syncer = NULL;
3162 if (bo->bo_flag & BO_ONWORKLST) {
3163 mtx_lock(&sync_mtx);
3164 LIST_REMOVE(bo, bo_synclist);
3165 syncer_worklist_len--;
3166 sync_vnode_count--;
3167 mtx_unlock(&sync_mtx);
3168 bo->bo_flag &= ~BO_ONWORKLST;
3169 }
3170 VI_UNLOCK(vp);
3171
3172 return (0);
3173 }
3174
3175 /*
3176 * Check if vnode represents a disk device
3177 */
3178 int
3179 vn_isdisk(vp, errp)
3180 struct vnode *vp;
3181 int *errp;
3182 {
3183 int error;
3184
3185 error = 0;
3186 dev_lock();
3187 if (vp->v_type != VCHR)
3188 error = ENOTBLK;
3189 else if (vp->v_rdev == NULL)
3190 error = ENXIO;
3191 else if (vp->v_rdev->si_devsw == NULL)
3192 error = ENXIO;
3193 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3194 error = ENOTBLK;
3195 dev_unlock();
3196 if (errp != NULL)
3197 *errp = error;
3198 return (error == 0);
3199 }
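
/*
 * Illustrative sketch (not part of the original file): callers that need
 * both the yes/no answer and the underlying errno can use the optional
 * second argument, e.g. when vetting a vnode handed in for a disk-only
 * operation:
 *
 *	if (!vn_isdisk(vp, &error))
 *		return (error);		ENOTBLK or ENXIO, as set above
 */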
3200
3201 /*
3202 * Common filesystem object access control check routine. Accepts a
3203 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3204 * and optional call-by-reference privused argument allowing vaccess()
3205 * to indicate to the caller whether privilege was used to satisfy the
3206 * request (obsoleted). Returns 0 on success, or an errno on failure.
3207 */
3208 int
3209 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3210 enum vtype type;
3211 mode_t file_mode;
3212 uid_t file_uid;
3213 gid_t file_gid;
3214 mode_t acc_mode;
3215 struct ucred *cred;
3216 int *privused;
3217 {
3218 mode_t dac_granted;
3219 #ifdef CAPABILITIES
3220 mode_t cap_granted;
3221 #endif
3222
3223 /*
3224 * Look for a normal, non-privileged way to access the file/directory
3225 * as requested. If it exists, go with that.
3226 */
3227
3228 if (privused != NULL)
3229 *privused = 0;
3230
3231 dac_granted = 0;
3232
3233 /* Check the owner. */
3234 if (cred->cr_uid == file_uid) {
3235 dac_granted |= VADMIN;
3236 if (file_mode & S_IXUSR)
3237 dac_granted |= VEXEC;
3238 if (file_mode & S_IRUSR)
3239 dac_granted |= VREAD;
3240 if (file_mode & S_IWUSR)
3241 dac_granted |= (VWRITE | VAPPEND);
3242
3243 if ((acc_mode & dac_granted) == acc_mode)
3244 return (0);
3245
3246 goto privcheck;
3247 }
3248
3249 /* Otherwise, check the groups (first match) */
3250 if (groupmember(file_gid, cred)) {
3251 if (file_mode & S_IXGRP)
3252 dac_granted |= VEXEC;
3253 if (file_mode & S_IRGRP)
3254 dac_granted |= VREAD;
3255 if (file_mode & S_IWGRP)
3256 dac_granted |= (VWRITE | VAPPEND);
3257
3258 if ((acc_mode & dac_granted) == acc_mode)
3259 return (0);
3260
3261 goto privcheck;
3262 }
3263
3264 /* Otherwise, check everyone else. */
3265 if (file_mode & S_IXOTH)
3266 dac_granted |= VEXEC;
3267 if (file_mode & S_IROTH)
3268 dac_granted |= VREAD;
3269 if (file_mode & S_IWOTH)
3270 dac_granted |= (VWRITE | VAPPEND);
3271 if ((acc_mode & dac_granted) == acc_mode)
3272 return (0);
3273
3274 privcheck:
3275 if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
3276 /* XXX audit: privilege used */
3277 if (privused != NULL)
3278 *privused = 1;
3279 return (0);
3280 }
3281
3282 #ifdef CAPABILITIES
3283 /*
3284 * Build a capability mask to determine if the set of capabilities
3285 * satisfies the requirements when combined with the granted mask
3286 * from above.
3287 * For each capability, if the capability is required, bitwise
3288 * or the request type onto the cap_granted mask.
3289 */
3290 cap_granted = 0;
3291
3292 if (type == VDIR) {
3293 /*
3294 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3295 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3296 */
3297 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3298 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3299 cap_granted |= VEXEC;
3300 } else {
3301 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3302 !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
3303 cap_granted |= VEXEC;
3304 }
3305
3306 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3307 !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3308 cap_granted |= VREAD;
3309
3310 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3311 !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
3312 cap_granted |= (VWRITE | VAPPEND);
3313
3314 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3315 !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
3316 cap_granted |= VADMIN;
3317
3318 if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3319 /* XXX audit: privilege used */
3320 if (privused != NULL)
3321 *privused = 1;
3322 return (0);
3323 }
3324 #endif
3325
3326 return ((acc_mode & VADMIN) ? EPERM : EACCES);
3327 }
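
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * VOP_ACCESS implementation usually finishes by feeding its stored
 * ownership and permission bits to vaccess().  The inode field names
 * below are placeholders for whatever the filesystem actually keeps:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_mode, ap->a_cred, NULL));
 */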
3328
3329 /*
3330 * Credential check based on process requesting service, and per-attribute
3331 * permissions.
3332 */
3333 int
3334 extattr_check_cred(struct vnode *vp, int attrnamespace,
3335 struct ucred *cred, struct thread *td, int access)
3336 {
3337
3338 /*
3339 * Kernel-invoked requests always succeed.
3340 */
3341 if (cred == NOCRED)
3342 return (0);
3343
3344 /*
3345 * Do not allow privileged processes in jail to directly
3346 * manipulate system attributes.
3347 *
3348 * XXX What capability should apply here?
3349 * Probably CAP_SYS_SETFFLAG.
3350 */
3351 switch (attrnamespace) {
3352 case EXTATTR_NAMESPACE_SYSTEM:
3353 /* Potentially should be: return (EPERM); */
3354 return (suser_cred(cred, 0));
3355 case EXTATTR_NAMESPACE_USER:
3356 return (VOP_ACCESS(vp, access, cred, td));
3357 default:
3358 return (EPERM);
3359 }
3360 }
3361
3362 #ifdef DEBUG_VFS_LOCKS
3363 /*
3364 * This only exists to suppress warnings from unlocked specfs accesses. It is
3365 * no longer ok to have an unlocked VFS.
3366 */
3367 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3368
3369 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
3370 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3371
3372 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */
3373 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3374
3375 int vfs_badlock_print = 1; /* Print lock violations. */
3376 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3377
3378 #ifdef KDB
3379 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */
3380 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3381 #endif
3382
3383 static void
3384 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3385 {
3386
3387 #ifdef KDB
3388 if (vfs_badlock_backtrace)
3389 kdb_backtrace();
3390 #endif
3391 if (vfs_badlock_print)
3392 printf("%s: %p %s\n", str, (void *)vp, msg);
3393 if (vfs_badlock_ddb)
3394 kdb_enter("lock violation");
3395 }
3396
3397 void
3398 assert_vi_locked(struct vnode *vp, const char *str)
3399 {
3400
3401 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3402 vfs_badlock("interlock is not locked but should be", str, vp);
3403 }
3404
3405 void
3406 assert_vi_unlocked(struct vnode *vp, const char *str)
3407 {
3408
3409 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3410 vfs_badlock("interlock is locked but should not be", str, vp);
3411 }
3412
3413 void
3414 assert_vop_locked(struct vnode *vp, const char *str)
3415 {
3416
3417 if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3418 vfs_badlock("is not locked but should be", str, vp);
3419 }
3420
3421 void
3422 assert_vop_unlocked(struct vnode *vp, const char *str)
3423 {
3424
3425 if (vp && !IGNORE_LOCK(vp) &&
3426 VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3427 vfs_badlock("is locked but should not be", str, vp);
3428 }
3429
3430 void
3431 assert_vop_elocked(struct vnode *vp, const char *str)
3432 {
3433
3434 if (vp && !IGNORE_LOCK(vp) &&
3435 VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3436 vfs_badlock("is not exclusive locked but should be", str, vp);
3437 }
3438
3439 #if 0
3440 void
3441 assert_vop_elocked_other(struct vnode *vp, const char *str)
3442 {
3443
3444 if (vp && !IGNORE_LOCK(vp) &&
3445 VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3446 vfs_badlock("is not exclusive locked by another thread",
3447 str, vp);
3448 }
3449
3450 void
3451 assert_vop_slocked(struct vnode *vp, const char *str)
3452 {
3453
3454 if (vp && !IGNORE_LOCK(vp) &&
3455 VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3456 vfs_badlock("is not locked shared but should be", str, vp);
3457 }
3458 #endif /* 0 */
3459 #endif /* DEBUG_VFS_LOCKS */
3460
3461 void
3462 vop_rename_pre(void *ap)
3463 {
3464 struct vop_rename_args *a = ap;
3465
3466 #ifdef DEBUG_VFS_LOCKS
3467 if (a->a_tvp)
3468 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3469 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3470 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3471 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3472
3473 /* Check the source (from). */
3474 if (a->a_tdvp != a->a_fdvp)
3475 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3476 if (a->a_tvp != a->a_fvp)
3477 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3478
3479 /* Check the target. */
3480 if (a->a_tvp)
3481 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3482 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3483 #endif
3484 if (a->a_tdvp != a->a_fdvp)
3485 vhold(a->a_fdvp);
3486 if (a->a_tvp != a->a_fvp)
3487 vhold(a->a_fvp);
3488 vhold(a->a_tdvp);
3489 if (a->a_tvp)
3490 vhold(a->a_tvp);
3491 }
3492
3493 void
3494 vop_strategy_pre(void *ap)
3495 {
3496 #ifdef DEBUG_VFS_LOCKS
3497 struct vop_strategy_args *a;
3498 struct buf *bp;
3499
3500 a = ap;
3501 bp = a->a_bp;
3502
3503 /*
3504 * Cluster ops lock their component buffers but not the IO container.
3505 */
3506 if ((bp->b_flags & B_CLUSTER) != 0)
3507 return;
3508
3509 if (BUF_REFCNT(bp) < 1) {
3510 if (vfs_badlock_print)
3511 printf(
3512 "VOP_STRATEGY: bp is not locked but should be\n");
3513 if (vfs_badlock_ddb)
3514 kdb_enter("lock violation");
3515 }
3516 #endif
3517 }
3518
3519 void
3520 vop_lookup_pre(void *ap)
3521 {
3522 #ifdef DEBUG_VFS_LOCKS
3523 struct vop_lookup_args *a;
3524 struct vnode *dvp;
3525
3526 a = ap;
3527 dvp = a->a_dvp;
3528 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3529 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3530 #endif
3531 }
3532
3533 void
3534 vop_lookup_post(void *ap, int rc)
3535 {
3536 #ifdef DEBUG_VFS_LOCKS
3537 struct vop_lookup_args *a;
3538 struct vnode *dvp;
3539 struct vnode *vp;
3540
3541 a = ap;
3542 dvp = a->a_dvp;
3543 vp = *(a->a_vpp);
3544
3545 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3546 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3547
3548 if (!rc)
3549 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
3550 #endif
3551 }
3552
3553 void
3554 vop_lock_pre(void *ap)
3555 {
3556 #ifdef DEBUG_VFS_LOCKS
3557 struct vop_lock_args *a = ap;
3558
3559 if ((a->a_flags & LK_INTERLOCK) == 0)
3560 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3561 else
3562 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3563 #endif
3564 }
3565
3566 void
3567 vop_lock_post(void *ap, int rc)
3568 {
3569 #ifdef DEBUG_VFS_LOCKS
3570 struct vop_lock_args *a = ap;
3571
3572 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3573 if (rc == 0)
3574 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3575 #endif
3576 }
3577
3578 void
3579 vop_unlock_pre(void *ap)
3580 {
3581 #ifdef DEBUG_VFS_LOCKS
3582 struct vop_unlock_args *a = ap;
3583
3584 if (a->a_flags & LK_INTERLOCK)
3585 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3586 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3587 #endif
3588 }
3589
3590 void
3591 vop_unlock_post(void *ap, int rc)
3592 {
3593 #ifdef DEBUG_VFS_LOCKS
3594 struct vop_unlock_args *a = ap;
3595
3596 if (a->a_flags & LK_INTERLOCK)
3597 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3598 #endif
3599 }
3600
3601 void
3602 vop_create_post(void *ap, int rc)
3603 {
3604 struct vop_create_args *a = ap;
3605
3606 if (!rc)
3607 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3608 }
3609
3610 void
3611 vop_link_post(void *ap, int rc)
3612 {
3613 struct vop_link_args *a = ap;
3614
3615 if (!rc) {
3616 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
3617 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
3618 }
3619 }
3620
3621 void
3622 vop_mkdir_post(void *ap, int rc)
3623 {
3624 struct vop_mkdir_args *a = ap;
3625
3626 if (!rc)
3627 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3628 }
3629
3630 void
3631 vop_mknod_post(void *ap, int rc)
3632 {
3633 struct vop_mknod_args *a = ap;
3634
3635 if (!rc)
3636 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3637 }
3638
3639 void
3640 vop_remove_post(void *ap, int rc)
3641 {
3642 struct vop_remove_args *a = ap;
3643
3644 if (!rc) {
3645 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3646 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3647 }
3648 }
3649
3650 void
3651 vop_rename_post(void *ap, int rc)
3652 {
3653 struct vop_rename_args *a = ap;
3654
3655 if (!rc) {
3656 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
3657 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
3658 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
3659 if (a->a_tvp)
3660 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
3661 }
3662 if (a->a_tdvp != a->a_fdvp)
3663 vdrop(a->a_fdvp);
3664 if (a->a_tvp != a->a_fvp)
3665 vdrop(a->a_fvp);
3666 vdrop(a->a_tdvp);
3667 if (a->a_tvp)
3668 vdrop(a->a_tvp);
3669 }
3670
3671 void
3672 vop_rmdir_post(void *ap, int rc)
3673 {
3674 struct vop_rmdir_args *a = ap;
3675
3676 if (!rc) {
3677 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3678 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3679 }
3680 }
3681
3682 void
3683 vop_setattr_post(void *ap, int rc)
3684 {
3685 struct vop_setattr_args *a = ap;
3686
3687 if (!rc)
3688 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
3689 }
3690
3691 void
3692 vop_symlink_post(void *ap, int rc)
3693 {
3694 struct vop_symlink_args *a = ap;
3695
3696 if (!rc)
3697 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3698 }
3699
3700 static struct knlist fs_knlist;
3701
3702 static void
3703 vfs_event_init(void *arg)
3704 {
3705 knlist_init(&fs_knlist, NULL, NULL, NULL, NULL);
3706 }
3707 /* XXX - correct order? */
3708 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3709
3710 void
3711 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3712 {
3713
3714 KNOTE_UNLOCKED(&fs_knlist, event);
3715 }
3716
3717 static int filt_fsattach(struct knote *kn);
3718 static void filt_fsdetach(struct knote *kn);
3719 static int filt_fsevent(struct knote *kn, long hint);
3720
3721 struct filterops fs_filtops =
3722 { 0, filt_fsattach, filt_fsdetach, filt_fsevent };
3723
3724 static int
3725 filt_fsattach(struct knote *kn)
3726 {
3727
3728 kn->kn_flags |= EV_CLEAR;
3729 knlist_add(&fs_knlist, kn, 0);
3730 return (0);
3731 }
3732
3733 static void
3734 filt_fsdetach(struct knote *kn)
3735 {
3736
3737 knlist_remove(&fs_knlist, kn, 0);
3738 }
3739
3740 static int
3741 filt_fsevent(struct knote *kn, long hint)
3742 {
3743
3744 kn->kn_fflags |= hint;
3745 return (kn->kn_fflags != 0);
3746 }
3747
3748 static int
3749 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
3750 {
3751 struct vfsidctl vc;
3752 int error;
3753 struct mount *mp;
3754
3755 error = SYSCTL_IN(req, &vc, sizeof(vc));
3756 if (error)
3757 return (error);
3758 if (vc.vc_vers != VFS_CTL_VERS1)
3759 return (EINVAL);
3760 mp = vfs_getvfs(&vc.vc_fsid);
3761 if (mp == NULL)
3762 return (ENOENT);
3763 /* ensure that a specific sysctl goes to the right filesystem. */
3764 if (strcmp(vc.vc_fstypename, "*") != 0 &&
3765 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
3766 vfs_rel(mp);
3767 return (EINVAL);
3768 }
3769 VCTLTOREQ(&vc, req);
3770 error = VFS_SYSCTL(mp, vc.vc_op, req);
3771 vfs_rel(mp);
3772 return (error);
3773 }
3774
3775 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
3776 NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");
3777
3778 /*
3779 * Function to initialize a va_filerev field sensibly.
3780 * XXX: Wouldn't a random number make a lot more sense ??
3781 */
3782 u_quad_t
3783 init_va_filerev(void)
3784 {
3785 struct bintime bt;
3786
3787 getbinuptime(&bt);
3788 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
3789 }
3790
3791 static int filt_vfsread(struct knote *kn, long hint);
3792 static int filt_vfswrite(struct knote *kn, long hint);
3793 static int filt_vfsvnode(struct knote *kn, long hint);
3794 static void filt_vfsdetach(struct knote *kn);
3795 static struct filterops vfsread_filtops =
3796 { 1, NULL, filt_vfsdetach, filt_vfsread };
3797 static struct filterops vfswrite_filtops =
3798 { 1, NULL, filt_vfsdetach, filt_vfswrite };
3799 static struct filterops vfsvnode_filtops =
3800 { 1, NULL, filt_vfsdetach, filt_vfsvnode };
3801
3802 static void
3803 vfs_knllock(void *arg)
3804 {
3805 struct vnode *vp = arg;
3806
3807 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
3808 }
3809
3810 static void
3811 vfs_knlunlock(void *arg)
3812 {
3813 struct vnode *vp = arg;
3814
3815 VOP_UNLOCK(vp, 0, curthread);
3816 }
3817
3818 static int
3819 vfs_knllocked(void *arg)
3820 {
3821 struct vnode *vp = arg;
3822
3823 return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE);
3824 }
3825
3826 int
3827 vfs_kqfilter(struct vop_kqfilter_args *ap)
3828 {
3829 struct vnode *vp = ap->a_vp;
3830 struct knote *kn = ap->a_kn;
3831 struct knlist *knl;
3832
3833 switch (kn->kn_filter) {
3834 case EVFILT_READ:
3835 kn->kn_fop = &vfsread_filtops;
3836 break;
3837 case EVFILT_WRITE:
3838 kn->kn_fop = &vfswrite_filtops;
3839 break;
3840 case EVFILT_VNODE:
3841 kn->kn_fop = &vfsvnode_filtops;
3842 break;
3843 default:
3844 return (EINVAL);
3845 }
3846
3847 kn->kn_hook = (caddr_t)vp;
3848
3849 if (vp->v_pollinfo == NULL)
3850 v_addpollinfo(vp);
3851 if (vp->v_pollinfo == NULL)
3852 return (ENOMEM);
3853 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
3854 knlist_add(knl, kn, 0);
3855
3856 return (0);
3857 }
3858
3859 /*
3860 * Detach knote from vnode
3861 */
3862 static void
3863 filt_vfsdetach(struct knote *kn)
3864 {
3865 struct vnode *vp = (struct vnode *)kn->kn_hook;
3866
3867 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
3868 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
3869 }
3870
3871 /*ARGSUSED*/
3872 static int
3873 filt_vfsread(struct knote *kn, long hint)
3874 {
3875 struct vnode *vp = (struct vnode *)kn->kn_hook;
3876 struct vattr va;
3877
3878 /*
3879 * filesystem is gone, so set the EOF flag and schedule
3880 * the knote for deletion.
3881 */
3882 if (hint == NOTE_REVOKE) {
3883 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3884 return (1);
3885 }
3886
3887 if (VOP_GETATTR(vp, &va, curthread->td_ucred, curthread))
3888 return (0);
3889
3890 kn->kn_data = va.va_size - kn->kn_fp->f_offset;
3891 return (kn->kn_data != 0);
3892 }
3893
3894 /*ARGSUSED*/
3895 static int
3896 filt_vfswrite(struct knote *kn, long hint)
3897 {
3898 /*
3899 * filesystem is gone, so set the EOF flag and schedule
3900 * the knote for deletion.
3901 */
3902 if (hint == NOTE_REVOKE)
3903 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3904
3905 kn->kn_data = 0;
3906 return (1);
3907 }
3908
3909 static int
3910 filt_vfsvnode(struct knote *kn, long hint)
3911 {
3912 if (kn->kn_sfflags & hint)
3913 kn->kn_fflags |= hint;
3914 if (hint == NOTE_REVOKE) {
3915 kn->kn_flags |= EV_EOF;
3916 return (1);
3917 }
3918 return (kn->kn_fflags != 0);
3919 }
3920
3921 int
3922 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
3923 {
3924 int error;
3925
3926 if (dp->d_reclen > ap->a_uio->uio_resid)
3927 return (ENAMETOOLONG);
3928 error = uiomove(dp, dp->d_reclen, ap->a_uio);
3929 if (error) {
3930 if (ap->a_ncookies != NULL) {
3931 if (ap->a_cookies != NULL)
3932 free(ap->a_cookies, M_TEMP);
3933 ap->a_cookies = NULL;
3934 *ap->a_ncookies = 0;
3935 }
3936 return (error);
3937 }
3938 if (ap->a_ncookies == NULL)
3939 return (0);
3940
3941 KASSERT(ap->a_cookies,
3942 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
3943
3944 *ap->a_cookies = realloc(*ap->a_cookies,
3945 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
3946 (*ap->a_cookies)[*ap->a_ncookies] = off;
3947 return (0);
3948 }
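
/*
 * Illustrative sketch (not part of the original file): a VOP_READDIR loop
 * can let vfs_read_dirent() handle the uiomove and cookie bookkeeping.
 * Returning ENAMETOOLONG simply means the entry no longer fits in the
 * caller's buffer, so the loop typically stops without treating it as an
 * error:
 *
 *	error = vfs_read_dirent(ap, dp, off);
 *	if (error != 0)
 *		return (error == ENAMETOOLONG ? 0 : error);
 */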
3949
3950 /*
3951 * Mark for update the access time of the file if the filesystem
3952 * supports VA_MARK_ATIME. This functionality is used by execve
3953 * and mmap, so we want to avoid the synchronous I/O implied by
3954 * directly setting va_atime for the sake of efficiency.
3955 */
3956 void
3957 vfs_mark_atime(struct vnode *vp, struct thread *td)
3958 {
3959 struct vattr atimeattr;
3960
3961 if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
3962 VATTR_NULL(&atimeattr);
3963 atimeattr.va_vaflags |= VA_MARK_ATIME;
3964 (void)VOP_SETATTR(vp, &atimeattr, td->td_ucred, td);
3965 }
3966 }