sys/kern/vfs_subr.c
1 /*-
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
35 */
36
37 /*
38 * External virtual filesystem routines
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/dirent.h>
53 #include <sys/event.h>
54 #include <sys/eventhandler.h>
55 #include <sys/extattr.h>
56 #include <sys/file.h>
57 #include <sys/fcntl.h>
58 #include <sys/jail.h>
59 #include <sys/kdb.h>
60 #include <sys/kernel.h>
61 #include <sys/kthread.h>
62 #include <sys/lockf.h>
63 #include <sys/malloc.h>
64 #include <sys/mount.h>
65 #include <sys/namei.h>
66 #include <sys/priv.h>
67 #include <sys/reboot.h>
68 #include <sys/sleepqueue.h>
69 #include <sys/stat.h>
70 #include <sys/sysctl.h>
71 #include <sys/syslog.h>
72 #include <sys/vmmeter.h>
73 #include <sys/vnode.h>
74
75 #include <machine/stdarg.h>
76
77 #include <security/mac/mac_framework.h>
78
79 #include <vm/vm.h>
80 #include <vm/vm_object.h>
81 #include <vm/vm_extern.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_kern.h>
86 #include <vm/uma.h>
87
88 #ifdef DDB
89 #include <ddb/ddb.h>
90 #endif
91
92 static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");
93
94 static void delmntque(struct vnode *vp);
95 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
96 int slpflag, int slptimeo);
97 static void syncer_shutdown(void *arg, int howto);
98 static int vtryrecycle(struct vnode *vp);
99 static void vbusy(struct vnode *vp);
100 static void vinactive(struct vnode *, struct thread *);
101 static void v_incr_usecount(struct vnode *);
102 static void v_decr_usecount(struct vnode *);
103 static void v_decr_useonly(struct vnode *);
104 static void v_upgrade_usecount(struct vnode *);
105 static void vfree(struct vnode *);
106 static void vnlru_free(int);
107 static void vdestroy(struct vnode *);
108 static void vgonel(struct vnode *);
109 static void vfs_knllock(void *arg);
110 static void vfs_knlunlock(void *arg);
111 static int vfs_knllocked(void *arg);
112 static void destroy_vpollinfo(struct vpollinfo *vi);
113
114 /*
115 * Enable Giant pushdown based on whether or not the vm is mpsafe in this
116 * build. Without mpsafevm the buffer cache cannot run Giant free.
117 */
118 int mpsafe_vfs = 1;
119 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
120 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
121 "MPSAFE VFS");
122
123 /*
124 * Number of vnodes in existence. Increased whenever getnewvnode()
125 * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed
126 * vnode.
127 */
128 static unsigned long numvnodes;
129
130 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
131
132 /*
133 * Conversion tables for conversion from vnode types to inode formats
134 * and back.
135 */
136 enum vtype iftovt_tab[16] = {
137 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
138 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
139 };
140 int vttoif_tab[10] = {
141 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
142 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
143 };
144
145 /*
146 * List of vnodes that are ready for recycling.
147 */
148 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
149
150 /*
151 * Free vnode target. Free vnodes may simply be files which have been stat'd
152 * but not read. This is somewhat common, and a small cache of such files
153 * should be kept to avoid recreation costs.
154 */
155 static u_long wantfreevnodes;
156 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
157 /* Number of vnodes in the free list. */
158 static u_long freevnodes;
159 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
160
161 /*
162 * Various variables used for debugging the new implementation of
163 * reassignbuf().
164 * XXX these are probably of (very) limited utility now.
165 */
166 static int reassignbufcalls;
167 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
168
169 /*
170 * Cache for the mount type id assigned to NFS. This is used for
171 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
172 */
173 int nfs_mount_type = -1;
174
175 /* To keep more than one thread at a time from running vfs_getnewfsid */
176 static struct mtx mntid_mtx;
177
178 /*
179 * Lock for any access to the following:
180 * vnode_free_list
181 * numvnodes
182 * freevnodes
183 */
184 static struct mtx vnode_free_list_mtx;
185
186 /* Publicly exported FS */
187 struct nfs_public nfs_pub;
188
189 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
190 static uma_zone_t vnode_zone;
191 static uma_zone_t vnodepoll_zone;
192
193 /* Set to 1 to print out reclaim of active vnodes */
194 int prtactive;
195
196 /*
197 * The workitem queue.
198 *
199 * It is useful to delay writes of file data and filesystem metadata
200 * for tens of seconds so that quickly created and deleted files need
201 * not waste disk bandwidth being created and removed. To realize this,
202 * we append vnodes to a "workitem" queue. When running with a soft
203 * updates implementation, most pending metadata dependencies should
204 * not wait for more than a few seconds. Thus, metadata on mounted
205 * block devices is delayed only about half the time that file data
206 * is delayed. Similarly, directory updates are more critical, so they
207 * are delayed only about a third of that time. Thus, there are
208 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
209 * one each second (driven off the filesystem syncer process). The
210 * syncer_delayno variable indicates the next queue that is to be processed.
211 * Items that need to be processed soon are placed in this queue:
212 *
213 * syncer_workitem_pending[syncer_delayno]
214 *
215 * A delay of fifteen seconds is done by placing the request fifteen
216 * entries later in the queue:
217 *
218 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
219 *
220 */
221 static int syncer_delayno;
222 static long syncer_mask;
223 LIST_HEAD(synclist, bufobj);
224 static struct synclist *syncer_workitem_pending;
225 /*
226 * The sync_mtx protects:
227 * bo->bo_synclist
228 * sync_vnode_count
229 * syncer_delayno
230 * syncer_state
231 * syncer_workitem_pending
232 * syncer_worklist_len
233 * rushjob
234 */
235 static struct mtx sync_mtx;
236
237 #define SYNCER_MAXDELAY 32
238 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
239 static int syncdelay = 30; /* max time to delay syncing data */
240 static int filedelay = 30; /* time to delay syncing files */
241 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
242 static int dirdelay = 29; /* time to delay syncing directories */
243 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
244 static int metadelay = 28; /* time to delay syncing metadata */
245 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
246 static int rushjob; /* number of slots to run ASAP */
247 static int stat_rush_requests; /* number of times I/O speeded up */
248 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
249
250 /*
251 * When shutting down the syncer, run it at four times normal speed.
252 */
253 #define SYNCER_SHUTDOWN_SPEEDUP 4
254 static int sync_vnode_count;
255 static int syncer_worklist_len;
256 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
257 syncer_state;
258
259 /*
260 * Number of vnodes we want to exist at any one time. This is mostly used
261 * to size hash tables in vnode-related code. It is normally not used in
262 * getnewvnode(), as wantfreevnodes is normally nonzero.
263 *
264 * XXX desiredvnodes is historical cruft and should not exist.
265 */
266 int desiredvnodes;
267 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
268 &desiredvnodes, 0, "Maximum number of vnodes");
269 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
270 &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
271 static int vnlru_nowhere;
272 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
273 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
274
275 /*
276 * Macros to control when a vnode is freed and recycled. All require
277 * the vnode interlock.
278 */
279 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
280 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
281 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
282
283
284 /*
285 * Initialize the vnode management data structures.
286 */
287 #ifndef MAXVNODES_MAX
288 #define MAXVNODES_MAX 100000
289 #endif
290 static void
291 vntblinit(void *dummy __unused)
292 {
293
294 /*
295 * Desiredvnodes is a function of the physical memory size and
296 * the kernel's heap size. Specifically, desiredvnodes scales
297 * in proportion to the physical memory size until two fifths
298 * of the kernel's heap size is consumed by vnodes and vm
299 * objects.
300 */
301 desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
302 (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
303 if (desiredvnodes > MAXVNODES_MAX) {
304 if (bootverbose)
305 printf("Reducing kern.maxvnodes %d -> %d\n",
306 desiredvnodes, MAXVNODES_MAX);
307 desiredvnodes = MAXVNODES_MAX;
308 }
309 wantfreevnodes = desiredvnodes / 4;
310 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
311 TAILQ_INIT(&vnode_free_list);
312 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
313 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
314 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
315 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
316 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
317 /*
318 * Initialize the filesystem syncer.
319 */
320 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
321 &syncer_mask);
322 syncer_maxdelay = syncer_mask + 1;
323 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
324 }
325 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
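/*
 * Worked example of the sizing formula in vntblinit() above, using made-up
 * figures (the structure sizes and memory numbers below are assumptions,
 * not values from any particular kernel): with maxproc = 6000,
 * cnt.v_page_count = 250000, vm_kmem_size = 320 MB and
 * sizeof(struct vm_object) + sizeof(struct vnode) = 600 bytes,
 *
 *	desiredvnodes = min(6000 + 250000 / 4,
 *	    2 * 335544320 / (5 * 600))
 *	              = min(68500, 223696) = 68500
 *
 * and wantfreevnodes = 68500 / 4 = 17125.  The physical-memory term wins
 * here; on machines with small kernel heaps the vm_kmem_size term is the
 * one that limits the vnode count.
 */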
326
327
328 /*
329 * Mark a mount point as busy. Used to synchronize access and to delay
330 * unmounting. Interlock is not released on failure.
331 */
332 int
333 vfs_busy(struct mount *mp, int flags, struct mtx *interlkp,
334 struct thread *td)
335 {
336 int lkflags;
337
338 MNT_ILOCK(mp);
339 MNT_REF(mp);
340 while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
341 if (flags & LK_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
342 MNT_REL(mp);
343 MNT_IUNLOCK(mp);
344 return (ENOENT);
345 }
346 if (interlkp)
347 mtx_unlock(interlkp);
348 mp->mnt_kern_flag |= MNTK_MWAIT;
349 /*
350 * Since all busy locks are shared except the exclusive
351 * lock granted when unmounting, the only place that a
352 * wakeup needs to be done is at the release of the
353 * exclusive lock at the end of dounmount.
354 */
355 msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
356 if (interlkp)
357 mtx_lock(interlkp);
358 }
359 if (interlkp)
360 mtx_unlock(interlkp);
361 lkflags = LK_SHARED | LK_INTERLOCK | LK_NOWAIT;
362 if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
363 panic("vfs_busy: unexpected lock failure");
364 return (0);
365 }
366
367 /*
368 * Free a busy filesystem.
369 */
370 void
371 vfs_unbusy(struct mount *mp, struct thread *td)
372 {
373
374 lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
375 vfs_rel(mp);
376 }
377
378 /*
379 * Lookup a mount point by filesystem identifier.
380 */
381 struct mount *
382 vfs_getvfs(fsid_t *fsid)
383 {
384 struct mount *mp;
385
386 mtx_lock(&mountlist_mtx);
387 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
388 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
389 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
390 vfs_ref(mp);
391 mtx_unlock(&mountlist_mtx);
392 return (mp);
393 }
394 }
395 mtx_unlock(&mountlist_mtx);
396 return ((struct mount *) 0);
397 }
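/*
 * Typical use of vfs_getvfs() (sketch; the surrounding code is
 * hypothetical): a caller holding an fsid, e.g. one copied in from a
 * struct statfs, looks up the mount and must drop the reference that
 * vfs_getvfs() took once it is done with it.
 *
 *	struct mount *mp;
 *
 *	mp = vfs_getvfs(&fsid);
 *	if (mp == NULL)
 *		return (ESTALE);
 *	... inspect mp->mnt_stat, mp->mnt_vfc, ...
 *	vfs_rel(mp);
 */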
398
399 /*
400 * Check if a user can access privileged mount options.
401 */
402 int
403 vfs_suser(struct mount *mp, struct thread *td)
404 {
405 int error;
406
407 /*
408 * If the thread is jailed, but this is not a jail-friendly file
409 * system, deny immediately.
410 */
411 if (jailed(td->td_ucred) && !(mp->mnt_vfc->vfc_flags & VFCF_JAIL))
412 return (EPERM);
413
414 /*
415 * If the file system was mounted outside a jail and a jailed thread
416 * tries to access it, deny immediately.
417 */
418 if (!jailed(mp->mnt_cred) && jailed(td->td_ucred))
419 return (EPERM);
420
421 /*
422 * If the file system was mounted inside a different jail than the jail of
423 * the calling thread, deny immediately.
424 */
425 if (jailed(mp->mnt_cred) && jailed(td->td_ucred) &&
426 mp->mnt_cred->cr_prison != td->td_ucred->cr_prison) {
427 return (EPERM);
428 }
429
430 if ((mp->mnt_flag & MNT_USER) == 0 ||
431 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
432 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
433 return (error);
434 }
435 return (0);
436 }
437
438 /*
439 * Get a new unique fsid. Try to make its val[0] unique, since this value
440 * will be used to create fake device numbers for stat(). Also try (but
441 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
442 * support 16-bit device numbers. We end up with unique val[0]'s for the
443 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
444 *
445 * Keep in mind that several mounts may be running in parallel. Starting
446 * the search one past where the previous search terminated is both a
447 * micro-optimization and a defense against returning the same fsid to
448 * different mounts.
449 */
450 void
451 vfs_getnewfsid(struct mount *mp)
452 {
453 static u_int16_t mntid_base;
454 struct mount *nmp;
455 fsid_t tfsid;
456 int mtype;
457
458 mtx_lock(&mntid_mtx);
459 mtype = mp->mnt_vfc->vfc_typenum;
460 tfsid.val[1] = mtype;
461 mtype = (mtype & 0xFF) << 24;
462 for (;;) {
463 tfsid.val[0] = makedev(255,
464 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
465 mntid_base++;
466 if ((nmp = vfs_getvfs(&tfsid)) == NULL)
467 break;
468 vfs_rel(nmp);
469 }
470 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
471 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
472 mtx_unlock(&mntid_mtx);
473 }
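/*
 * Illustrative result (made-up inputs): for a filesystem type with
 * vfc_typenum = 5 and the first pass through the loop (mntid_base = 0),
 * the code above produces
 *
 *	tfsid.val[1] = 5
 *	tfsid.val[0] = makedev(255, (5 & 0xFF) << 24)
 *
 * Later mounts advance mntid_base, varying the low and middle bytes of
 * the minor number, which is what keeps val[0] unique (and unique mod
 * 2^16) for the early calls described in the comment above.
 */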
474
475 /*
476 * Knob to control the precision of file timestamps:
477 *
478 * 0 = seconds only; nanoseconds zeroed.
479 * 1 = seconds and nanoseconds, accurate within 1/HZ.
480 * 2 = seconds and nanoseconds, truncated to microseconds.
481 * >=3 = seconds and nanoseconds, maximum precision.
482 */
483 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
484
485 static int timestamp_precision = TSP_SEC;
486 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
487     &timestamp_precision, 0, "");
488
489 /*
490 * Get a current timestamp.
491 */
492 void
493 vfs_timestamp(struct timespec *tsp)
494 {
495 struct timeval tv;
496
497 switch (timestamp_precision) {
498 case TSP_SEC:
499 tsp->tv_sec = time_second;
500 tsp->tv_nsec = 0;
501 break;
502 case TSP_HZ:
503 getnanotime(tsp);
504 break;
505 case TSP_USEC:
506 microtime(&tv);
507 TIMEVAL_TO_TIMESPEC(&tv, tsp);
508 break;
509 case TSP_NSEC:
510 default:
511 nanotime(tsp);
512 break;
513 }
514 }
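/*
 * Example: the precision is a run-time knob.  An administrator who wants
 * full-precision timestamps can set
 *
 *	sysctl vfs.timestamp_precision=3
 *
 * after which filesystems that stamp times through vfs_timestamp() use
 * nanotime(); the default (TSP_SEC) keeps the cheap seconds-only path.
 */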
515
516 /*
517 * Set vnode attributes to VNOVAL
518 */
519 void
520 vattr_null(struct vattr *vap)
521 {
522
523 vap->va_type = VNON;
524 vap->va_size = VNOVAL;
525 vap->va_bytes = VNOVAL;
526 vap->va_mode = VNOVAL;
527 vap->va_nlink = VNOVAL;
528 vap->va_uid = VNOVAL;
529 vap->va_gid = VNOVAL;
530 vap->va_fsid = VNOVAL;
531 vap->va_fileid = VNOVAL;
532 vap->va_blocksize = VNOVAL;
533 vap->va_rdev = VNOVAL;
534 vap->va_atime.tv_sec = VNOVAL;
535 vap->va_atime.tv_nsec = VNOVAL;
536 vap->va_mtime.tv_sec = VNOVAL;
537 vap->va_mtime.tv_nsec = VNOVAL;
538 vap->va_ctime.tv_sec = VNOVAL;
539 vap->va_ctime.tv_nsec = VNOVAL;
540 vap->va_birthtime.tv_sec = VNOVAL;
541 vap->va_birthtime.tv_nsec = VNOVAL;
542 vap->va_flags = VNOVAL;
543 vap->va_gen = VNOVAL;
544 vap->va_vaflags = 0;
545 }
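/*
 * Typical use (sketch; the caller shown is hypothetical): attribute
 * changes are built by clearing everything to VNOVAL and then filling in
 * only the fields being set, so the filesystem's VOP_SETATTR() ignores
 * the rest.
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);	(sys/vnode.h wrapper around vattr_null())
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, cred, td);
 */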
546
547 /*
548 * This routine is called when we have too many vnodes. It attempts
549 * to free <count> vnodes and will potentially free vnodes that still
550 * have VM backing store (VM backing store is typically the cause
551 * of a vnode blowout so we want to do this). Therefore, this operation
552 * is not considered cheap.
553 *
554 * A number of conditions may prevent a vnode from being reclaimed:
555 * the buffer cache may have references on the vnode, a directory
556 * vnode may still have references due to the namei cache representing
557 * underlying files, or the vnode may be in active use. It is not
558 * desirable to reuse such vnodes. These conditions may cause the
559 * number of vnodes to reach some minimum value regardless of what
560 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
561 */
562 static int
563 vlrureclaim(struct mount *mp)
564 {
565 struct thread *td;
566 struct vnode *vp;
567 int done;
568 int trigger;
569 int usevnodes;
570 int count;
571
572 /*
573 * Calculate the trigger point, don't allow user
574 * screwups to blow us up. This prevents us from
575 * recycling vnodes with lots of resident pages. We
576 * aren't trying to free memory, we are trying to
577 * free vnodes.
578 */
579 usevnodes = desiredvnodes;
580 if (usevnodes <= 0)
581 usevnodes = 1;
582 trigger = cnt.v_page_count * 2 / usevnodes;
583 done = 0;
584 td = curthread;
585 vn_start_write(NULL, &mp, V_WAIT);
586 MNT_ILOCK(mp);
587 count = mp->mnt_nvnodelistsize / 10 + 1;
588 while (count != 0) {
589 vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
590 while (vp != NULL && vp->v_type == VMARKER)
591 vp = TAILQ_NEXT(vp, v_nmntvnodes);
592 if (vp == NULL)
593 break;
594 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
595 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
596 --count;
597 if (!VI_TRYLOCK(vp))
598 goto next_iter;
599 /*
600 * If it's been deconstructed already, it's still
601 * referenced, or it exceeds the trigger, skip it.
602 */
603 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
604 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
605 vp->v_object->resident_page_count > trigger)) {
606 VI_UNLOCK(vp);
607 goto next_iter;
608 }
609 MNT_IUNLOCK(mp);
610 vholdl(vp);
611 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT, td)) {
612 vdrop(vp);
613 goto next_iter_mntunlocked;
614 }
615 VI_LOCK(vp);
616 /*
617 * v_usecount may have been bumped after VOP_LOCK() dropped
618 * the vnode interlock and before it was locked again.
619 *
620 * It is not necessary to recheck VI_DOOMED because it can
621 * only be set by another thread that holds both the vnode
622 * lock and vnode interlock. If another thread has the
623 * vnode lock before we get to VOP_LOCK() and obtains the
624 * vnode interlock after VOP_LOCK() drops the vnode
625 * interlock, the other thread will be unable to drop the
626 * vnode lock before our VOP_LOCK() call fails.
627 */
628 if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
629 (vp->v_object != NULL &&
630 vp->v_object->resident_page_count > trigger)) {
631 VOP_UNLOCK(vp, LK_INTERLOCK, td);
632 goto next_iter_mntunlocked;
633 }
634 KASSERT((vp->v_iflag & VI_DOOMED) == 0,
635 ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
636 vgonel(vp);
637 VOP_UNLOCK(vp, 0, td);
638 vdropl(vp);
639 done++;
640 next_iter_mntunlocked:
641 if ((count % 256) != 0)
642 goto relock_mnt;
643 goto yield;
644 next_iter:
645 if ((count % 256) != 0)
646 continue;
647 MNT_IUNLOCK(mp);
648 yield:
649 uio_yield();
650 relock_mnt:
651 MNT_ILOCK(mp);
652 }
653 MNT_IUNLOCK(mp);
654 vn_finished_write(mp);
655 return done;
656 }
657
658 /*
659 * Attempt to keep the free list at wantfreevnodes length.
660 */
661 static void
662 vnlru_free(int count)
663 {
664 struct vnode *vp;
665 int vfslocked;
666
667 mtx_assert(&vnode_free_list_mtx, MA_OWNED);
668 for (; count > 0; count--) {
669 vp = TAILQ_FIRST(&vnode_free_list);
670 /*
671 * The list can be modified while the free_list_mtx
672 * has been dropped and vp could be NULL here.
673 */
674 if (!vp)
675 break;
676 VNASSERT(vp->v_op != NULL, vp,
677 ("vnlru_free: vnode already reclaimed."));
678 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
679 /*
680 * Don't recycle if we can't get the interlock.
681 */
682 if (!VI_TRYLOCK(vp)) {
683 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
684 continue;
685 }
686 VNASSERT(VCANRECYCLE(vp), vp,
687 ("vp inconsistent on freelist"));
688 freevnodes--;
689 vp->v_iflag &= ~VI_FREE;
690 vholdl(vp);
691 mtx_unlock(&vnode_free_list_mtx);
692 VI_UNLOCK(vp);
693 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
694 vtryrecycle(vp);
695 VFS_UNLOCK_GIANT(vfslocked);
696 /*
697 * If the recycle succeeded, this vdrop will actually free
698 * the vnode. If not, it will simply place it back on
699 * the free list.
700 */
701 vdrop(vp);
702 mtx_lock(&vnode_free_list_mtx);
703 }
704 }
705 /*
706 * Attempt to recycle vnodes in a context that is always safe to block.
707 * Calling vlrureclaim() from the bowels of filesystem code has some
708 * interesting deadlock problems.
709 */
710 static struct proc *vnlruproc;
711 static int vnlruproc_sig;
712
713 static void
714 vnlru_proc(void)
715 {
716 struct mount *mp, *nmp;
717 int done, vfslocked;
718 struct proc *p = vnlruproc;
719 struct thread *td = curthread;
720
721 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
722 SHUTDOWN_PRI_FIRST);
723
724 for (;;) {
725 kthread_suspend_check(p);
726 mtx_lock(&vnode_free_list_mtx);
727 if (freevnodes > wantfreevnodes)
728 vnlru_free(freevnodes - wantfreevnodes);
729 if (numvnodes <= desiredvnodes * 9 / 10) {
730 vnlruproc_sig = 0;
731 wakeup(&vnlruproc_sig);
732 msleep(vnlruproc, &vnode_free_list_mtx,
733 PVFS|PDROP, "vlruwt", hz);
734 continue;
735 }
736 mtx_unlock(&vnode_free_list_mtx);
737 done = 0;
738 mtx_lock(&mountlist_mtx);
739 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
740 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
741 nmp = TAILQ_NEXT(mp, mnt_list);
742 continue;
743 }
744 vfslocked = VFS_LOCK_GIANT(mp);
745 done += vlrureclaim(mp);
746 VFS_UNLOCK_GIANT(vfslocked);
747 mtx_lock(&mountlist_mtx);
748 nmp = TAILQ_NEXT(mp, mnt_list);
749 vfs_unbusy(mp, td);
750 }
751 mtx_unlock(&mountlist_mtx);
752 if (done == 0) {
753 EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10);
754 #if 0
755 /* These messages are temporary debugging aids */
756 if (vnlru_nowhere < 5)
757 printf("vnlru process getting nowhere..\n");
758 else if (vnlru_nowhere == 5)
759 printf("vnlru process messages stopped.\n");
760 #endif
761 vnlru_nowhere++;
762 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
763 } else
764 uio_yield();
765 }
766 }
767
768 static struct kproc_desc vnlru_kp = {
769 "vnlru",
770 vnlru_proc,
771 &vnlruproc
772 };
773 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
774 &vnlru_kp);
775
776 /*
777 * Routines having to do with the management of the vnode table.
778 */
779
780 static void
781 vdestroy(struct vnode *vp)
782 {
783 struct bufobj *bo;
784
785 CTR1(KTR_VFS, "vdestroy vp %p", vp);
786 mtx_lock(&vnode_free_list_mtx);
787 numvnodes--;
788 mtx_unlock(&vnode_free_list_mtx);
789 bo = &vp->v_bufobj;
790 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
791 ("cleaned vnode still on the free list."));
792 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
793 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
794 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
795 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
796 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
797 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
798 VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
799 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
800 VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
801 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
802 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
803 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
804 VI_UNLOCK(vp);
805 #ifdef MAC
806 mac_destroy_vnode(vp);
807 #endif
808 if (vp->v_pollinfo != NULL)
809 destroy_vpollinfo(vp->v_pollinfo);
810 #ifdef INVARIANTS
811 /* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
812 vp->v_op = NULL;
813 #endif
814 lockdestroy(vp->v_vnlock);
815 mtx_destroy(&vp->v_interlock);
816 uma_zfree(vnode_zone, vp);
817 }
818
819 /*
820 * Try to recycle a freed vnode. We abort if anyone picks up a reference
821 * before we actually vgone(). This function must be called with the vnode
822 * held to prevent the vnode from being returned to the free list midway
823 * through vgone().
824 */
825 static int
826 vtryrecycle(struct vnode *vp)
827 {
828 struct thread *td = curthread;
829 struct mount *vnmp;
830
831 CTR1(KTR_VFS, "vtryrecycle: trying vp %p", vp);
832 VNASSERT(vp->v_holdcnt, vp,
833 ("vtryrecycle: Recycling vp %p without a reference.", vp));
834 /*
835 * This vnode may be found and locked via some other list; if so, we
836 * can't recycle it yet.
837 */
838 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
839 return (EWOULDBLOCK);
840 /*
841 * Don't recycle if its filesystem is being suspended.
842 */
843 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
844 VOP_UNLOCK(vp, 0, td);
845 return (EBUSY);
846 }
847 /*
848 * If we got this far, we need to acquire the interlock and see if
849 * anyone picked up this vnode from another list. If not, we will
850 * mark it with DOOMED via vgonel() so that anyone who does find it
851 * will skip over it.
852 */
853 VI_LOCK(vp);
854 if (vp->v_usecount) {
855 VOP_UNLOCK(vp, LK_INTERLOCK, td);
856 vn_finished_write(vnmp);
857 return (EBUSY);
858 }
859 if ((vp->v_iflag & VI_DOOMED) == 0)
860 vgonel(vp);
861 VOP_UNLOCK(vp, LK_INTERLOCK, td);
862 vn_finished_write(vnmp);
863 CTR1(KTR_VFS, "vtryrecycle: recycled vp %p", vp);
864 return (0);
865 }
866
867 /*
868 * Return the next vnode from the free list.
869 */
870 int
871 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
872 struct vnode **vpp)
873 {
874 struct vnode *vp = NULL;
875 struct bufobj *bo;
876
877 mtx_lock(&vnode_free_list_mtx);
878 /*
879 * Lend our context to reclaim vnodes if they've exceeded the max.
880 */
881 if (freevnodes > wantfreevnodes)
882 vnlru_free(1);
883 /*
884 * Wait for available vnodes.
885 */
886 if (numvnodes > desiredvnodes) {
887 if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
888 /*
889 * The file system is being suspended; we cannot risk a
890 * deadlock here, so allocate a new vnode anyway.
891 */
892 if (freevnodes > wantfreevnodes)
893 vnlru_free(freevnodes - wantfreevnodes);
894 goto alloc;
895 }
896 if (vnlruproc_sig == 0) {
897 vnlruproc_sig = 1; /* avoid unnecessary wakeups */
898 wakeup(vnlruproc);
899 }
900 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
901 "vlruwk", hz);
902 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
903 if (numvnodes > desiredvnodes) {
904 mtx_unlock(&vnode_free_list_mtx);
905 return (ENFILE);
906 }
907 #endif
908 }
909 alloc:
910 numvnodes++;
911 mtx_unlock(&vnode_free_list_mtx);
912 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
913 /*
914 * Setup locks.
915 */
916 vp->v_vnlock = &vp->v_lock;
917 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
918 /*
919 * By default, don't allow shared locks unless filesystems
920 * opt in.
921 */
922 lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
923 /*
924 * Initialize bufobj.
925 */
926 bo = &vp->v_bufobj;
927 bo->__bo_vnode = vp;
928 bo->bo_mtx = &vp->v_interlock;
929 bo->bo_ops = &buf_ops_bio;
930 bo->bo_private = vp;
931 TAILQ_INIT(&bo->bo_clean.bv_hd);
932 TAILQ_INIT(&bo->bo_dirty.bv_hd);
933 /*
934 * Initialize namecache.
935 */
936 LIST_INIT(&vp->v_cache_src);
937 TAILQ_INIT(&vp->v_cache_dst);
938 /*
939 * Finalize various vnode identity bits.
940 */
941 vp->v_type = VNON;
942 vp->v_tag = tag;
943 vp->v_op = vops;
944 v_incr_usecount(vp);
945 vp->v_data = 0;
946 #ifdef MAC
947 mac_init_vnode(vp);
948 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
949 mac_associate_vnode_singlelabel(mp, vp);
950 else if (mp == NULL && vops != &dead_vnodeops)
951 printf("NULL mp in getnewvnode()\n");
952 #endif
953 if (mp != NULL) {
954 bo->bo_bsize = mp->mnt_stat.f_iosize;
955 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
956 vp->v_vflag |= VV_NOKNOTE;
957 }
958
959 CTR2(KTR_VFS, "getnewvnode: mp %p vp %p", mp, vp);
960 *vpp = vp;
961 return (0);
962 }
963
964 /*
965 * Delete from old mount point vnode list, if on one.
966 */
967 static void
968 delmntque(struct vnode *vp)
969 {
970 struct mount *mp;
971
972 mp = vp->v_mount;
973 if (mp == NULL)
974 return;
975 MNT_ILOCK(mp);
976 vp->v_mount = NULL;
977 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
978 ("bad mount point vnode list size"));
979 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
980 mp->mnt_nvnodelistsize--;
981 MNT_REL(mp);
982 MNT_IUNLOCK(mp);
983 }
984
985 static void
986 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
987 {
988 struct thread *td;
989
990 td = curthread; /* XXX ? */
991 vp->v_data = NULL;
992 vp->v_op = &dead_vnodeops;
993 /* XXX: a non-MPSAFE filesystem may still call insmntque() with the
994  * vnode unlocked. */
995 if (!VOP_ISLOCKED(vp, td))
996 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
997 vgone(vp);
998 vput(vp);
999 }
1000
1001 /*
1002 * Insert into list of vnodes for the new mount point, if available.
1003 */
1004 int
1005 insmntque1(struct vnode *vp, struct mount *mp,
1006 void (*dtr)(struct vnode *, void *), void *dtr_arg)
1007 {
1008 int locked;
1009
1010 KASSERT(vp->v_mount == NULL,
1011 ("insmntque: vnode already on per mount vnode list"));
1012 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1013 #if 0
1014 #ifdef DEBUG_VFS_LOCKS
1015 if (!VFS_NEEDSGIANT(mp))
1016 ASSERT_VOP_ELOCKED(vp,
1017 "insmntque: mp-safe fs and non-locked vp");
1018 #endif
1019 #endif
1020 MNT_ILOCK(mp);
1021 if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1022 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1023 mp->mnt_nvnodelistsize == 0)) {
1024 locked = VOP_ISLOCKED(vp, curthread);
1025 if (!locked || (locked == LK_EXCLUSIVE &&
1026 (vp->v_vflag & VV_FORCEINSMQ) == 0)) {
1027 MNT_IUNLOCK(mp);
1028 if (dtr != NULL)
1029 dtr(vp, dtr_arg);
1030 return (EBUSY);
1031 }
1032 }
1033 vp->v_mount = mp;
1034 MNT_REF(mp);
1035 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1036 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1037 ("neg mount point vnode list size"));
1038 mp->mnt_nvnodelistsize++;
1039 MNT_IUNLOCK(mp);
1040 return (0);
1041 }
1042
1043 int
1044 insmntque(struct vnode *vp, struct mount *mp)
1045 {
1046
1047 return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1048 }
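/*
 * Typical use (sketch): a filesystem's VFS_VGET()/create path allocates
 * the vnode, fills in its private data, and only then hooks it onto the
 * mount's vnode list.  The "myfs" names and "ip" private data below are
 * hypothetical:
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 *	vp->v_data = ip;
 *	error = insmntque(vp, mp);
 *	if (error)
 *		return (error);
 *
 * On failure insmntque() has already run insmntque_stddtr(), which
 * vgone()s and vput()s the vnode, so the caller just propagates the
 * error.
 */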
1049
1050 /*
1051 * Flush out and invalidate all buffers associated with a bufobj
1052 * Called with the underlying object locked.
1053 */
1054 int
1055 bufobj_invalbuf(struct bufobj *bo, int flags, struct thread *td, int slpflag,
1056 int slptimeo)
1057 {
1058 int error;
1059
1060 BO_LOCK(bo);
1061 if (flags & V_SAVE) {
1062 error = bufobj_wwait(bo, slpflag, slptimeo);
1063 if (error) {
1064 BO_UNLOCK(bo);
1065 return (error);
1066 }
1067 if (bo->bo_dirty.bv_cnt > 0) {
1068 BO_UNLOCK(bo);
1069 if ((error = BO_SYNC(bo, MNT_WAIT, td)) != 0)
1070 return (error);
1071 /*
1072 * XXX We could save a lock/unlock if this was only
1073 * enabled under INVARIANTS
1074 */
1075 BO_LOCK(bo);
1076 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1077 panic("vinvalbuf: dirty bufs");
1078 }
1079 }
1080 /*
1081 * If you alter this loop please notice that interlock is dropped and
1082 * reacquired in flushbuflist. Special care is needed to ensure that
1083 * no race conditions occur from this.
1084 */
1085 do {
1086 error = flushbuflist(&bo->bo_clean,
1087 flags, bo, slpflag, slptimeo);
1088 if (error == 0)
1089 error = flushbuflist(&bo->bo_dirty,
1090 flags, bo, slpflag, slptimeo);
1091 if (error != 0 && error != EAGAIN) {
1092 BO_UNLOCK(bo);
1093 return (error);
1094 }
1095 } while (error != 0);
1096
1097 /*
1098 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1099 * have write I/O in-progress but if there is a VM object then the
1100 * VM object can also have read-I/O in-progress.
1101 */
1102 do {
1103 bufobj_wwait(bo, 0, 0);
1104 BO_UNLOCK(bo);
1105 if (bo->bo_object != NULL) {
1106 VM_OBJECT_LOCK(bo->bo_object);
1107 vm_object_pip_wait(bo->bo_object, "bovlbx");
1108 VM_OBJECT_UNLOCK(bo->bo_object);
1109 }
1110 BO_LOCK(bo);
1111 } while (bo->bo_numoutput > 0);
1112 BO_UNLOCK(bo);
1113
1114 /*
1115 * Destroy the copy in the VM cache, too.
1116 */
1117 if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
1118 VM_OBJECT_LOCK(bo->bo_object);
1119 vm_object_page_remove(bo->bo_object, 0, 0,
1120 (flags & V_SAVE) ? TRUE : FALSE);
1121 VM_OBJECT_UNLOCK(bo->bo_object);
1122 }
1123
1124 #ifdef INVARIANTS
1125 BO_LOCK(bo);
1126 if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1127 (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1128 panic("vinvalbuf: flush failed");
1129 BO_UNLOCK(bo);
1130 #endif
1131 return (0);
1132 }
1133
1134 /*
1135 * Flush out and invalidate all buffers associated with a vnode.
1136 * Called with the underlying object locked.
1137 */
1138 int
1139 vinvalbuf(struct vnode *vp, int flags, struct thread *td, int slpflag,
1140 int slptimeo)
1141 {
1142
1143 CTR2(KTR_VFS, "vinvalbuf vp %p flags %d", vp, flags);
1144 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1145 return (bufobj_invalbuf(&vp->v_bufobj, flags, td, slpflag, slptimeo));
1146 }
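/*
 * Typical use (sketch): callers hold the vnode lock and either flush
 * dirty data first or simply discard everything.
 *
 *	error = vinvalbuf(vp, V_SAVE, td, 0, 0);	write back, then purge
 *	error = vinvalbuf(vp, 0, td, 0, 0);		purge without writing
 *
 * Passing V_NORMAL or V_ALT instead restricts the purge to normal data
 * buffers or to alternate (BX_ALTDATA, e.g. extended attribute) buffers,
 * as handled in flushbuflist() below.
 */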
1147
1148 /*
1149 * Flush out buffers on the specified list.
1150 *
1151 */
1152 static int
1153 flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1154 int slptimeo)
1155 {
1156 struct buf *bp, *nbp;
1157 int retval, error;
1158 daddr_t lblkno;
1159 b_xflags_t xflags;
1160
1161 ASSERT_BO_LOCKED(bo);
1162
1163 retval = 0;
1164 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1165 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1166 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1167 continue;
1168 }
1169 lblkno = 0;
1170 xflags = 0;
1171 if (nbp != NULL) {
1172 lblkno = nbp->b_lblkno;
1173 xflags = nbp->b_xflags &
1174 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
1175 }
1176 retval = EAGAIN;
1177 error = BUF_TIMELOCK(bp,
1178 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1179 "flushbuf", slpflag, slptimeo);
1180 if (error) {
1181 BO_LOCK(bo);
1182 return (error != ENOLCK ? error : EAGAIN);
1183 }
1184 KASSERT(bp->b_bufobj == bo,
1185 ("bp %p wrong b_bufobj %p should be %p",
1186 bp, bp->b_bufobj, bo));
1187 if (bp->b_bufobj != bo) { /* XXX: necessary ? */
1188 BUF_UNLOCK(bp);
1189 BO_LOCK(bo);
1190 return (EAGAIN);
1191 }
1192 /*
1193 * XXX Since there are no node locks for NFS, I
1194 * believe there is a slight chance that a delayed
1195 * write will occur while sleeping just above, so
1196 * check for it.
1197 */
1198 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1199 (flags & V_SAVE)) {
1200 bremfree(bp);
1201 bp->b_flags |= B_ASYNC;
1202 bwrite(bp);
1203 BO_LOCK(bo);
1204 return (EAGAIN); /* XXX: why not loop ? */
1205 }
1206 bremfree(bp);
1207 bp->b_flags |= (B_INVAL | B_RELBUF);
1208 bp->b_flags &= ~B_ASYNC;
1209 brelse(bp);
1210 BO_LOCK(bo);
1211 if (nbp != NULL &&
1212 (nbp->b_bufobj != bo ||
1213 nbp->b_lblkno != lblkno ||
1214 (nbp->b_xflags &
1215 (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
1216 break; /* nbp invalid */
1217 }
1218 return (retval);
1219 }
1220
1221 /*
1222 * Truncate a file's buffer and pages to a specified length. This
1223 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1224 * sync activity.
1225 */
1226 int
1227 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
1228 off_t length, int blksize)
1229 {
1230 struct buf *bp, *nbp;
1231 int anyfreed;
1232 int trunclbn;
1233 struct bufobj *bo;
1234
1235 CTR2(KTR_VFS, "vtruncbuf vp %p length %jd", vp, length);
1236 /*
1237 * Round up to the *next* lbn.
1238 */
1239 trunclbn = (length + blksize - 1) / blksize;
1240
1241 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1242 restart:
1243 VI_LOCK(vp);
1244 bo = &vp->v_bufobj;
1245 anyfreed = 1;
1246 for (;anyfreed;) {
1247 anyfreed = 0;
1248 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1249 if (bp->b_lblkno < trunclbn)
1250 continue;
1251 if (BUF_LOCK(bp,
1252 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1253 VI_MTX(vp)) == ENOLCK)
1254 goto restart;
1255
1256 bremfree(bp);
1257 bp->b_flags |= (B_INVAL | B_RELBUF);
1258 bp->b_flags &= ~B_ASYNC;
1259 brelse(bp);
1260 anyfreed = 1;
1261
1262 if (nbp != NULL &&
1263 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1264 (nbp->b_vp != vp) ||
1265 (nbp->b_flags & B_DELWRI))) {
1266 goto restart;
1267 }
1268 VI_LOCK(vp);
1269 }
1270
1271 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1272 if (bp->b_lblkno < trunclbn)
1273 continue;
1274 if (BUF_LOCK(bp,
1275 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1276 VI_MTX(vp)) == ENOLCK)
1277 goto restart;
1278 bremfree(bp);
1279 bp->b_flags |= (B_INVAL | B_RELBUF);
1280 bp->b_flags &= ~B_ASYNC;
1281 brelse(bp);
1282 anyfreed = 1;
1283 if (nbp != NULL &&
1284 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1285 (nbp->b_vp != vp) ||
1286 (nbp->b_flags & B_DELWRI) == 0)) {
1287 goto restart;
1288 }
1289 VI_LOCK(vp);
1290 }
1291 }
1292
1293 if (length > 0) {
1294 restartsync:
1295 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1296 if (bp->b_lblkno > 0)
1297 continue;
1298 /*
1299 * Since we hold the vnode lock this should only
1300 * fail if we're racing with the buf daemon.
1301 */
1302 if (BUF_LOCK(bp,
1303 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1304 VI_MTX(vp)) == ENOLCK) {
1305 goto restart;
1306 }
1307 VNASSERT((bp->b_flags & B_DELWRI), vp,
1308 ("buf(%p) on dirty queue without DELWRI", bp));
1309
1310 bremfree(bp);
1311 bawrite(bp);
1312 VI_LOCK(vp);
1313 goto restartsync;
1314 }
1315 }
1316
1317 bufobj_wwait(bo, 0, 0);
1318 VI_UNLOCK(vp);
1319 vnode_pager_setsize(vp, length);
1320
1321 return (0);
1322 }
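/*
 * Typical use (sketch; the caller and "fs_bsize" are hypothetical): a
 * filesystem's truncate routine discards buffers beyond the new end of
 * file before it adjusts its own block pointers:
 *
 *	error = vtruncbuf(vp, cred, td, length, fs_bsize);
 *
 * Buffers at or past the rounded-up logical block number are invalidated;
 * with length 0 every buffer at a non-negative lblkno goes away and
 * vnode_pager_setsize() shrinks the backing VM object to match.
 */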
1323
1324 /*
1325 * buf_splay() - splay tree core for the clean/dirty list of buffers in
1326 * a vnode.
1327 *
1328 * NOTE: We have to deal with the special case of a background bitmap
1329 * buffer, a situation where two buffers will have the same logical
1330 * block offset. We want (1) only the foreground buffer to be accessed
1331 * in a lookup and (2) must differentiate between the foreground and
1332 * background buffer in the splay tree algorithm because the splay
1333 * tree cannot normally handle multiple entities with the same 'index'.
1334 * We accomplish this by adding differentiating flags to the splay tree's
1335 * numerical domain.
1336 */
1337 static
1338 struct buf *
1339 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1340 {
1341 struct buf dummy;
1342 struct buf *lefttreemax, *righttreemin, *y;
1343
1344 if (root == NULL)
1345 return (NULL);
1346 lefttreemax = righttreemin = &dummy;
1347 for (;;) {
1348 if (lblkno < root->b_lblkno ||
1349 (lblkno == root->b_lblkno &&
1350 (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1351 if ((y = root->b_left) == NULL)
1352 break;
1353 if (lblkno < y->b_lblkno) {
1354 /* Rotate right. */
1355 root->b_left = y->b_right;
1356 y->b_right = root;
1357 root = y;
1358 if ((y = root->b_left) == NULL)
1359 break;
1360 }
1361 /* Link into the new root's right tree. */
1362 righttreemin->b_left = root;
1363 righttreemin = root;
1364 } else if (lblkno > root->b_lblkno ||
1365 (lblkno == root->b_lblkno &&
1366 (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1367 if ((y = root->b_right) == NULL)
1368 break;
1369 if (lblkno > y->b_lblkno) {
1370 /* Rotate left. */
1371 root->b_right = y->b_left;
1372 y->b_left = root;
1373 root = y;
1374 if ((y = root->b_right) == NULL)
1375 break;
1376 }
1377 /* Link into the new root's left tree. */
1378 lefttreemax->b_right = root;
1379 lefttreemax = root;
1380 } else {
1381 break;
1382 }
1383 root = y;
1384 }
1385 /* Assemble the new root. */
1386 lefttreemax->b_right = root->b_left;
1387 righttreemin->b_left = root->b_right;
1388 root->b_left = dummy.b_right;
1389 root->b_right = dummy.b_left;
1390 return (root);
1391 }
1392
1393 static void
1394 buf_vlist_remove(struct buf *bp)
1395 {
1396 struct buf *root;
1397 struct bufv *bv;
1398
1399 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1400 ASSERT_BO_LOCKED(bp->b_bufobj);
1401 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1402 (BX_VNDIRTY|BX_VNCLEAN),
1403 ("buf_vlist_remove: Buf %p is on two lists", bp));
1404 if (bp->b_xflags & BX_VNDIRTY)
1405 bv = &bp->b_bufobj->bo_dirty;
1406 else
1407 bv = &bp->b_bufobj->bo_clean;
1408 if (bp != bv->bv_root) {
1409 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1410 KASSERT(root == bp, ("splay lookup failed in remove"));
1411 }
1412 if (bp->b_left == NULL) {
1413 root = bp->b_right;
1414 } else {
1415 root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1416 root->b_right = bp->b_right;
1417 }
1418 bv->bv_root = root;
1419 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1420 bv->bv_cnt--;
1421 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1422 }
1423
1424 /*
1425 * Add the buffer to the sorted clean or dirty block list using a
1426 * splay tree algorithm.
1427 *
1428 * NOTE: xflags is passed as a constant, optimizing this inline function!
1429 */
1430 static void
1431 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1432 {
1433 struct buf *root;
1434 struct bufv *bv;
1435
1436 ASSERT_BO_LOCKED(bo);
1437 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1438 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1439 bp->b_xflags |= xflags;
1440 if (xflags & BX_VNDIRTY)
1441 bv = &bo->bo_dirty;
1442 else
1443 bv = &bo->bo_clean;
1444
1445 root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1446 if (root == NULL) {
1447 bp->b_left = NULL;
1448 bp->b_right = NULL;
1449 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1450 } else if (bp->b_lblkno < root->b_lblkno ||
1451 (bp->b_lblkno == root->b_lblkno &&
1452 (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1453 bp->b_left = root->b_left;
1454 bp->b_right = root;
1455 root->b_left = NULL;
1456 TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1457 } else {
1458 bp->b_right = root->b_right;
1459 bp->b_left = root;
1460 root->b_right = NULL;
1461 TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1462 }
1463 bv->bv_cnt++;
1464 bv->bv_root = bp;
1465 }
1466
1467 /*
1468 * Lookup a buffer using the splay tree. Note that we specifically avoid
1469 * shadow buffers used in background bitmap writes.
1470 *
1471 * This code isn't quite as efficient as it could be because we are maintaining
1472 * two sorted lists and do not know which list the block resides in.
1473 *
1474 * During a "make buildworld" the desired buffer is found at one of
1475 * the roots more than 60% of the time. Thus, checking both roots
1476 * before performing either splay eliminates unnecessary splays on the
1477 * first tree splayed.
1478 */
1479 struct buf *
1480 gbincore(struct bufobj *bo, daddr_t lblkno)
1481 {
1482 struct buf *bp;
1483
1484 ASSERT_BO_LOCKED(bo);
1485 if ((bp = bo->bo_clean.bv_root) != NULL &&
1486 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1487 return (bp);
1488 if ((bp = bo->bo_dirty.bv_root) != NULL &&
1489 bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1490 return (bp);
1491 if ((bp = bo->bo_clean.bv_root) != NULL) {
1492 bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1493 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1494 return (bp);
1495 }
1496 if ((bp = bo->bo_dirty.bv_root) != NULL) {
1497 bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1498 if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1499 return (bp);
1500 }
1501 return (NULL);
1502 }
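/*
 * Typical use (sketch): lookups such as the one in getblk() probe for an
 * existing buffer while holding the bufobj lock and fall back to
 * allocating a new buffer on a miss.
 *
 *	BO_LOCK(bo);
 *	bp = gbincore(bo, lblkno);
 *	if (bp != NULL)
 *		... lock bp (dropping the bufobj lock) and reuse it ...
 *	else
 *		... BO_UNLOCK(bo) and allocate a fresh buffer ...
 *
 * Because the previous lookup leaves its target at a root, checking both
 * roots first makes repeated lookups of the same or nearby blocks cheap.
 */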
1503
1504 /*
1505 * Associate a buffer with a vnode.
1506 */
1507 void
1508 bgetvp(struct vnode *vp, struct buf *bp)
1509 {
1510
1511 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1512
1513 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1514 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1515 ("bgetvp: bp already attached! %p", bp));
1516
1517 ASSERT_VI_LOCKED(vp, "bgetvp");
1518 vholdl(vp);
1519 if (VFS_NEEDSGIANT(vp->v_mount) ||
1520 vp->v_bufobj.bo_flag & BO_NEEDSGIANT)
1521 bp->b_flags |= B_NEEDSGIANT;
1522 bp->b_vp = vp;
1523 bp->b_bufobj = &vp->v_bufobj;
1524 /*
1525 * Insert onto list for new vnode.
1526 */
1527 buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
1528 }
1529
1530 /*
1531 * Disassociate a buffer from a vnode.
1532 */
1533 void
1534 brelvp(struct buf *bp)
1535 {
1536 struct bufobj *bo;
1537 struct vnode *vp;
1538
1539 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1540 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1541
1542 /*
1543 * Delete from old vnode list, if on one.
1544 */
1545 vp = bp->b_vp; /* XXX */
1546 bo = bp->b_bufobj;
1547 BO_LOCK(bo);
1548 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1549 buf_vlist_remove(bp);
1550 else
1551 panic("brelvp: Buffer %p not on queue.", bp);
1552 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1553 bo->bo_flag &= ~BO_ONWORKLST;
1554 mtx_lock(&sync_mtx);
1555 LIST_REMOVE(bo, bo_synclist);
1556 syncer_worklist_len--;
1557 mtx_unlock(&sync_mtx);
1558 }
1559 bp->b_flags &= ~B_NEEDSGIANT;
1560 bp->b_vp = NULL;
1561 bp->b_bufobj = NULL;
1562 vdropl(vp);
1563 }
1564
1565 /*
1566 * Add an item to the syncer work queue.
1567 */
1568 static void
1569 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1570 {
1571 int slot;
1572
1573 ASSERT_BO_LOCKED(bo);
1574
1575 mtx_lock(&sync_mtx);
1576 if (bo->bo_flag & BO_ONWORKLST)
1577 LIST_REMOVE(bo, bo_synclist);
1578 else {
1579 bo->bo_flag |= BO_ONWORKLST;
1580 syncer_worklist_len++;
1581 }
1582
1583 if (delay > syncer_maxdelay - 2)
1584 delay = syncer_maxdelay - 2;
1585 slot = (syncer_delayno + delay) & syncer_mask;
1586
1587 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1588 mtx_unlock(&sync_mtx);
1589 }
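/*
 * Example with illustrative numbers: with syncer_mask = 31 (so
 * syncer_maxdelay = 32), syncer_delayno = 3 and a requested delay of 15
 * seconds, the code above computes
 *
 *	slot = (3 + 15) & 31 = 18
 *
 * i.e. the bufobj lands fifteen queues ahead of the one the syncer will
 * process next; a delay greater than 30 would first have been clamped to
 * syncer_maxdelay - 2.
 */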
1590
1591 static int
1592 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1593 {
1594 int error, len;
1595
1596 mtx_lock(&sync_mtx);
1597 len = syncer_worklist_len - sync_vnode_count;
1598 mtx_unlock(&sync_mtx);
1599 error = SYSCTL_OUT(req, &len, sizeof(len));
1600 return (error);
1601 }
1602
1603 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1604 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1605
1606 static struct proc *updateproc;
1607 static void sched_sync(void);
1608 static struct kproc_desc up_kp = {
1609 "syncer",
1610 sched_sync,
1611 &updateproc
1612 };
1613 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
1614
1615 static int
1616 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
1617 {
1618 struct vnode *vp;
1619 struct mount *mp;
1620 int vfslocked;
1621
1622 vfslocked = 0;
1623 restart:
1624 *bo = LIST_FIRST(slp);
1625 if (*bo == NULL) {
1626 VFS_UNLOCK_GIANT(vfslocked);
1627 return (0);
1628 }
1629 vp = (*bo)->__bo_vnode; /* XXX */
1630 if (VFS_NEEDSGIANT(vp->v_mount)) {
1631 if (!vfslocked) {
1632 vfslocked = 1;
1633 if (mtx_trylock(&Giant) == 0) {
1634 mtx_unlock(&sync_mtx);
1635 mtx_lock(&Giant);
1636 mtx_lock(&sync_mtx);
1637 goto restart;
1638 }
1639 }
1640 } else {
1641 VFS_UNLOCK_GIANT(vfslocked);
1642 vfslocked = 0;
1643 }
1644 if (VOP_ISLOCKED(vp, NULL) != 0) {
1645 VFS_UNLOCK_GIANT(vfslocked);
1646 return (1);
1647 }
1648 if (VI_TRYLOCK(vp) == 0) {
1649 VFS_UNLOCK_GIANT(vfslocked);
1650 return (1);
1651 }
1652 /*
1653 * We use vhold in case the vnode does not
1654 * successfully sync. vhold prevents the vnode from
1655 * going away when we unlock the sync_mtx so that
1656 * we can acquire the vnode interlock.
1657 */
1658 vholdl(vp);
1659 mtx_unlock(&sync_mtx);
1660 VI_UNLOCK(vp);
1661 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1662 vdrop(vp);
1663 VFS_UNLOCK_GIANT(vfslocked);
1664 mtx_lock(&sync_mtx);
1665 return (*bo == LIST_FIRST(slp));
1666 }
1667 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1668 (void) VOP_FSYNC(vp, MNT_LAZY, td);
1669 VOP_UNLOCK(vp, 0, td);
1670 vn_finished_write(mp);
1671 VI_LOCK(vp);
1672 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
1673 /*
1674 * Put us back on the worklist. The worklist
1675 * routine will remove us from our current
1676 * position and then add us back in at a later
1677 * position.
1678 */
1679 vn_syncer_add_to_worklist(*bo, syncdelay);
1680 }
1681 vdropl(vp);
1682 VFS_UNLOCK_GIANT(vfslocked);
1683 mtx_lock(&sync_mtx);
1684 return (0);
1685 }
1686
1687 /*
1688 * System filesystem synchronizer daemon.
1689 */
1690 static void
1691 sched_sync(void)
1692 {
1693 struct synclist *next;
1694 struct synclist *slp;
1695 struct bufobj *bo;
1696 long starttime;
1697 struct thread *td = curthread;
1698 static int dummychan;
1699 int last_work_seen;
1700 int net_worklist_len;
1701 int syncer_final_iter;
1702 int first_printf;
1703 int error;
1704
1705 last_work_seen = 0;
1706 syncer_final_iter = 0;
1707 first_printf = 1;
1708 syncer_state = SYNCER_RUNNING;
1709 starttime = time_uptime;
1710 td->td_pflags |= TDP_NORUNNINGBUF;
1711
1712 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1713 SHUTDOWN_PRI_LAST);
1714
1715 mtx_lock(&sync_mtx);
1716 for (;;) {
1717 if (syncer_state == SYNCER_FINAL_DELAY &&
1718 syncer_final_iter == 0) {
1719 mtx_unlock(&sync_mtx);
1720 kthread_suspend_check(td->td_proc);
1721 mtx_lock(&sync_mtx);
1722 }
1723 net_worklist_len = syncer_worklist_len - sync_vnode_count;
1724 if (syncer_state != SYNCER_RUNNING &&
1725 starttime != time_uptime) {
1726 if (first_printf) {
1727 printf("\nSyncing disks, vnodes remaining...");
1728 first_printf = 0;
1729 }
1730 printf("%d ", net_worklist_len);
1731 }
1732 starttime = time_uptime;
1733
1734 /*
1735 * Push files whose dirty time has expired. Be careful
1736 * of interrupt race on slp queue.
1737 *
1738 * Skip over empty worklist slots when shutting down.
1739 */
1740 do {
1741 slp = &syncer_workitem_pending[syncer_delayno];
1742 syncer_delayno += 1;
1743 if (syncer_delayno == syncer_maxdelay)
1744 syncer_delayno = 0;
1745 next = &syncer_workitem_pending[syncer_delayno];
1746 /*
1747 * If the worklist has wrapped since it
1748 * was emptied of all but syncer vnodes,
1749 * switch to the FINAL_DELAY state and run
1750 * for one more second.
1751 */
1752 if (syncer_state == SYNCER_SHUTTING_DOWN &&
1753 net_worklist_len == 0 &&
1754 last_work_seen == syncer_delayno) {
1755 syncer_state = SYNCER_FINAL_DELAY;
1756 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1757 }
1758 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1759 syncer_worklist_len > 0);
1760
1761 /*
1762 * Keep track of the last time there was anything
1763 * on the worklist other than syncer vnodes.
1764 * Return to the SHUTTING_DOWN state if any
1765 * new work appears.
1766 */
1767 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1768 last_work_seen = syncer_delayno;
1769 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1770 syncer_state = SYNCER_SHUTTING_DOWN;
1771 while (!LIST_EMPTY(slp)) {
1772 error = sync_vnode(slp, &bo, td);
1773 if (error == 1) {
1774 LIST_REMOVE(bo, bo_synclist);
1775 LIST_INSERT_HEAD(next, bo, bo_synclist);
1776 continue;
1777 }
1778 }
1779 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1780 syncer_final_iter--;
1781 /*
1782 * The variable rushjob allows the kernel to speed up the
1783 * processing of the filesystem syncer process. A rushjob
1784 * value of N tells the filesystem syncer to process the next
1785 * N seconds worth of work on its queue ASAP. Currently rushjob
1786 * is used by the soft update code to speed up the filesystem
1787 * syncer process when the incore state is getting so far
1788 * ahead of the disk that the kernel memory pool is being
1789 * threatened with exhaustion.
1790 */
1791 if (rushjob > 0) {
1792 rushjob -= 1;
1793 continue;
1794 }
1795 /*
1796 * Just sleep for a short period of time between
1797 * iterations when shutting down to allow some I/O
1798 * to happen.
1799 *
1800 * If it has taken us less than a second to process the
1801 * current work, then wait. Otherwise start right over
1802 * again. We can still lose time if any single round
1803 * takes more than two seconds, but it does not really
1804 * matter as we are just trying to generally pace the
1805 * filesystem activity.
1806 */
1807 if (syncer_state != SYNCER_RUNNING)
1808 msleep(&dummychan, &sync_mtx, PPAUSE, "syncfnl",
1809 hz / SYNCER_SHUTDOWN_SPEEDUP);
1810 else if (time_uptime == starttime)
1811 msleep(&lbolt, &sync_mtx, PPAUSE, "syncer", 0);
1812 }
1813 }
1814
1815 /*
1816 * Request the syncer daemon to speed up its work.
1817 * We never push it to speed up more than half of its
1818 * normal turn time, otherwise it could take over the cpu.
1819 */
1820 int
1821 speedup_syncer(void)
1822 {
1823 struct thread *td;
1824 int ret = 0;
1825
1826 td = FIRST_THREAD_IN_PROC(updateproc);
1827 mtx_lock(&sync_mtx);
1828 if (rushjob < syncdelay / 2) {
1829 rushjob += 1;
1830 stat_rush_requests += 1;
1831 ret = 1;
1832 }
1833 mtx_unlock(&sync_mtx);
1834 sleepq_remove(td, &lbolt);
1835 return (ret);
1836 }
1837
1838 /*
1839 * Tell the syncer to speed up its work and run through its work
1840 * list several times, then tell it to shut down.
1841 */
1842 static void
1843 syncer_shutdown(void *arg, int howto)
1844 {
1845 struct thread *td;
1846
1847 if (howto & RB_NOSYNC)
1848 return;
1849 td = FIRST_THREAD_IN_PROC(updateproc);
1850 mtx_lock(&sync_mtx);
1851 syncer_state = SYNCER_SHUTTING_DOWN;
1852 rushjob = 0;
1853 mtx_unlock(&sync_mtx);
1854 sleepq_remove(td, &lbolt);
1855 kproc_shutdown(arg, howto);
1856 }
1857
1858 /*
1859 * Reassign a buffer from one vnode to another.
1860 * Used to assign file specific control information
1861 * (indirect blocks) to the vnode to which they belong.
1862 */
1863 void
1864 reassignbuf(struct buf *bp)
1865 {
1866 struct vnode *vp;
1867 struct bufobj *bo;
1868 int delay;
1869 #ifdef INVARIANTS
1870 struct bufv *bv;
1871 #endif
1872
1873 vp = bp->b_vp;
1874 bo = bp->b_bufobj;
1875 ++reassignbufcalls;
1876
1877 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1878 bp, bp->b_vp, bp->b_flags);
1879 /*
1880 * B_PAGING flagged buffers cannot be reassigned because their vp
1881 * is not fully linked in.
1882 */
1883 if (bp->b_flags & B_PAGING)
1884 panic("cannot reassign paging buffer");
1885
1886 /*
1887 * Delete from old vnode list, if on one.
1888 */
1889 VI_LOCK(vp);
1890 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1891 buf_vlist_remove(bp);
1892 else
1893 panic("reassignbuf: Buffer %p not on queue.", bp);
1894 /*
1895 * If dirty, put on list of dirty buffers; otherwise insert onto list
1896 * of clean buffers.
1897 */
1898 if (bp->b_flags & B_DELWRI) {
1899 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1900 switch (vp->v_type) {
1901 case VDIR:
1902 delay = dirdelay;
1903 break;
1904 case VCHR:
1905 delay = metadelay;
1906 break;
1907 default:
1908 delay = filedelay;
1909 }
1910 vn_syncer_add_to_worklist(bo, delay);
1911 }
1912 buf_vlist_add(bp, bo, BX_VNDIRTY);
1913 } else {
1914 buf_vlist_add(bp, bo, BX_VNCLEAN);
1915
1916 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1917 mtx_lock(&sync_mtx);
1918 LIST_REMOVE(bo, bo_synclist);
1919 syncer_worklist_len--;
1920 mtx_unlock(&sync_mtx);
1921 bo->bo_flag &= ~BO_ONWORKLST;
1922 }
1923 }
1924 #ifdef INVARIANTS
1925 bv = &bo->bo_clean;
1926 bp = TAILQ_FIRST(&bv->bv_hd);
1927 KASSERT(bp == NULL || bp->b_bufobj == bo,
1928 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1929 bp = TAILQ_LAST(&bv->bv_hd, buflists);
1930 KASSERT(bp == NULL || bp->b_bufobj == bo,
1931 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1932 bv = &bo->bo_dirty;
1933 bp = TAILQ_FIRST(&bv->bv_hd);
1934 KASSERT(bp == NULL || bp->b_bufobj == bo,
1935 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1936 bp = TAILQ_LAST(&bv->bv_hd, buflists);
1937 KASSERT(bp == NULL || bp->b_bufobj == bo,
1938 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1939 #endif
1940 VI_UNLOCK(vp);
1941 }
1942
1943 /*
1944 * Increment the use and hold counts on the vnode, taking care to reference
1945 * the driver's usecount if this is a chardev. The vholdl() will remove
1946 * the vnode from the free list if it is presently free. Requires the
1947 * vnode interlock and returns with it held.
1948 */
1949 static void
1950 v_incr_usecount(struct vnode *vp)
1951 {
1952
1953 CTR3(KTR_VFS, "v_incr_usecount: vp %p holdcnt %d usecount %d\n",
1954 vp, vp->v_holdcnt, vp->v_usecount);
1955 vp->v_usecount++;
1956 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1957 dev_lock();
1958 vp->v_rdev->si_usecount++;
1959 dev_unlock();
1960 }
1961 vholdl(vp);
1962 }
1963
1964 /*
1965 * Turn a holdcnt into a use+holdcnt such that only one call to
1966 * v_decr_usecount is needed.
1967 */
1968 static void
1969 v_upgrade_usecount(struct vnode *vp)
1970 {
1971
1972 CTR3(KTR_VFS, "v_upgrade_usecount: vp %p holdcnt %d usecount %d\n",
1973 vp, vp->v_holdcnt, vp->v_usecount);
1974 vp->v_usecount++;
1975 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1976 dev_lock();
1977 vp->v_rdev->si_usecount++;
1978 dev_unlock();
1979 }
1980 }
1981
1982 /*
1983 * Decrement the vnode use and hold count along with the driver's usecount
1984 * if this is a chardev. The vdropl() below releases the vnode interlock
1985 * as it may free the vnode.
1986 */
1987 static void
1988 v_decr_usecount(struct vnode *vp)
1989 {
1990
1991 CTR3(KTR_VFS, "v_decr_usecount: vp %p holdcnt %d usecount %d\n",
1992 vp, vp->v_holdcnt, vp->v_usecount);
1993 ASSERT_VI_LOCKED(vp, __FUNCTION__);
1994 VNASSERT(vp->v_usecount > 0, vp,
1995 ("v_decr_usecount: negative usecount"));
1996 vp->v_usecount--;
1997 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1998 dev_lock();
1999 vp->v_rdev->si_usecount--;
2000 dev_unlock();
2001 }
2002 vdropl(vp);
2003 }
2004
2005 /*
2006 * Decrement only the use count and driver use count. This is intended to
2007 * be paired with a follow-on vdropl() to release the remaining hold count.
2008 * In this way we may vgone() a vnode with a 0 usecount without risk of
2009 * having it end up on a free list because the hold count is kept above 0.
2010 */
2011 static void
2012 v_decr_useonly(struct vnode *vp)
2013 {
2014
2015 CTR3(KTR_VFS, "v_decr_useonly: vp %p holdcnt %d usecount %d\n",
2016 vp, vp->v_holdcnt, vp->v_usecount);
2017 ASSERT_VI_LOCKED(vp, __FUNCTION__);
2018 VNASSERT(vp->v_usecount > 0, vp,
2019 ("v_decr_useonly: negative usecount"));
2020 vp->v_usecount--;
2021 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2022 dev_lock();
2023 vp->v_rdev->si_usecount--;
2024 dev_unlock();
2025 }
2026 }
2027
2028 /*
2029 * Grab a particular vnode from the free list, increment its
2030 * reference count and lock it. The vnode lock bit is set if the
2031 * vnode is being eliminated in vgone. The process is awakened
2032 * when the transition is completed, and an error returned to
2033 * indicate that the vnode is no longer usable (possibly having
2034 * been changed to a new filesystem type).
2035 */
2036 int
2037 vget(struct vnode *vp, int flags, struct thread *td)
2038 {
2039 int oweinact;
2040 int oldflags;
2041 int error;
2042
2043 error = 0;
2044 oldflags = flags;
2045 oweinact = 0;
2046 VFS_ASSERT_GIANT(vp->v_mount);
2047 if ((flags & LK_INTERLOCK) == 0)
2048 VI_LOCK(vp);
2049 /*
2050 * If the inactive call was deferred because vput() was called
2051 * with a shared lock, we have to do it here before another thread
2052 * gets a reference to data that should be dead.
2053 */
2054 if (vp->v_iflag & VI_OWEINACT) {
2055 if (flags & LK_NOWAIT) {
2056 VI_UNLOCK(vp);
2057 return (EBUSY);
2058 }
2059 flags &= ~LK_TYPE_MASK;
2060 flags |= LK_EXCLUSIVE;
2061 oweinact = 1;
2062 }
2063 vholdl(vp);
2064 if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2065 vdrop(vp);
2066 return (error);
2067 }
2068 VI_LOCK(vp);
2069 /* Upgrade our holdcnt to a usecount. */
2070 v_upgrade_usecount(vp);
2071 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2072 panic("vget: vn_lock failed to return ENOENT\n");
2073 if (oweinact) {
2074 if (vp->v_iflag & VI_OWEINACT)
2075 vinactive(vp, td);
2076 VI_UNLOCK(vp);
2077 if ((oldflags & LK_TYPE_MASK) == 0)
2078 VOP_UNLOCK(vp, 0, td);
2079 } else
2080 VI_UNLOCK(vp);
2081 return (0);
2082 }
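
/*
 * Illustrative sketch: the usual pattern for referencing a vnode found
 * while holding its interlock.  The vnode is assumed to come from some
 * per-filesystem lookup; the caller here is hypothetical.
 */
#if 0
static int
example_use_vnode(struct vnode *vp, struct thread *td)
{
        int error;

        VI_LOCK(vp);
        /* LK_INTERLOCK hands the interlock to vget(). */
        error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td);
        if (error != 0)
                return (error);
        /* ... work on the exclusively locked, referenced vnode ... */
        vput(vp);               /* drop both the lock and the reference */
        return (0);
}
#endif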
2083
2084 /*
2085 * Increase the reference count of a vnode.
2086 */
2087 void
2088 vref(struct vnode *vp)
2089 {
2090
2091 VI_LOCK(vp);
2092 v_incr_usecount(vp);
2093 VI_UNLOCK(vp);
2094 }
2095
2096 /*
2097 * Return reference count of a vnode.
2098 *
2099 * The results of this call are only guaranteed when some mechanism other
2100 * than the VI lock is used to stop other processes from gaining references
2101 * to the vnode. This may be the case if the caller holds the only reference.
2102 * This is also useful when stale data is acceptable as race conditions may
2103 * be accounted for by some other means.
2104 */
2105 int
2106 vrefcnt(struct vnode *vp)
2107 {
2108 int usecnt;
2109
2110 VI_LOCK(vp);
2111 usecnt = vp->v_usecount;
2112 VI_UNLOCK(vp);
2113
2114 return (usecnt);
2115 }
2116
2117
2118 /*
2119 * Vnode put/release.
2120 * If count drops to zero, call inactive routine and return to freelist.
2121 */
2122 void
2123 vrele(struct vnode *vp)
2124 {
2125 struct thread *td = curthread; /* XXX */
2126
2127 KASSERT(vp != NULL, ("vrele: null vp"));
2128 VFS_ASSERT_GIANT(vp->v_mount);
2129
2130 VI_LOCK(vp);
2131
2132 /* Skip this v_writecount check if we're going to panic below. */
2133 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2134 ("vrele: missed vn_close"));
2135
2136 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2137 vp->v_usecount == 1)) {
2138 v_decr_usecount(vp);
2139 return;
2140 }
2141 if (vp->v_usecount != 1) {
2142 #ifdef DIAGNOSTIC
2143 vprint("vrele: negative ref count", vp);
2144 #endif
2145 VI_UNLOCK(vp);
2146 panic("vrele: negative ref cnt");
2147 }
2148 /*
2149 * We want to hold the vnode until the inactive finishes to
2150 * prevent vgone() races. We drop the use count here and the
2151 * hold count below when we're done.
2152 */
2153 v_decr_useonly(vp);
2154 /*
2155 * We must call VOP_INACTIVE with the node locked. Mark
2156 * as VI_DOINGINACT to avoid recursion.
2157 */
2158 vp->v_iflag |= VI_OWEINACT;
2159 if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2160 VI_LOCK(vp);
2161 if (vp->v_usecount > 0)
2162 vp->v_iflag &= ~VI_OWEINACT;
2163 if (vp->v_iflag & VI_OWEINACT)
2164 vinactive(vp, td);
2165 VOP_UNLOCK(vp, 0, td);
2166 } else {
2167 VI_LOCK(vp);
2168 if (vp->v_usecount > 0)
2169 vp->v_iflag &= ~VI_OWEINACT;
2170 }
2171 vdropl(vp);
2172 }
2173
2174 /*
2175 * Release an already locked vnode. This gives the same effect as
2176 * unlock+vrele(), but takes less time and avoids releasing and
2177 * re-acquiring the lock (as vrele() acquires the lock internally).
2178 */
2179 void
2180 vput(struct vnode *vp)
2181 {
2182 struct thread *td = curthread; /* XXX */
2183 int error;
2184
2185 KASSERT(vp != NULL, ("vput: null vp"));
2186 ASSERT_VOP_LOCKED(vp, "vput");
2187 VFS_ASSERT_GIANT(vp->v_mount);
2188 VI_LOCK(vp);
2189 /* Skip this v_writecount check if we're going to panic below. */
2190 VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2191 ("vput: missed vn_close"));
2192 error = 0;
2193
2194 if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2195 vp->v_usecount == 1)) {
2196 VOP_UNLOCK(vp, 0, td);
2197 v_decr_usecount(vp);
2198 return;
2199 }
2200
2201 if (vp->v_usecount != 1) {
2202 #ifdef DIAGNOSTIC
2203 vprint("vput: negative ref count", vp);
2204 #endif
2205 panic("vput: negative ref cnt");
2206 }
2207 /*
2208 * We want to hold the vnode until the inactive finishes to
2209 * prevent vgone() races. We drop the use count here and the
2210 * hold count below when we're done.
2211 */
2212 v_decr_useonly(vp);
2213 vp->v_iflag |= VI_OWEINACT;
2214 if (VOP_ISLOCKED(vp, NULL) != LK_EXCLUSIVE) {
2215 error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT, td);
2216 VI_LOCK(vp);
2217 if (error) {
2218 if (vp->v_usecount > 0)
2219 vp->v_iflag &= ~VI_OWEINACT;
2220 goto done;
2221 }
2222 }
2223 if (vp->v_usecount > 0)
2224 vp->v_iflag &= ~VI_OWEINACT;
2225 if (vp->v_iflag & VI_OWEINACT)
2226 vinactive(vp, td);
2227 VOP_UNLOCK(vp, 0, td);
2228 done:
2229 vdropl(vp);
2230 }
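
/*
 * Illustrative sketch: when to use vput() versus vrele().  Both drop one
 * use reference; vput() additionally expects (and releases) the vnode
 * lock, while vrele() will lock the vnode itself if inactive processing
 * is required.  The caller and its two vnodes are hypothetical.
 */
#if 0
static void
example_drop_references(struct vnode *locked_vp, struct vnode *unlocked_vp)
{

        vput(locked_vp);        /* unlock + release in one call */
        vrele(unlocked_vp);     /* release only; may lock internally */
}
#endif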
2231
2232 /*
2233 * Somebody doesn't want the vnode recycled.
2234 */
2235 void
2236 vhold(struct vnode *vp)
2237 {
2238
2239 VI_LOCK(vp);
2240 vholdl(vp);
2241 VI_UNLOCK(vp);
2242 }
2243
2244 void
2245 vholdl(struct vnode *vp)
2246 {
2247
2248 vp->v_holdcnt++;
2249 if (VSHOULDBUSY(vp))
2250 vbusy(vp);
2251 }
2252
2253 /*
2254 * Note that there is now one fewer holder who cares about this vnode. vdrop() is the
2255 * opposite of vhold().
2256 */
2257 void
2258 vdrop(struct vnode *vp)
2259 {
2260
2261 VI_LOCK(vp);
2262 vdropl(vp);
2263 }
2264
2265 /*
2266 * Drop the hold count of the vnode. If this is the last hold, the vnode
2267 * is destroyed if it has been vgone'd; otherwise it is placed on the
2268 * free list.
2269 */
2270 void
2271 vdropl(struct vnode *vp)
2272 {
2273
2274 ASSERT_VI_LOCKED(vp, "vdropl");
2275 if (vp->v_holdcnt <= 0)
2276 panic("vdrop: holdcnt %d", vp->v_holdcnt);
2277 vp->v_holdcnt--;
2278 if (vp->v_holdcnt == 0) {
2279 if (vp->v_iflag & VI_DOOMED) {
2280 vdestroy(vp);
2281 return;
2282 } else
2283 vfree(vp);
2284 }
2285 VI_UNLOCK(vp);
2286 }
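
/*
 * Illustrative sketch: vhold()/vdrop() pin a vnode across a blocking
 * operation without taking a full use reference, so it cannot be
 * recycled or destroyed underneath the caller.  The sleep stands in
 * for any blocking work done without the interlock.
 */
#if 0
static void
example_hold_across_sleep(struct vnode *vp)
{

        vhold(vp);                      /* keep the vnode from being recycled */
        tsleep(vp, PVFS, "exhold", hz); /* some blocking work */
        vdrop(vp);                      /* may return it to the free list */
}
#endif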
2287
2288 /*
2289 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2290 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
2291 * OWEINACT tracks whether a vnode missed a call to inactive due to a
2292 * failed lock upgrade.
2293 */
2294 static void
2295 vinactive(struct vnode *vp, struct thread *td)
2296 {
2297
2298 ASSERT_VOP_LOCKED(vp, "vinactive");
2299 ASSERT_VI_LOCKED(vp, "vinactive");
2300 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2301 ("vinactive: recursed on VI_DOINGINACT"));
2302 vp->v_iflag |= VI_DOINGINACT;
2303 vp->v_iflag &= ~VI_OWEINACT;
2304 VI_UNLOCK(vp);
2305 VOP_INACTIVE(vp, td);
2306 VI_LOCK(vp);
2307 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2308 ("vinactive: lost VI_DOINGINACT"));
2309 vp->v_iflag &= ~VI_DOINGINACT;
2310 }
2311
2312 /*
2313 * Remove any vnodes in the vnode table belonging to mount point mp.
2314 *
2315 * If FORCECLOSE is not specified, there should not be any active ones,
2316 * return error if any are found (nb: this is a user error, not a
2317 * system error). If FORCECLOSE is specified, detach any active vnodes
2318 * that are found.
2319 *
2320 * If WRITECLOSE is set, only flush out regular file vnodes open for
2321 * writing.
2322 *
2323 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2324 *
2325 * `rootrefs' specifies the base reference count for the root vnode
2326 * of this filesystem. The root vnode is considered busy if its
2327 * v_usecount exceeds this value. On a successful return, vflush(, td)
2328 * will call vrele() on the root vnode exactly rootrefs times.
2329 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2330 * be zero.
2331 */
2332 #ifdef DIAGNOSTIC
2333 static int busyprt = 0; /* print out busy vnodes */
2334 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2335 #endif
2336
2337 int
2338 vflush( struct mount *mp, int rootrefs, int flags, struct thread *td)
2339 {
2340 struct vnode *vp, *mvp, *rootvp = NULL;
2341 struct vattr vattr;
2342 int busy = 0, error;
2343
2344 CTR1(KTR_VFS, "vflush: mp %p", mp);
2345 if (rootrefs > 0) {
2346 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2347 ("vflush: bad args"));
2348 /*
2349 * Get the filesystem root vnode. We can vput() it
2350 * immediately, since with rootrefs > 0, it won't go away.
2351 */
2352 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0)
2353 return (error);
2354 vput(rootvp);
2355
2356 }
2357 MNT_ILOCK(mp);
2358 loop:
2359 MNT_VNODE_FOREACH(vp, mp, mvp) {
2360
2361 VI_LOCK(vp);
2362 vholdl(vp);
2363 MNT_IUNLOCK(mp);
2364 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2365 if (error) {
2366 vdrop(vp);
2367 MNT_ILOCK(mp);
2368 MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
2369 goto loop;
2370 }
2371 /*
2372 * Skip over any vnodes marked VV_SYSTEM.
2373 */
2374 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2375 VOP_UNLOCK(vp, 0, td);
2376 vdrop(vp);
2377 MNT_ILOCK(mp);
2378 continue;
2379 }
2380 /*
2381 * If WRITECLOSE is set, flush out unlinked but still open
2382 * files (even if open only for reading) and regular file
2383 * vnodes open for writing.
2384 */
2385 if (flags & WRITECLOSE) {
2386 error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2387 VI_LOCK(vp);
2388
2389 if ((vp->v_type == VNON ||
2390 (error == 0 && vattr.va_nlink > 0)) &&
2391 (vp->v_writecount == 0 || vp->v_type != VREG)) {
2392 VOP_UNLOCK(vp, 0, td);
2393 vdropl(vp);
2394 MNT_ILOCK(mp);
2395 continue;
2396 }
2397 } else
2398 VI_LOCK(vp);
2399 /*
2400 * With v_usecount == 0, all we need to do is clear out the
2401 * vnode data structures and we are done.
2402 *
2403 * If FORCECLOSE is set, forcibly close the vnode.
2404 */
2405 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2406 VNASSERT(vp->v_usecount == 0 ||
2407 (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2408 ("device VNODE %p is FORCECLOSED", vp));
2409 vgonel(vp);
2410 } else {
2411 busy++;
2412 #ifdef DIAGNOSTIC
2413 if (busyprt)
2414 vprint("vflush: busy vnode", vp);
2415 #endif
2416 }
2417 VOP_UNLOCK(vp, 0, td);
2418 vdropl(vp);
2419 MNT_ILOCK(mp);
2420 }
2421 MNT_IUNLOCK(mp);
2422 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2423 /*
2424 * If just the root vnode is busy, and if its refcount
2425 * is equal to `rootrefs', then go ahead and kill it.
2426 */
2427 VI_LOCK(rootvp);
2428 KASSERT(busy > 0, ("vflush: not busy"));
2429 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2430 ("vflush: usecount %d < rootrefs %d",
2431 rootvp->v_usecount, rootrefs));
2432 if (busy == 1 && rootvp->v_usecount == rootrefs) {
2433 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK, td);
2434 vgone(rootvp);
2435 VOP_UNLOCK(rootvp, 0, td);
2436 busy = 0;
2437 } else
2438 VI_UNLOCK(rootvp);
2439 }
2440 if (busy)
2441 return (EBUSY);
2442 for (; rootrefs > 0; rootrefs--)
2443 vrele(rootvp);
2444 return (0);
2445 }
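
/*
 * Illustrative sketch: how a filesystem's unmount routine typically uses
 * vflush().  The rootrefs value of 0 is an assumption; a filesystem that
 * caches references to its root vnode passes the number of references it
 * holds so the root is not counted as busy.
 */
#if 0
static int
example_unmount(struct mount *mp, int mntflags, struct thread *td)
{
        int error, flags;

        flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
        error = vflush(mp, 0, flags, td);
        if (error != 0)
                return (error);
        /* ... tear down filesystem-private state ... */
        return (0);
}
#endif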
2446
2447 /*
2448 * Recycle an unused vnode to the front of the free list.
2449 */
2450 int
2451 vrecycle(struct vnode *vp, struct thread *td)
2452 {
2453 int recycled;
2454
2455 ASSERT_VOP_LOCKED(vp, "vrecycle");
2456 recycled = 0;
2457 VI_LOCK(vp);
2458 if (vp->v_usecount == 0) {
2459 recycled = 1;
2460 vgonel(vp);
2461 }
2462 VI_UNLOCK(vp);
2463 return (recycled);
2464 }
2465
2466 /*
2467 * Eliminate all activity associated with a vnode
2468 * in preparation for reuse.
2469 */
2470 void
2471 vgone(struct vnode *vp)
2472 {
2473 VI_LOCK(vp);
2474 vgonel(vp);
2475 VI_UNLOCK(vp);
2476 }
2477
2478 /*
2479 * vgone, with the vp interlock held.
2480 */
2481 void
2482 vgonel(struct vnode *vp)
2483 {
2484 struct thread *td;
2485 int oweinact;
2486 int active;
2487 struct mount *mp;
2488
2489 CTR1(KTR_VFS, "vgonel: vp %p", vp);
2490 ASSERT_VOP_LOCKED(vp, "vgonel");
2491 ASSERT_VI_LOCKED(vp, "vgonel");
2492 VNASSERT(vp->v_holdcnt, vp,
2493 ("vgonel: vp %p has no reference.", vp));
2494 td = curthread;
2495
2496 /*
2497 * Don't vgonel if we're already doomed.
2498 */
2499 if (vp->v_iflag & VI_DOOMED)
2500 return;
2501 vp->v_iflag |= VI_DOOMED;
2502 /*
2503 * Check to see if the vnode is in use. If so, we have to call
2504 * VOP_CLOSE() and VOP_INACTIVE().
2505 */
2506 active = vp->v_usecount;
2507 oweinact = (vp->v_iflag & VI_OWEINACT);
2508 VI_UNLOCK(vp);
2509 /*
2510 * Clean out any buffers associated with the vnode.
2511 * If the flush fails, just toss the buffers.
2512 */
2513 mp = NULL;
2514 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2515 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
2516 if (vinvalbuf(vp, V_SAVE, td, 0, 0) != 0)
2517 vinvalbuf(vp, 0, td, 0, 0);
2518
2519 /*
2520 * If purging an active vnode, it must be closed and
2521 * deactivated before being reclaimed.
2522 */
2523 if (active)
2524 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2525 if (oweinact || active) {
2526 VI_LOCK(vp);
2527 if ((vp->v_iflag & VI_DOINGINACT) == 0)
2528 vinactive(vp, td);
2529 VI_UNLOCK(vp);
2530 }
2531 /*
2532 * Reclaim the vnode.
2533 */
2534 if (VOP_RECLAIM(vp, td))
2535 panic("vgone: cannot reclaim");
2536 if (mp != NULL)
2537 vn_finished_secondary_write(mp);
2538 VNASSERT(vp->v_object == NULL, vp,
2539 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2540 /*
2541 * Clear the advisory locks and wake up waiting threads.
2542 */
2543 lf_purgelocks(vp, &(vp->v_lockf));
2544 /*
2545 * Delete from old mount point vnode list.
2546 */
2547 delmntque(vp);
2548 cache_purge(vp);
2549 /*
2550 * Done with purge, reset to the standard lock and invalidate
2551 * the vnode.
2552 */
2553 VI_LOCK(vp);
2554 vp->v_vnlock = &vp->v_lock;
2555 vp->v_op = &dead_vnodeops;
2556 vp->v_tag = "none";
2557 vp->v_type = VBAD;
2558 }
2559
2560 /*
2561 * Calculate the total number of references to a special device.
2562 */
2563 int
2564 vcount(struct vnode *vp)
2565 {
2566 int count;
2567
2568 dev_lock();
2569 count = vp->v_rdev->si_usecount;
2570 dev_unlock();
2571 return (count);
2572 }
2573
2574 /*
2575 * Same as above, but using the struct cdev * as the argument.
2576 */
2577 int
2578 count_dev(struct cdev *dev)
2579 {
2580 int count;
2581
2582 dev_lock();
2583 count = dev->si_usecount;
2584 dev_unlock();
2585 return(count);
2586 }
2587
2588 /*
2589 * Print out a description of a vnode.
2590 */
2591 static char *typename[] =
2592 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2593 "VMARKER"};
2594
2595 void
2596 vn_printf(struct vnode *vp, const char *fmt, ...)
2597 {
2598 va_list ap;
2599 char buf[256], buf2[16];
2600 u_long flags;
2601
2602 va_start(ap, fmt);
2603 vprintf(fmt, ap);
2604 va_end(ap);
2605 printf("%p: ", (void *)vp);
2606 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2607 printf(" usecount %d, writecount %d, refcount %d mountedhere %p\n",
2608 vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2609 buf[0] = '\0';
2610 buf[1] = '\0';
2611 if (vp->v_vflag & VV_ROOT)
2612 strlcat(buf, "|VV_ROOT", sizeof(buf));
2613 if (vp->v_vflag & VV_ISTTY)
2614 strlcat(buf, "|VV_ISTTY", sizeof(buf));
2615 if (vp->v_vflag & VV_NOSYNC)
2616 strlcat(buf, "|VV_NOSYNC", sizeof(buf));
2617 if (vp->v_vflag & VV_CACHEDLABEL)
2618 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
2619 if (vp->v_vflag & VV_TEXT)
2620 strlcat(buf, "|VV_TEXT", sizeof(buf));
2621 if (vp->v_vflag & VV_COPYONWRITE)
2622 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
2623 if (vp->v_vflag & VV_SYSTEM)
2624 strlcat(buf, "|VV_SYSTEM", sizeof(buf));
2625 if (vp->v_vflag & VV_PROCDEP)
2626 strlcat(buf, "|VV_PROCDEP", sizeof(buf));
2627 if (vp->v_vflag & VV_NOKNOTE)
2628 strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
2629 if (vp->v_vflag & VV_DELETED)
2630 strlcat(buf, "|VV_DELETED", sizeof(buf));
2631 if (vp->v_vflag & VV_MD)
2632 strlcat(buf, "|VV_MD", sizeof(buf));
2633 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC |
2634 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
2635 VV_NOKNOTE | VV_DELETED | VV_MD);
2636 if (flags != 0) {
2637 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
2638 strlcat(buf, buf2, sizeof(buf));
2639 }
2640 if (vp->v_iflag & VI_MOUNT)
2641 strlcat(buf, "|VI_MOUNT", sizeof(buf));
2642 if (vp->v_iflag & VI_AGE)
2643 strlcat(buf, "|VI_AGE", sizeof(buf));
2644 if (vp->v_iflag & VI_DOOMED)
2645 strlcat(buf, "|VI_DOOMED", sizeof(buf));
2646 if (vp->v_iflag & VI_FREE)
2647 strlcat(buf, "|VI_FREE", sizeof(buf));
2648 if (vp->v_iflag & VI_OBJDIRTY)
2649 strlcat(buf, "|VI_OBJDIRTY", sizeof(buf));
2650 if (vp->v_iflag & VI_DOINGINACT)
2651 strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
2652 if (vp->v_iflag & VI_OWEINACT)
2653 strlcat(buf, "|VI_OWEINACT", sizeof(buf));
2654 flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
2655 VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT);
2656 if (flags != 0) {
2657 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
2658 strlcat(buf, buf2, sizeof(buf));
2659 }
2660 printf(" flags (%s)\n", buf + 1);
2661 if (mtx_owned(VI_MTX(vp)))
2662 printf(" VI_LOCKed");
2663 if (vp->v_object != NULL)
2664 printf(" v_object %p ref %d pages %d\n",
2665 vp->v_object, vp->v_object->ref_count,
2666 vp->v_object->resident_page_count);
2667 printf(" ");
2668 lockmgr_printinfo(vp->v_vnlock);
2669 printf("\n");
2670 if (vp->v_data != NULL)
2671 VOP_PRINT(vp);
2672 }
2673
2674 #ifdef DDB
2675 /*
2676 * List all of the locked vnodes in the system.
2677 * Called when debugging the kernel.
2678 */
2679 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2680 {
2681 struct mount *mp, *nmp;
2682 struct vnode *vp;
2683
2684 /*
2685 * Note: because this is DDB, we can't obey the locking semantics
2686 * for these structures, which means we could catch an inconsistent
2687 * state and dereference a nasty pointer. Not much to be done
2688 * about that.
2689 */
2690 db_printf("Locked vnodes\n");
2691 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2692 nmp = TAILQ_NEXT(mp, mnt_list);
2693 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2694 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp, NULL))
2695 vprint("", vp);
2696 }
2697 nmp = TAILQ_NEXT(mp, mnt_list);
2698 }
2699 }
2700
2701 /*
2702 * Show details about the given vnode.
2703 */
2704 DB_SHOW_COMMAND(vnode, db_show_vnode)
2705 {
2706 struct vnode *vp;
2707
2708 if (!have_addr)
2709 return;
2710 vp = (struct vnode *)addr;
2711 vn_printf(vp, "vnode ");
2712 }
2713
2714 /*
2715 * Show details about the given mount point.
2716 */
2717 DB_SHOW_COMMAND(mount, db_show_mount)
2718 {
2719 struct mount *mp;
2720 struct statfs *sp;
2721 struct vnode *vp;
2722 char buf[512];
2723 u_int flags;
2724
2725 if (!have_addr) {
2726 /* No address given, print short info about all mount points. */
2727 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2728 db_printf("%p %s on %s (%s)\n", mp,
2729 mp->mnt_stat.f_mntfromname,
2730 mp->mnt_stat.f_mntonname,
2731 mp->mnt_stat.f_fstypename);
2732 if (db_pager_quit)
2733 break;
2734 }
2735 db_printf("\nMore info: show mount <addr>\n");
2736 return;
2737 }
2738
2739 mp = (struct mount *)addr;
2740 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
2741 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
2742
2743 buf[0] = '\0';
2744 flags = mp->mnt_flag;
2745 #define MNT_FLAG(flag) do { \
2746 if (flags & (flag)) { \
2747 if (buf[0] != '\0') \
2748 strlcat(buf, ", ", sizeof(buf)); \
2749 strlcat(buf, (#flag) + 4, sizeof(buf)); \
2750 flags &= ~(flag); \
2751 } \
2752 } while (0)
2753 MNT_FLAG(MNT_RDONLY);
2754 MNT_FLAG(MNT_SYNCHRONOUS);
2755 MNT_FLAG(MNT_NOEXEC);
2756 MNT_FLAG(MNT_NOSUID);
2757 MNT_FLAG(MNT_UNION);
2758 MNT_FLAG(MNT_ASYNC);
2759 MNT_FLAG(MNT_SUIDDIR);
2760 MNT_FLAG(MNT_SOFTDEP);
2761 MNT_FLAG(MNT_NOSYMFOLLOW);
2762 MNT_FLAG(MNT_GJOURNAL);
2763 MNT_FLAG(MNT_MULTILABEL);
2764 MNT_FLAG(MNT_ACLS);
2765 MNT_FLAG(MNT_NOATIME);
2766 MNT_FLAG(MNT_NOCLUSTERR);
2767 MNT_FLAG(MNT_NOCLUSTERW);
2768 MNT_FLAG(MNT_EXRDONLY);
2769 MNT_FLAG(MNT_EXPORTED);
2770 MNT_FLAG(MNT_DEFEXPORTED);
2771 MNT_FLAG(MNT_EXPORTANON);
2772 MNT_FLAG(MNT_EXKERB);
2773 MNT_FLAG(MNT_EXPUBLIC);
2774 MNT_FLAG(MNT_LOCAL);
2775 MNT_FLAG(MNT_QUOTA);
2776 MNT_FLAG(MNT_ROOTFS);
2777 MNT_FLAG(MNT_USER);
2778 MNT_FLAG(MNT_IGNORE);
2779 MNT_FLAG(MNT_UPDATE);
2780 MNT_FLAG(MNT_DELEXPORT);
2781 MNT_FLAG(MNT_RELOAD);
2782 MNT_FLAG(MNT_FORCE);
2783 MNT_FLAG(MNT_SNAPSHOT);
2784 MNT_FLAG(MNT_BYFSID);
2785 #undef MNT_FLAG
2786 if (flags != 0) {
2787 if (buf[0] != '\0')
2788 strlcat(buf, ", ", sizeof(buf));
2789 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2790 "0x%08x", flags);
2791 }
2792 db_printf(" mnt_flag = %s\n", buf);
2793
2794 buf[0] = '\0';
2795 flags = mp->mnt_kern_flag;
2796 #define MNT_KERN_FLAG(flag) do { \
2797 if (flags & (flag)) { \
2798 if (buf[0] != '\0') \
2799 strlcat(buf, ", ", sizeof(buf)); \
2800 strlcat(buf, (#flag) + 5, sizeof(buf)); \
2801 flags &= ~(flag); \
2802 } \
2803 } while (0)
2804 MNT_KERN_FLAG(MNTK_UNMOUNTF);
2805 MNT_KERN_FLAG(MNTK_ASYNC);
2806 MNT_KERN_FLAG(MNTK_SOFTDEP);
2807 MNT_KERN_FLAG(MNTK_NOINSMNTQ);
2808 MNT_KERN_FLAG(MNTK_UNMOUNT);
2809 MNT_KERN_FLAG(MNTK_MWAIT);
2810 MNT_KERN_FLAG(MNTK_SUSPEND);
2811 MNT_KERN_FLAG(MNTK_SUSPEND2);
2812 MNT_KERN_FLAG(MNTK_SUSPENDED);
2813 MNT_KERN_FLAG(MNTK_MPSAFE);
2814 MNT_KERN_FLAG(MNTK_NOKNOTE);
2815 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
2816 #undef MNT_KERN_FLAG
2817 if (flags != 0) {
2818 if (buf[0] != '\0')
2819 strlcat(buf, ", ", sizeof(buf));
2820 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2821 "0x%08x", flags);
2822 }
2823 db_printf(" mnt_kern_flag = %s\n", buf);
2824
2825 sp = &mp->mnt_stat;
2826 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx "
2827 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
2828 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
2829 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
2830 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
2831 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
2832 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
2833 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
2834 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
2835 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
2836 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
2837 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
2838
2839 db_printf(" mnt_cred = { uid=%u ruid=%u",
2840 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
2841 if (mp->mnt_cred->cr_prison != NULL)
2842 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
2843 db_printf(" }\n");
2844 db_printf(" mnt_ref = %d\n", mp->mnt_ref);
2845 db_printf(" mnt_gen = %d\n", mp->mnt_gen);
2846 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
2847 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount);
2848 db_printf(" mnt_noasync = %u\n", mp->mnt_noasync);
2849 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
2850 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max);
2851 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed);
2852 db_printf(" mnt_markercnt = %d\n", mp->mnt_markercnt);
2853 db_printf(" mnt_holdcnt = %d\n", mp->mnt_holdcnt);
2854 db_printf(" mnt_holdcntwaiters = %d\n", mp->mnt_holdcntwaiters);
2855 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
2856 db_printf(" mnt_secondary_accwrites = %d\n",
2857 mp->mnt_secondary_accwrites);
2858 db_printf(" mnt_gjprovider = %s\n",
2859 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
2860 db_printf("\n");
2861
2862 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2863 if (vp->v_type != VMARKER) {
2864 vn_printf(vp, "vnode ");
2865 if (db_pager_quit)
2866 break;
2867 }
2868 }
2869 }
2870 #endif /* DDB */
2871
2872 /*
2873 * Fill in a struct xvfsconf based on a struct vfsconf.
2874 */
2875 static void
2876 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2877 {
2878
2879 strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2880 xvfsp->vfc_typenum = vfsp->vfc_typenum;
2881 xvfsp->vfc_refcount = vfsp->vfc_refcount;
2882 xvfsp->vfc_flags = vfsp->vfc_flags;
2883 /*
2884 * These are unused in userland; we keep them
2885 * so as not to break binary compatibility.
2886 */
2887 xvfsp->vfc_vfsops = NULL;
2888 xvfsp->vfc_next = NULL;
2889 }
2890
2891 /*
2892 * Top-level filesystem-related information gathering.
2893 */
2894 static int
2895 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2896 {
2897 struct vfsconf *vfsp;
2898 struct xvfsconf xvfsp;
2899 int error;
2900
2901 error = 0;
2902 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2903 bzero(&xvfsp, sizeof(xvfsp));
2904 vfsconf2x(vfsp, &xvfsp);
2905 error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2906 if (error)
2907 break;
2908 }
2909 return (error);
2910 }
2911
2912 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2913 "S,xvfsconf", "List of all configured filesystems");
2914
2915 #ifndef BURN_BRIDGES
2916 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2917
2918 static int
2919 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2920 {
2921 int *name = (int *)arg1 - 1; /* XXX */
2922 u_int namelen = arg2 + 1; /* XXX */
2923 struct vfsconf *vfsp;
2924 struct xvfsconf xvfsp;
2925
2926 printf("WARNING: userland calling deprecated sysctl, "
2927 "please rebuild world\n");
2928
2929 #if 1 || defined(COMPAT_PRELITE2)
2930 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2931 if (namelen == 1)
2932 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2933 #endif
2934
2935 switch (name[1]) {
2936 case VFS_MAXTYPENUM:
2937 if (namelen != 2)
2938 return (ENOTDIR);
2939 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2940 case VFS_CONF:
2941 if (namelen != 3)
2942 return (ENOTDIR); /* overloaded */
2943 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2944 if (vfsp->vfc_typenum == name[2])
2945 break;
2946 if (vfsp == NULL)
2947 return (EOPNOTSUPP);
2948 bzero(&xvfsp, sizeof(xvfsp));
2949 vfsconf2x(vfsp, &xvfsp);
2950 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2951 }
2952 return (EOPNOTSUPP);
2953 }
2954
2955 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
2956 vfs_sysctl, "Generic filesystem");
2957
2958 #if 1 || defined(COMPAT_PRELITE2)
2959
2960 static int
2961 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2962 {
2963 int error;
2964 struct vfsconf *vfsp;
2965 struct ovfsconf ovfs;
2966
2967 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2968 bzero(&ovfs, sizeof(ovfs));
2969 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
2970 strcpy(ovfs.vfc_name, vfsp->vfc_name);
2971 ovfs.vfc_index = vfsp->vfc_typenum;
2972 ovfs.vfc_refcount = vfsp->vfc_refcount;
2973 ovfs.vfc_flags = vfsp->vfc_flags;
2974 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2975 if (error)
2976 return error;
2977 }
2978 return 0;
2979 }
2980
2981 #endif /* 1 || COMPAT_PRELITE2 */
2982 #endif /* !BURN_BRIDGES */
2983
2984 #define KINFO_VNODESLOP 10
2985 #ifdef notyet
2986 /*
2987 * Dump vnode list (via sysctl).
2988 */
2989 /* ARGSUSED */
2990 static int
2991 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2992 {
2993 struct xvnode *xvn;
2994 struct thread *td = req->td;
2995 struct mount *mp;
2996 struct vnode *vp;
2997 int error, len, n;
2998
2999 /*
3000 * Stale numvnodes access is not fatal here.
3001 */
3002 req->lock = 0;
3003 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3004 if (!req->oldptr)
3005 /* Make an estimate */
3006 return (SYSCTL_OUT(req, 0, len));
3007
3008 error = sysctl_wire_old_buffer(req, 0);
3009 if (error != 0)
3010 return (error);
3011 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3012 n = 0;
3013 mtx_lock(&mountlist_mtx);
3014 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3015 if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3016 continue;
3017 MNT_ILOCK(mp);
3018 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3019 if (n == len)
3020 break;
3021 vref(vp);
3022 xvn[n].xv_size = sizeof *xvn;
3023 xvn[n].xv_vnode = vp;
3024 xvn[n].xv_id = 0; /* XXX compat */
3025 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3026 XV_COPY(usecount);
3027 XV_COPY(writecount);
3028 XV_COPY(holdcnt);
3029 XV_COPY(mount);
3030 XV_COPY(numoutput);
3031 XV_COPY(type);
3032 #undef XV_COPY
3033 xvn[n].xv_flag = vp->v_vflag;
3034
3035 switch (vp->v_type) {
3036 case VREG:
3037 case VDIR:
3038 case VLNK:
3039 break;
3040 case VBLK:
3041 case VCHR:
3042 if (vp->v_rdev == NULL) {
3043 vrele(vp);
3044 continue;
3045 }
3046 xvn[n].xv_dev = dev2udev(vp->v_rdev);
3047 break;
3048 case VSOCK:
3049 xvn[n].xv_socket = vp->v_socket;
3050 break;
3051 case VFIFO:
3052 xvn[n].xv_fifo = vp->v_fifoinfo;
3053 break;
3054 case VNON:
3055 case VBAD:
3056 default:
3057 /* shouldn't happen? */
3058 vrele(vp);
3059 continue;
3060 }
3061 vrele(vp);
3062 ++n;
3063 }
3064 MNT_IUNLOCK(mp);
3065 mtx_lock(&mountlist_mtx);
3066 vfs_unbusy(mp, td);
3067 if (n == len)
3068 break;
3069 }
3070 mtx_unlock(&mountlist_mtx);
3071
3072 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3073 free(xvn, M_TEMP);
3074 return (error);
3075 }
3076
3077 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3078 0, 0, sysctl_vnode, "S,xvnode", "");
3079 #endif
3080
3081 /*
3082 * Unmount all filesystems. The list is traversed in reverse order
3083 * of mounting to avoid dependencies.
3084 */
3085 void
3086 vfs_unmountall(void)
3087 {
3088 struct mount *mp;
3089 struct thread *td;
3090 int error;
3091
3092 KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
3093 td = curthread;
3094 /*
3095 * Since this only runs when rebooting, it is not interlocked.
3096 */
3097 while(!TAILQ_EMPTY(&mountlist)) {
3098 mp = TAILQ_LAST(&mountlist, mntlist);
3099 error = dounmount(mp, MNT_FORCE, td);
3100 if (error) {
3101 TAILQ_REMOVE(&mountlist, mp, mnt_list);
3102 /*
3103 * XXX: Due to the way in which we mount the root
3104 * file system off of devfs, devfs will generate a
3105 * "busy" warning when we try to unmount it before
3106 * the root. Don't print a warning as a result in
3107 * order to avoid false positive errors that may
3108 * cause needless upset.
3109 */
3110 if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
3111 printf("unmount of %s failed (",
3112 mp->mnt_stat.f_mntonname);
3113 if (error == EBUSY)
3114 printf("BUSY)\n");
3115 else
3116 printf("%d)\n", error);
3117 }
3118 } else {
3119 /* The unmount has removed mp from the mountlist */
3120 }
3121 }
3122 }
3123
3124 /*
3125 * Perform msync on all vnodes under a mount point.
3126 * The mount point must be locked.
3127 */
3128 void
3129 vfs_msync(struct mount *mp, int flags)
3130 {
3131 struct vnode *vp, *mvp;
3132 struct vm_object *obj;
3133
3134 MNT_ILOCK(mp);
3135 MNT_VNODE_FOREACH(vp, mp, mvp) {
3136 VI_LOCK(vp);
3137 if ((vp->v_iflag & VI_OBJDIRTY) &&
3138 (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3139 MNT_IUNLOCK(mp);
3140 if (!vget(vp,
3141 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3142 curthread)) {
3143 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
3144 vput(vp);
3145 MNT_ILOCK(mp);
3146 continue;
3147 }
3148
3149 obj = vp->v_object;
3150 if (obj != NULL) {
3151 VM_OBJECT_LOCK(obj);
3152 vm_object_page_clean(obj, 0, 0,
3153 flags == MNT_WAIT ?
3154 OBJPC_SYNC : OBJPC_NOSYNC);
3155 VM_OBJECT_UNLOCK(obj);
3156 }
3157 vput(vp);
3158 }
3159 MNT_ILOCK(mp);
3160 } else
3161 VI_UNLOCK(vp);
3162 }
3163 MNT_IUNLOCK(mp);
3164 }
3165
3166 /*
3167 * Mark a vnode as free, putting it up for recycling.
3168 */
3169 static void
3170 vfree(struct vnode *vp)
3171 {
3172
3173 CTR1(KTR_VFS, "vfree vp %p", vp);
3174 ASSERT_VI_LOCKED(vp, "vfree");
3175 mtx_lock(&vnode_free_list_mtx);
3176 VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
3177 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
3178 VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
3179 VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
3180 ("vfree: Freeing doomed vnode"));
3181 if (vp->v_iflag & VI_AGE) {
3182 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3183 } else {
3184 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3185 }
3186 freevnodes++;
3187 vp->v_iflag &= ~VI_AGE;
3188 vp->v_iflag |= VI_FREE;
3189 mtx_unlock(&vnode_free_list_mtx);
3190 }
3191
3192 /*
3193 * Opposite of vfree() - mark a vnode as in use.
3194 */
3195 static void
3196 vbusy(struct vnode *vp)
3197 {
3198 CTR1(KTR_VFS, "vbusy vp %p", vp);
3199 ASSERT_VI_LOCKED(vp, "vbusy");
3200 VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
3201 VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
3202
3203 mtx_lock(&vnode_free_list_mtx);
3204 TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3205 freevnodes--;
3206 vp->v_iflag &= ~(VI_FREE|VI_AGE);
3207 mtx_unlock(&vnode_free_list_mtx);
3208 }
3209
3210 static void
3211 destroy_vpollinfo(struct vpollinfo *vi)
3212 {
3213 knlist_destroy(&vi->vpi_selinfo.si_note);
3214 mtx_destroy(&vi->vpi_lock);
3215 uma_zfree(vnodepoll_zone, vi);
3216 }
3217
3218 /*
3219 * Initialize per-vnode helper structure to hold poll-related state.
3220 */
3221 void
3222 v_addpollinfo(struct vnode *vp)
3223 {
3224 struct vpollinfo *vi;
3225
3226 if (vp->v_pollinfo != NULL)
3227 return;
3228 vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3229 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3230 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3231 vfs_knlunlock, vfs_knllocked);
3232 VI_LOCK(vp);
3233 if (vp->v_pollinfo != NULL) {
3234 VI_UNLOCK(vp);
3235 destroy_vpollinfo(vi);
3236 return;
3237 }
3238 vp->v_pollinfo = vi;
3239 VI_UNLOCK(vp);
3240 }
3241
3242 /*
3243 * Record a process's interest in events which might happen to
3244 * a vnode. Because poll uses the historic select-style interface
3245 * internally, this routine serves as both the ``check for any
3246 * pending events'' and the ``record my interest in future events''
3247 * functions. (These are done together, while the lock is held,
3248 * to avoid race conditions.)
3249 */
3250 int
3251 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3252 {
3253
3254 v_addpollinfo(vp);
3255 mtx_lock(&vp->v_pollinfo->vpi_lock);
3256 if (vp->v_pollinfo->vpi_revents & events) {
3257 /*
3258 * This leaves events we are not interested
3259 * in available for the other process which
3260 * presumably had requested them
3261 * (otherwise they would never have been
3262 * recorded).
3263 */
3264 events &= vp->v_pollinfo->vpi_revents;
3265 vp->v_pollinfo->vpi_revents &= ~events;
3266
3267 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3268 return events;
3269 }
3270 vp->v_pollinfo->vpi_events |= events;
3271 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3272 mtx_unlock(&vp->v_pollinfo->vpi_lock);
3273 return 0;
3274 }
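
/*
 * Illustrative sketch: a filesystem VOP_POLL implementation with no event
 * sources of its own can simply record the caller's interest here.  The
 * function name is hypothetical.
 */
#if 0
static int
example_fs_poll(struct vop_poll_args *ap)
{

        return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif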
3275
3276 /*
3277 * Routine to create and manage a filesystem syncer vnode.
3278 */
3279 #define sync_close ((int (*)(struct vop_close_args *))nullop)
3280 static int sync_fsync(struct vop_fsync_args *);
3281 static int sync_inactive(struct vop_inactive_args *);
3282 static int sync_reclaim(struct vop_reclaim_args *);
3283
3284 static struct vop_vector sync_vnodeops = {
3285 .vop_bypass = VOP_EOPNOTSUPP,
3286 .vop_close = sync_close, /* close */
3287 .vop_fsync = sync_fsync, /* fsync */
3288 .vop_inactive = sync_inactive, /* inactive */
3289 .vop_reclaim = sync_reclaim, /* reclaim */
3290 .vop_lock1 = vop_stdlock, /* lock */
3291 .vop_unlock = vop_stdunlock, /* unlock */
3292 .vop_islocked = vop_stdislocked, /* islocked */
3293 };
3294
3295 /*
3296 * Create a new filesystem syncer vnode for the specified mount point.
3297 */
3298 int
3299 vfs_allocate_syncvnode(struct mount *mp)
3300 {
3301 struct vnode *vp;
3302 static long start, incr, next;
3303 int error;
3304
3305 /* Allocate a new vnode */
3306 if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3307 mp->mnt_syncer = NULL;
3308 return (error);
3309 }
3310 vp->v_type = VNON;
3311 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
3312 vp->v_vflag |= VV_FORCEINSMQ;
3313 error = insmntque(vp, mp);
3314 if (error != 0)
3315 panic("vfs_allocate_syncvnode: insmntque failed");
3316 vp->v_vflag &= ~VV_FORCEINSMQ;
3317 VOP_UNLOCK(vp, 0, curthread);
3318 /*
3319 * Place the vnode onto the syncer worklist. We attempt to
3320 * scatter them about on the list so that they will go off
3321 * at evenly distributed times even if all the filesystems
3322 * are mounted at once.
3323 */
3324 next += incr;
3325 if (next == 0 || next > syncer_maxdelay) {
3326 start /= 2;
3327 incr /= 2;
3328 if (start == 0) {
3329 start = syncer_maxdelay / 2;
3330 incr = syncer_maxdelay;
3331 }
3332 next = start;
3333 }
3334 VI_LOCK(vp);
3335 vn_syncer_add_to_worklist(&vp->v_bufobj,
3336 syncdelay > 0 ? next % syncdelay : 0);
3337 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3338 mtx_lock(&sync_mtx);
3339 sync_vnode_count++;
3340 mtx_unlock(&sync_mtx);
3341 VI_UNLOCK(vp);
3342 mp->mnt_syncer = vp;
3343 return (0);
3344 }
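
/*
 * Worked example of the scatter arithmetic above, assuming a
 * syncer_maxdelay of 32: successive syncer vnodes are assigned worklist
 * slots 16, 8, 24, 4, 12, 20, 28, 2, ...  Each wrap past syncer_maxdelay
 * halves `start' and `incr', so the slots fill in a binary-subdivision
 * pattern and filesystems mounted back-to-back still sync at
 * well-separated times.
 */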
3345
3346 /*
3347 * Do a lazy sync of the filesystem.
3348 */
3349 static int
3350 sync_fsync(struct vop_fsync_args *ap)
3351 {
3352 struct vnode *syncvp = ap->a_vp;
3353 struct mount *mp = syncvp->v_mount;
3354 struct thread *td = ap->a_td;
3355 int error;
3356 struct bufobj *bo;
3357
3358 /*
3359 * We only need to do something if this is a lazy evaluation.
3360 */
3361 if (ap->a_waitfor != MNT_LAZY)
3362 return (0);
3363
3364 /*
3365 * Move ourselves to the back of the sync list.
3366 */
3367 bo = &syncvp->v_bufobj;
3368 BO_LOCK(bo);
3369 vn_syncer_add_to_worklist(bo, syncdelay);
3370 BO_UNLOCK(bo);
3371
3372 /*
3373 * Walk the list of vnodes pushing all that are dirty and
3374 * not already on the sync list.
3375 */
3376 mtx_lock(&mountlist_mtx);
3377 if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3378 mtx_unlock(&mountlist_mtx);
3379 return (0);
3380 }
3381 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3382 vfs_unbusy(mp, td);
3383 return (0);
3384 }
3385 MNT_ILOCK(mp);
3386 mp->mnt_noasync++;
3387 mp->mnt_kern_flag &= ~MNTK_ASYNC;
3388 MNT_IUNLOCK(mp);
3389 vfs_msync(mp, MNT_NOWAIT);
3390 error = VFS_SYNC(mp, MNT_LAZY, td);
3391 MNT_ILOCK(mp);
3392 mp->mnt_noasync--;
3393 if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
3394 mp->mnt_kern_flag |= MNTK_ASYNC;
3395 MNT_IUNLOCK(mp);
3396 vn_finished_write(mp);
3397 vfs_unbusy(mp, td);
3398 return (error);
3399 }
3400
3401 /*
3402 * The syncer vnode is no longer referenced.
3403 */
3404 static int
3405 sync_inactive(struct vop_inactive_args *ap)
3406 {
3407
3408 vgone(ap->a_vp);
3409 return (0);
3410 }
3411
3412 /*
3413 * The syncer vnode is no longer needed and is being decommissioned.
3414 *
3415 * Modifications to the worklist must be protected by sync_mtx.
3416 */
3417 static int
3418 sync_reclaim(struct vop_reclaim_args *ap)
3419 {
3420 struct vnode *vp = ap->a_vp;
3421 struct bufobj *bo;
3422
3423 VI_LOCK(vp);
3424 bo = &vp->v_bufobj;
3425 vp->v_mount->mnt_syncer = NULL;
3426 if (bo->bo_flag & BO_ONWORKLST) {
3427 mtx_lock(&sync_mtx);
3428 LIST_REMOVE(bo, bo_synclist);
3429 syncer_worklist_len--;
3430 sync_vnode_count--;
3431 mtx_unlock(&sync_mtx);
3432 bo->bo_flag &= ~BO_ONWORKLST;
3433 }
3434 VI_UNLOCK(vp);
3435
3436 return (0);
3437 }
3438
3439 /*
3440 * Check if vnode represents a disk device
3441 */
3442 int
3443 vn_isdisk(struct vnode *vp, int *errp)
3444 {
3445 int error;
3446
3447 error = 0;
3448 dev_lock();
3449 if (vp->v_type != VCHR)
3450 error = ENOTBLK;
3451 else if (vp->v_rdev == NULL)
3452 error = ENXIO;
3453 else if (vp->v_rdev->si_devsw == NULL)
3454 error = ENXIO;
3455 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3456 error = ENOTBLK;
3457 dev_unlock();
3458 if (errp != NULL)
3459 *errp = error;
3460 return (error == 0);
3461 }
3462
3463 /*
3464 * Common filesystem object access control check routine. Accepts a
3465 * vnode's type, "mode", uid and gid, requested access mode, credentials,
3466 * and optional call-by-reference privused argument allowing vaccess()
3467 * to indicate to the caller whether privilege was used to satisfy the
3468 * request (obsoleted). Returns 0 on success, or an errno on failure.
3469 *
3470 * The ifdef'd CAPABILITIES version is here for reference, but is not
3471 * actually used.
3472 */
3473 int
3474 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
3475 mode_t acc_mode, struct ucred *cred, int *privused)
3476 {
3477 mode_t dac_granted;
3478 mode_t priv_granted;
3479
3480 /*
3481 * Look for a normal, non-privileged way to access the file/directory
3482 * as requested. If it exists, go with that.
3483 */
3484
3485 if (privused != NULL)
3486 *privused = 0;
3487
3488 dac_granted = 0;
3489
3490 /* Check the owner. */
3491 if (cred->cr_uid == file_uid) {
3492 dac_granted |= VADMIN;
3493 if (file_mode & S_IXUSR)
3494 dac_granted |= VEXEC;
3495 if (file_mode & S_IRUSR)
3496 dac_granted |= VREAD;
3497 if (file_mode & S_IWUSR)
3498 dac_granted |= (VWRITE | VAPPEND);
3499
3500 if ((acc_mode & dac_granted) == acc_mode)
3501 return (0);
3502
3503 goto privcheck;
3504 }
3505
3506 /* Otherwise, check the groups (first match) */
3507 if (groupmember(file_gid, cred)) {
3508 if (file_mode & S_IXGRP)
3509 dac_granted |= VEXEC;
3510 if (file_mode & S_IRGRP)
3511 dac_granted |= VREAD;
3512 if (file_mode & S_IWGRP)
3513 dac_granted |= (VWRITE | VAPPEND);
3514
3515 if ((acc_mode & dac_granted) == acc_mode)
3516 return (0);
3517
3518 goto privcheck;
3519 }
3520
3521 /* Otherwise, check everyone else. */
3522 if (file_mode & S_IXOTH)
3523 dac_granted |= VEXEC;
3524 if (file_mode & S_IROTH)
3525 dac_granted |= VREAD;
3526 if (file_mode & S_IWOTH)
3527 dac_granted |= (VWRITE | VAPPEND);
3528 if ((acc_mode & dac_granted) == acc_mode)
3529 return (0);
3530
3531 privcheck:
3532 /*
3533 * Build a privilege mask to determine if the set of privileges
3534 * satisfies the requirements when combined with the granted mask
3535 * from above. For each privilege, if the privilege is required,
3536 * bitwise or the request type onto the priv_granted mask.
3537 */
3538 priv_granted = 0;
3539
3540 if (type == VDIR) {
3541 /*
3542 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3543 * requests, instead of PRIV_VFS_EXEC.
3544 */
3545 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3546 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
3547 priv_granted |= VEXEC;
3548 } else {
3549 if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3550 !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
3551 priv_granted |= VEXEC;
3552 }
3553
3554 if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3555 !priv_check_cred(cred, PRIV_VFS_READ, 0))
3556 priv_granted |= VREAD;
3557
3558 if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3559 !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
3560 priv_granted |= (VWRITE | VAPPEND);
3561
3562 if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3563 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
3564 priv_granted |= VADMIN;
3565
3566 if ((acc_mode & (priv_granted | dac_granted)) == acc_mode) {
3567 /* XXX audit: privilege used */
3568 if (privused != NULL)
3569 *privused = 1;
3570 return (0);
3571 }
3572
3573 return ((acc_mode & VADMIN) ? EPERM : EACCES);
3574 }
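
/*
 * Worked example of the checks above: for a VREG file with mode 0640,
 * owner uid 100 and group gid 200, a credential with cr_uid == 100 earns
 * dac_granted = VADMIN | VREAD | VWRITE | VAPPEND, so an acc_mode of
 * VREAD | VWRITE succeeds without any privilege check.  A credential that
 * only matches gid 200 earns dac_granted = VREAD, so the same request
 * falls through to privcheck and needs PRIV_VFS_WRITE to be granted.
 */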
3575
3576 /*
3577 * Credential check based on process requesting service, and per-attribute
3578 * permissions.
3579 */
3580 int
3581 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
3582 struct thread *td, int access)
3583 {
3584
3585 /*
3586 * Kernel-invoked requests always succeed.
3587 */
3588 if (cred == NOCRED)
3589 return (0);
3590
3591 /*
3592 * Do not allow privileged processes in jail to directly manipulate
3593 * system attributes.
3594 */
3595 switch (attrnamespace) {
3596 case EXTATTR_NAMESPACE_SYSTEM:
3597 /* Potentially should be: return (EPERM); */
3598 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
3599 case EXTATTR_NAMESPACE_USER:
3600 return (VOP_ACCESS(vp, access, cred, td));
3601 default:
3602 return (EPERM);
3603 }
3604 }
3605
3606 #ifdef DEBUG_VFS_LOCKS
3607 /*
3608 * This only exists to suppress warnings from unlocked specfs accesses. It is
3609 * no longer ok to have an unlocked VFS.
3610 */
3611 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3612
3613 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
3614 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3615
3616 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */
3617 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3618
3619 int vfs_badlock_print = 1; /* Print lock violations. */
3620 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3621
3622 #ifdef KDB
3623 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */
3624 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3625 #endif
3626
3627 static void
3628 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3629 {
3630
3631 #ifdef KDB
3632 if (vfs_badlock_backtrace)
3633 kdb_backtrace();
3634 #endif
3635 if (vfs_badlock_print)
3636 printf("%s: %p %s\n", str, (void *)vp, msg);
3637 if (vfs_badlock_ddb)
3638 kdb_enter_why(KDB_WHY_VFSLOCK, "lock violation");
3639 }
3640
3641 void
3642 assert_vi_locked(struct vnode *vp, const char *str)
3643 {
3644
3645 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3646 vfs_badlock("interlock is not locked but should be", str, vp);
3647 }
3648
3649 void
3650 assert_vi_unlocked(struct vnode *vp, const char *str)
3651 {
3652
3653 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3654 vfs_badlock("interlock is locked but should not be", str, vp);
3655 }
3656
3657 void
3658 assert_vop_locked(struct vnode *vp, const char *str)
3659 {
3660
3661 if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3662 vfs_badlock("is not locked but should be", str, vp);
3663 }
3664
3665 void
3666 assert_vop_unlocked(struct vnode *vp, const char *str)
3667 {
3668
3669 if (vp && !IGNORE_LOCK(vp) &&
3670 VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3671 vfs_badlock("is locked but should not be", str, vp);
3672 }
3673
3674 void
3675 assert_vop_elocked(struct vnode *vp, const char *str)
3676 {
3677
3678 if (vp && !IGNORE_LOCK(vp) &&
3679 VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3680 vfs_badlock("is not exclusive locked but should be", str, vp);
3681 }
3682
3683 #if 0
3684 void
3685 assert_vop_elocked_other(struct vnode *vp, const char *str)
3686 {
3687
3688 if (vp && !IGNORE_LOCK(vp) &&
3689 VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3690 vfs_badlock("is not exclusive locked by another thread",
3691 str, vp);
3692 }
3693
3694 void
3695 assert_vop_slocked(struct vnode *vp, const char *str)
3696 {
3697
3698 if (vp && !IGNORE_LOCK(vp) &&
3699 VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3700 vfs_badlock("is not locked shared but should be", str, vp);
3701 }
3702 #endif /* 0 */
3703 #endif /* DEBUG_VFS_LOCKS */
3704
3705 void
3706 vop_rename_pre(void *ap)
3707 {
3708 struct vop_rename_args *a = ap;
3709
3710 #ifdef DEBUG_VFS_LOCKS
3711 if (a->a_tvp)
3712 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3713 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3714 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3715 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3716
3717 /* Check the source (from). */
3718 if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp)
3719 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3720 if (a->a_tvp != a->a_fvp)
3721 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3722
3723 /* Check the target. */
3724 if (a->a_tvp)
3725 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3726 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3727 #endif
3728 if (a->a_tdvp != a->a_fdvp)
3729 vhold(a->a_fdvp);
3730 if (a->a_tvp != a->a_fvp)
3731 vhold(a->a_fvp);
3732 vhold(a->a_tdvp);
3733 if (a->a_tvp)
3734 vhold(a->a_tvp);
3735 }
3736
3737 void
3738 vop_strategy_pre(void *ap)
3739 {
3740 #ifdef DEBUG_VFS_LOCKS
3741 struct vop_strategy_args *a;
3742 struct buf *bp;
3743
3744 a = ap;
3745 bp = a->a_bp;
3746
3747 /*
3748 * Cluster ops lock their component buffers but not the IO container.
3749 */
3750 if ((bp->b_flags & B_CLUSTER) != 0)
3751 return;
3752
3753 if (BUF_REFCNT(bp) < 1) {
3754 if (vfs_badlock_print)
3755 printf(
3756 "VOP_STRATEGY: bp is not locked but should be\n");
3757 if (vfs_badlock_ddb)
3758 kdb_enter_why(KDB_WHY_VFSLOCK, "lock violation");
3759 }
3760 #endif
3761 }
3762
3763 void
3764 vop_lookup_pre(void *ap)
3765 {
3766 #ifdef DEBUG_VFS_LOCKS
3767 struct vop_lookup_args *a;
3768 struct vnode *dvp;
3769
3770 a = ap;
3771 dvp = a->a_dvp;
3772 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3773 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3774 #endif
3775 }
3776
3777 void
3778 vop_lookup_post(void *ap, int rc)
3779 {
3780 #ifdef DEBUG_VFS_LOCKS
3781 struct vop_lookup_args *a;
3782 struct vnode *dvp;
3783 struct vnode *vp;
3784
3785 a = ap;
3786 dvp = a->a_dvp;
3787 vp = *(a->a_vpp);
3788
3789 ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3790 ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3791
3792 if (!rc)
3793 ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
3794 #endif
3795 }
3796
3797 void
3798 vop_lock_pre(void *ap)
3799 {
3800 #ifdef DEBUG_VFS_LOCKS
3801 struct vop_lock1_args *a = ap;
3802
3803 if ((a->a_flags & LK_INTERLOCK) == 0)
3804 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3805 else
3806 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3807 #endif
3808 }
3809
3810 void
3811 vop_lock_post(void *ap, int rc)
3812 {
3813 #ifdef DEBUG_VFS_LOCKS
3814 struct vop_lock1_args *a = ap;
3815
3816 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3817 if (rc == 0)
3818 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3819 #endif
3820 }
3821
3822 void
3823 vop_unlock_pre(void *ap)
3824 {
3825 #ifdef DEBUG_VFS_LOCKS
3826 struct vop_unlock_args *a = ap;
3827
3828 if (a->a_flags & LK_INTERLOCK)
3829 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3830 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3831 #endif
3832 }
3833
3834 void
3835 vop_unlock_post(void *ap, int rc)
3836 {
3837 #ifdef DEBUG_VFS_LOCKS
3838 struct vop_unlock_args *a = ap;
3839
3840 if (a->a_flags & LK_INTERLOCK)
3841 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3842 #endif
3843 }
3844
3845 void
3846 vop_create_post(void *ap, int rc)
3847 {
3848 struct vop_create_args *a = ap;
3849
3850 if (!rc)
3851 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3852 }
3853
3854 void
3855 vop_link_post(void *ap, int rc)
3856 {
3857 struct vop_link_args *a = ap;
3858
3859 if (!rc) {
3860 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
3861 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
3862 }
3863 }
3864
3865 void
3866 vop_mkdir_post(void *ap, int rc)
3867 {
3868 struct vop_mkdir_args *a = ap;
3869
3870 if (!rc)
3871 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3872 }
3873
3874 void
3875 vop_mknod_post(void *ap, int rc)
3876 {
3877 struct vop_mknod_args *a = ap;
3878
3879 if (!rc)
3880 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3881 }
3882
3883 void
3884 vop_remove_post(void *ap, int rc)
3885 {
3886 struct vop_remove_args *a = ap;
3887
3888 if (!rc) {
3889 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3890 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3891 }
3892 }
3893
3894 void
3895 vop_rename_post(void *ap, int rc)
3896 {
3897 struct vop_rename_args *a = ap;
3898
3899 if (!rc) {
3900 VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
3901 VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
3902 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
3903 if (a->a_tvp)
3904 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
3905 }
3906 if (a->a_tdvp != a->a_fdvp)
3907 vdrop(a->a_fdvp);
3908 if (a->a_tvp != a->a_fvp)
3909 vdrop(a->a_fvp);
3910 vdrop(a->a_tdvp);
3911 if (a->a_tvp)
3912 vdrop(a->a_tvp);
3913 }
3914
3915 void
3916 vop_rmdir_post(void *ap, int rc)
3917 {
3918 struct vop_rmdir_args *a = ap;
3919
3920 if (!rc) {
3921 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3922 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3923 }
3924 }
3925
3926 void
3927 vop_setattr_post(void *ap, int rc)
3928 {
3929 struct vop_setattr_args *a = ap;
3930
3931 if (!rc)
3932 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
3933 }
3934
3935 void
3936 vop_symlink_post(void *ap, int rc)
3937 {
3938 struct vop_symlink_args *a = ap;
3939
3940 if (!rc)
3941 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3942 }
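/*
 * The vop_*_post() hooks above turn successful namespace operations into
 * kqueue notes (NOTE_WRITE, NOTE_LINK, NOTE_DELETE, ...) for EVFILT_VNODE
 * watchers.  A minimal userland consumer sketch, assuming <sys/event.h> is
 * included and 'fd' is an already-open descriptor for the watched directory
 * (illustrative only, error handling omitted):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_WRITE | NOTE_LINK | NOTE_DELETE, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 *
 * A later kevent() wait then returns the accumulated NOTE_* bits for the
 * watched vnode in ev.fflags.
 */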
3943
3944 static struct knlist fs_knlist;
3945
3946 static void
3947 vfs_event_init(void *arg)
3948 {
3949 knlist_init(&fs_knlist, NULL, NULL, NULL, NULL);
3950 }
3951 /* XXX - correct order? */
3952 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3953
3954 void
3955 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3956 {
3957
3958 KNOTE_UNLOCKED(&fs_knlist, event);
3959 }
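/*
 * fs_knlist backs the EVFILT_FS filter: vfs_event_signal() posts a
 * filesystem-level event whose code is OR-ed into each attached knote's
 * fflags (see filt_fsevent() below).  A minimal userland registration
 * sketch; the ident is conventionally 0 for this filter (illustrative
 * only):
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */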
3960
3961 static int filt_fsattach(struct knote *kn);
3962 static void filt_fsdetach(struct knote *kn);
3963 static int filt_fsevent(struct knote *kn, long hint);
3964
3965 struct filterops fs_filtops =
3966 { 0, filt_fsattach, filt_fsdetach, filt_fsevent };
3967
3968 static int
3969 filt_fsattach(struct knote *kn)
3970 {
3971
3972 kn->kn_flags |= EV_CLEAR;
3973 knlist_add(&fs_knlist, kn, 0);
3974 return (0);
3975 }
3976
3977 static void
3978 filt_fsdetach(struct knote *kn)
3979 {
3980
3981 knlist_remove(&fs_knlist, kn, 0);
3982 }
3983
3984 static int
3985 filt_fsevent(struct knote *kn, long hint)
3986 {
3987
3988 kn->kn_fflags |= hint;
3989 return (kn->kn_fflags != 0);
3990 }
3991
3992 static int
3993 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
3994 {
3995 struct vfsidctl vc;
3996 int error;
3997 struct mount *mp;
3998
3999 error = SYSCTL_IN(req, &vc, sizeof(vc));
4000 if (error)
4001 return (error);
4002 if (vc.vc_vers != VFS_CTL_VERS1)
4003 return (EINVAL);
4004 mp = vfs_getvfs(&vc.vc_fsid);
4005 if (mp == NULL)
4006 return (ENOENT);
4007 /* Ensure that a specific sysctl goes to the right filesystem. */
4008 if (strcmp(vc.vc_fstypename, "*") != 0 &&
4009 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4010 vfs_rel(mp);
4011 return (EINVAL);
4012 }
4013 VCTLTOREQ(&vc, req);
4014 error = VFS_SYSCTL(mp, vc.vc_op, req);
4015 vfs_rel(mp);
4016 return (error);
4017 }
4018
4019 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "",
4020 "Sysctl by fsid");
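/*
 * Userland reaches sysctl_vfs_ctl() by writing a struct vfsidctl to the
 * "vfs.ctl" node.  A minimal sketch (illustrative only; VFS_CTL_QUERY is
 * merely an example operation and is assumed to be supported by the target
 * filesystem, and 'sfs' is a struct statfs filled in by a prior statfs()):
 *
 *	struct vfsidctl vc;
 *
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vc_vers = VFS_CTL_VERS1;
 *	vc.vc_fsid = sfs.f_fsid;
 *	strlcpy(vc.vc_fstypename, sfs.f_fstypename, sizeof(vc.vc_fstypename));
 *	vc.vc_op = VFS_CTL_QUERY;
 *	sysctlbyname("vfs.ctl", NULL, NULL, &vc, sizeof(vc));
 */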
4021
4022 /*
4023 * Function to initialize a va_filerev field sensibly.
4024 * XXX: Wouldn't a random number make a lot more sense ??
4025 */
4026 u_quad_t
4027 init_va_filerev(void)
4028 {
4029 struct bintime bt;
4030
4031 getbinuptime(&bt);
4032 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4033 }
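/*
 * The result packs the seconds of system uptime into the upper 32 bits and
 * the most significant 32 bits of the bintime fraction into the lower half,
 * so the initial value keeps increasing over the lifetime of a boot.
 */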
4034
4035 static int filt_vfsread(struct knote *kn, long hint);
4036 static int filt_vfswrite(struct knote *kn, long hint);
4037 static int filt_vfsvnode(struct knote *kn, long hint);
4038 static void filt_vfsdetach(struct knote *kn);
4039 static struct filterops vfsread_filtops =
4040 { 1, NULL, filt_vfsdetach, filt_vfsread };
4041 static struct filterops vfswrite_filtops =
4042 { 1, NULL, filt_vfsdetach, filt_vfswrite };
4043 static struct filterops vfsvnode_filtops =
4044 { 1, NULL, filt_vfsdetach, filt_vfsvnode };
4045
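/*
 * Lock callbacks for the knlist embedded in a vnode's pollinfo; they take,
 * release and check the vnode lock, so knote processing for the vnode
 * filters below is serialized against the vnode lock.
 */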
4046 static void
4047 vfs_knllock(void *arg)
4048 {
4049 struct vnode *vp = arg;
4050
4051 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
4052 }
4053
4054 static void
4055 vfs_knlunlock(void *arg)
4056 {
4057 struct vnode *vp = arg;
4058
4059 VOP_UNLOCK(vp, 0, curthread);
4060 }
4061
4062 static int
4063 vfs_knllocked(void *arg)
4064 {
4065 struct vnode *vp = arg;
4066
4067 return (VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE);
4068 }
4069
4070 int
4071 vfs_kqfilter(struct vop_kqfilter_args *ap)
4072 {
4073 struct vnode *vp = ap->a_vp;
4074 struct knote *kn = ap->a_kn;
4075 struct knlist *knl;
4076
4077 switch (kn->kn_filter) {
4078 case EVFILT_READ:
4079 kn->kn_fop = &vfsread_filtops;
4080 break;
4081 case EVFILT_WRITE:
4082 kn->kn_fop = &vfswrite_filtops;
4083 break;
4084 case EVFILT_VNODE:
4085 kn->kn_fop = &vfsvnode_filtops;
4086 break;
4087 default:
4088 return (EINVAL);
4089 }
4090
4091 kn->kn_hook = (caddr_t)vp;
4092
4093 v_addpollinfo(vp);
4094 if (vp->v_pollinfo == NULL)
4095 return (ENOMEM);
4096 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
4097 knlist_add(knl, kn, 0);
4098
4099 return (0);
4100 }
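/*
 * Filesystems that want this generic kqueue support point their
 * vop_kqfilter entry at vfs_kqfilter.  An illustrative vop_vector fragment
 * ("xxfs" is a placeholder name):
 *
 *	static struct vop_vector xxfs_vnodeops = {
 *		.vop_default	= &default_vnodeops,
 *		.vop_kqfilter	= vfs_kqfilter,
 *		...
 *	};
 */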
4101
4102 /*
4103 * Detach knote from vnode
4104 */
4105 static void
4106 filt_vfsdetach(struct knote *kn)
4107 {
4108 struct vnode *vp = (struct vnode *)kn->kn_hook;
4109
4110 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
4111 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
4112 }
4113
4114 /*ARGSUSED*/
4115 static int
4116 filt_vfsread(struct knote *kn, long hint)
4117 {
4118 struct vnode *vp = (struct vnode *)kn->kn_hook;
4119 struct vattr va;
4120
4121 /*
4122 * The filesystem is gone, so set the EOF flag and schedule
4123 * the knote for deletion.
4124 */
4125 if (hint == NOTE_REVOKE) {
4126 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4127 return (1);
4128 }
4129
4130 if (VOP_GETATTR(vp, &va, curthread->td_ucred, curthread))
4131 return (0);
4132
4133 kn->kn_data = va.va_size - kn->kn_fp->f_offset;
4134 return (kn->kn_data != 0);
4135 }
4136
4137 /*ARGSUSED*/
4138 static int
4139 filt_vfswrite(struct knote *kn, long hint)
4140 {
4141 /*
4142 * The filesystem is gone, so set the EOF flag and schedule
4143 * the knote for deletion.
4144 */
4145 if (hint == NOTE_REVOKE)
4146 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4147
4148 kn->kn_data = 0;
4149 return (1);
4150 }
4151
4152 static int
4153 filt_vfsvnode(struct knote *kn, long hint)
4154 {
4155 if (kn->kn_sfflags & hint)
4156 kn->kn_fflags |= hint;
4157 if (hint == NOTE_REVOKE) {
4158 kn->kn_flags |= EV_EOF;
4159 return (1);
4160 }
4161 return (kn->kn_fflags != 0);
4162 }
4163
4164 int
4165 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
4166 {
4167 int error;
4168
4169 if (dp->d_reclen > ap->a_uio->uio_resid)
4170 return (ENAMETOOLONG);
4171 error = uiomove(dp, dp->d_reclen, ap->a_uio);
4172 if (error) {
4173 if (ap->a_ncookies != NULL) {
4174 if (ap->a_cookies != NULL)
4175 free(ap->a_cookies, M_TEMP);
4176 ap->a_cookies = NULL;
4177 *ap->a_ncookies = 0;
4178 }
4179 return (error);
4180 }
4181 if (ap->a_ncookies == NULL)
4182 return (0);
4183
4184 KASSERT(ap->a_cookies,
4185 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
4186
4187 *ap->a_cookies = realloc(*ap->a_cookies,
4188 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
4189 (*ap->a_cookies)[*ap->a_ncookies] = off;
4190 return (0);
4191 }
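/*
 * A filesystem's VOP_READDIR implementation can emit entries one at a time
 * through vfs_read_dirent().  A rough per-entry loop, with the on-disk
 * iteration left to hypothetical filesystem code (illustrative only):
 *
 *	struct dirent dp;
 *	int error;
 *
 *	while (xxfs_next_entry(&dp, &next_off) == 0) {	-- hypothetical iterator
 *		dp.d_reclen = GENERIC_DIRSIZ(&dp);
 *		error = vfs_read_dirent(ap, &dp, next_off);
 *		if (error != 0)
 *			break;	-- ENAMETOOLONG means the uio is full
 *	}
 */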
4192
4193 /*
4194 * Mark the file's access time for update if the filesystem supports
4195 * VA_MARK_ATIME.  This functionality is used by execve and mmap, so
4196 * for efficiency we want to avoid the synchronous I/O that setting
4197 * va_atime directly would imply.
4198 */
4199 void
4200 vfs_mark_atime(struct vnode *vp, struct thread *td)
4201 {
4202 struct vattr atimeattr;
4203
4204 if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
4205 VATTR_NULL(&atimeattr);
4206 atimeattr.va_vaflags |= VA_MARK_ATIME;
4207 (void)VOP_SETATTR(vp, &atimeattr, td->td_ucred, td);
4208 }
4209 }