FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_subr.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. Neither the name of the University nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 * @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
37 */
38
39 /*
40 * External virtual filesystem routines
41 */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD: releng/12.0/sys/kern/vfs_subr.c 337977 2018-08-17 16:07:06Z markj $");
45
46 #include "opt_ddb.h"
47 #include "opt_watchdog.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/bio.h>
52 #include <sys/buf.h>
53 #include <sys/condvar.h>
54 #include <sys/conf.h>
55 #include <sys/counter.h>
56 #include <sys/dirent.h>
57 #include <sys/event.h>
58 #include <sys/eventhandler.h>
59 #include <sys/extattr.h>
60 #include <sys/file.h>
61 #include <sys/fcntl.h>
62 #include <sys/jail.h>
63 #include <sys/kdb.h>
64 #include <sys/kernel.h>
65 #include <sys/kthread.h>
66 #include <sys/lockf.h>
67 #include <sys/malloc.h>
68 #include <sys/mount.h>
69 #include <sys/namei.h>
70 #include <sys/pctrie.h>
71 #include <sys/priv.h>
72 #include <sys/reboot.h>
73 #include <sys/refcount.h>
74 #include <sys/rwlock.h>
75 #include <sys/sched.h>
76 #include <sys/sleepqueue.h>
77 #include <sys/smp.h>
78 #include <sys/stat.h>
79 #include <sys/sysctl.h>
80 #include <sys/syslog.h>
81 #include <sys/vmmeter.h>
82 #include <sys/vnode.h>
83 #include <sys/watchdog.h>
84
85 #include <machine/stdarg.h>
86
87 #include <security/mac/mac_framework.h>
88
89 #include <vm/vm.h>
90 #include <vm/vm_object.h>
91 #include <vm/vm_extern.h>
92 #include <vm/pmap.h>
93 #include <vm/vm_map.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_kern.h>
96 #include <vm/uma.h>
97
98 #ifdef DDB
99 #include <ddb/ddb.h>
100 #endif
101
102 static void delmntque(struct vnode *vp);
103 static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
104 int slpflag, int slptimeo);
105 static void syncer_shutdown(void *arg, int howto);
106 static int vtryrecycle(struct vnode *vp);
107 static void v_init_counters(struct vnode *);
108 static void v_incr_usecount(struct vnode *);
109 static void v_incr_usecount_locked(struct vnode *);
110 static void v_incr_devcount(struct vnode *);
111 static void v_decr_devcount(struct vnode *);
112 static void vgonel(struct vnode *);
113 static void vfs_knllock(void *arg);
114 static void vfs_knlunlock(void *arg);
115 static void vfs_knl_assert_locked(void *arg);
116 static void vfs_knl_assert_unlocked(void *arg);
117 static void vnlru_return_batches(struct vfsops *mnt_op);
118 static void destroy_vpollinfo(struct vpollinfo *vi);
119
120 /*
121 * These fences are intended for cases where some synchronization is
122 * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
123 * and v_usecount) updates. Access to v_iflags is generally synchronized
124 * by the interlock, but we have some internal assertions that check vnode
125 * flags without acquiring the lock. Thus, these fences are INVARIANTS-only
126 * for now.
127 */
128 #ifdef INVARIANTS
129 #define VNODE_REFCOUNT_FENCE_ACQ() atomic_thread_fence_acq()
130 #define VNODE_REFCOUNT_FENCE_REL() atomic_thread_fence_rel()
131 #else
132 #define VNODE_REFCOUNT_FENCE_ACQ()
133 #define VNODE_REFCOUNT_FENCE_REL()
134 #endif
135
136 /*
137 * Number of vnodes in existence. Increased whenever getnewvnode()
138 * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
139 */
140 static unsigned long numvnodes;
141
142 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
143 "Number of vnodes in existence");
144
145 static counter_u64_t vnodes_created;
146 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
147 "Number of vnodes created by getnewvnode");
148
149 static u_long mnt_free_list_batch = 128;
150 SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
151 &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");
152
153 /*
154 * Conversion tables for conversion from vnode types to inode formats
155 * and back.
156 */
157 enum vtype iftovt_tab[16] = {
158 VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
159 VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
160 };
161 int vttoif_tab[10] = {
162 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
163 S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
164 };
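The two tables above back the IFTOVT() and VTTOIF() macros in sys/vnode.h. A minimal usage sketch, in the style of how a disk filesystem loads an inode into a vnode; the inode pointer "ip" and its i_mode field are hypothetical here:

	vp->v_type = IFTOVT(ip->i_mode);	/* e.g. S_IFDIR -> VDIR, S_IFREG -> VREG */
	/* ...and back again when synthesizing a mode word for stat(2): */
	mode = VTTOIF(vp->v_type) | (ip->i_mode & ~S_IFMT);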
165
166 /*
167 * List of vnodes that are ready for recycling.
168 */
169 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
170
171 /*
172 * "Free" vnode target. Free vnodes are rarely completely free, but are
173 * just ones that are cheap to recycle. Usually they are for files which
174 * have been stat'd but not read; these usually have inode and namecache
175 * data attached to them. This target is the preferred minimum size of a
176 * sub-cache consisting mostly of such files. The system balances the size
177 * of this sub-cache with its complement to try to prevent either from
178 * thrashing while the other is relatively inactive. The targets express
179 * a preference for the best balance.
180 *
181 * "Above" this target there are 2 further targets (watermarks) related
182 * to recycling of free vnodes. In the best-operating case, the cache is
183 * exactly full, the free list has size between vlowat and vhiwat above the
184 * free target, and recycling from it and normal use maintains this state.
185 * Sometimes the free list is below vlowat or even empty, but this state
186 * is even better for immediate use provided the cache is not full.
187 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
188 * ones) to reach one of these states. The watermarks are currently hard-
189 * coded as 4% and 9% of the available space higher. These and the default
190 * of 25% for wantfreevnodes are too large if the memory size is large.
191 * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
192 * whenever vnlru_proc() becomes active.
193 */
194 static u_long wantfreevnodes;
195 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
196 &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
197 static u_long freevnodes;
198 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
199 &freevnodes, 0, "Number of \"free\" vnodes");
200
201 static counter_u64_t recycles_count;
202 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
203 "Number of vnodes recycled to meet vnode cache targets");
204
205 /*
206 * Various variables used for debugging the new implementation of
207 * reassignbuf().
208 * XXX these are probably of (very) limited utility now.
209 */
210 static int reassignbufcalls;
211 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
212 "Number of calls to reassignbuf");
213
214 static counter_u64_t free_owe_inact;
215 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact,
216 "Number of times free vnodes kept on active list due to VFS "
217 "owing inactivation");
218
219 /* To keep more than one thread at a time from running vfs_getnewfsid */
220 static struct mtx mntid_mtx;
221
222 /*
223 * Lock for any access to the following:
224 * vnode_free_list
225 * numvnodes
226 * freevnodes
227 */
228 static struct mtx vnode_free_list_mtx;
229
230 /* Publicly exported FS */
231 struct nfs_public nfs_pub;
232
233 static uma_zone_t buf_trie_zone;
234
235 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
236 static uma_zone_t vnode_zone;
237 static uma_zone_t vnodepoll_zone;
238
239 /*
240 * The workitem queue.
241 *
242 * It is useful to delay writes of file data and filesystem metadata
243 * for tens of seconds so that quickly created and deleted files need
244 * not waste disk bandwidth being created and removed. To realize this,
245 * we append vnodes to a "workitem" queue. When running with a soft
246 * updates implementation, most pending metadata dependencies should
247 * not wait for more than a few seconds. Thus, metadata buffers mounted
248 * on block devices are delayed only about half the time that file data
249 * is delayed. Similarly, directory updates are more critical, so they
250 * are delayed only about a third of that time. Thus, there are
251 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
252 * one each second (driven off the filesystem syncer process). The
253 * syncer_delayno variable indicates the next queue that is to be processed.
254 * Items that need to be processed soon are placed in this queue:
255 *
256 * syncer_workitem_pending[syncer_delayno]
257 *
258 * A delay of fifteen seconds is done by placing the request fifteen
259 * entries later in the queue:
260 *
261 * syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
262 *
263 */
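A minimal sketch of the slot arithmetic described above, using the syncer variables declared just below; the helper name is made up for illustration, and the real worklist code does this while holding sync_mtx:

	static int
	syncer_slot_for_delay(int delay)
	{
		/* Clamp the request, then pick the ring slot "delay" seconds out. */
		if (delay > syncer_maxdelay - 2)
			delay = syncer_maxdelay - 2;
		return ((syncer_delayno + delay) & syncer_mask);
	}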
264 static int syncer_delayno;
265 static long syncer_mask;
266 LIST_HEAD(synclist, bufobj);
267 static struct synclist *syncer_workitem_pending;
268 /*
269 * The sync_mtx protects:
270 * bo->bo_synclist
271 * sync_vnode_count
272 * syncer_delayno
273 * syncer_state
274 * syncer_workitem_pending
275 * syncer_worklist_len
276 * rushjob
277 */
278 static struct mtx sync_mtx;
279 static struct cv sync_wakeup;
280
281 #define SYNCER_MAXDELAY 32
282 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
283 static int syncdelay = 30; /* max time to delay syncing data */
284 static int filedelay = 30; /* time to delay syncing files */
285 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
286 "Time to delay syncing files (in seconds)");
287 static int dirdelay = 29; /* time to delay syncing directories */
288 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
289 "Time to delay syncing directories (in seconds)");
290 static int metadelay = 28; /* time to delay syncing metadata */
291 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
292 "Time to delay syncing metadata (in seconds)");
293 static int rushjob; /* number of slots to run ASAP */
294 static int stat_rush_requests; /* number of times I/O speeded up */
295 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
296 "Number of times I/O speeded up (rush requests)");
297
298 /*
299 * When shutting down the syncer, run it at four times normal speed.
300 */
301 #define SYNCER_SHUTDOWN_SPEEDUP 4
302 static int sync_vnode_count;
303 static int syncer_worklist_len;
304 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
305 syncer_state;
306
307 /* Target for maximum number of vnodes. */
308 int desiredvnodes;
309 static int gapvnodes; /* gap between wanted and desired */
310 static int vhiwat; /* enough extras after expansion */
311 static int vlowat; /* minimal extras before expansion */
312 static int vstir; /* nonzero to stir non-free vnodes */
313 static volatile int vsmalltrigger = 8; /* pref to keep if > this many pages */
314
315 static int
316 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
317 {
318 int error, old_desiredvnodes;
319
320 old_desiredvnodes = desiredvnodes;
321 if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
322 return (error);
323 if (old_desiredvnodes != desiredvnodes) {
324 wantfreevnodes = desiredvnodes / 4;
325 /* XXX locking seems to be incomplete. */
326 vfs_hash_changesize(desiredvnodes);
327 cache_changesize(desiredvnodes);
328 }
329 return (0);
330 }
331
332 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
333 CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
334 sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
335 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
336 &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
337 static int vnlru_nowhere;
338 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
339 &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
340
341 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
342 static int vnsz2log;
343
344 /*
345 * Support for the bufobj clean & dirty pctrie.
346 */
347 static void *
348 buf_trie_alloc(struct pctrie *ptree)
349 {
350
351 return uma_zalloc(buf_trie_zone, M_NOWAIT);
352 }
353
354 static void
355 buf_trie_free(struct pctrie *ptree, void *node)
356 {
357
358 uma_zfree(buf_trie_zone, node);
359 }
360 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);
361
362 /*
363 * Initialize the vnode management data structures.
364 *
365 * Reevaluate the following cap on the number of vnodes after the physical
366 * memory size exceeds 512GB. In the limit, as the physical memory size
367 * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
368 */
369 #ifndef MAXVNODES_MAX
370 #define MAXVNODES_MAX (512 * 1024 * 1024 / 64) /* 8M */
371 #endif
372
373 /*
374 * Initialize a vnode as it first enters the zone.
375 */
376 static int
377 vnode_init(void *mem, int size, int flags)
378 {
379 struct vnode *vp;
380
381 vp = mem;
382 bzero(vp, size);
383 /*
384 * Setup locks.
385 */
386 vp->v_vnlock = &vp->v_lock;
387 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
388 /*
389 * By default, don't allow shared locks unless filesystems opt-in.
390 */
391 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
392 LK_NOSHARE | LK_IS_VNODE);
393 /*
394 * Initialize bufobj.
395 */
396 bufobj_init(&vp->v_bufobj, vp);
397 /*
398 * Initialize namecache.
399 */
400 LIST_INIT(&vp->v_cache_src);
401 TAILQ_INIT(&vp->v_cache_dst);
402 /*
403 * Initialize rangelocks.
404 */
405 rangelock_init(&vp->v_rl);
406 return (0);
407 }
408
409 /*
410 * Free a vnode when it is cleared from the zone.
411 */
412 static void
413 vnode_fini(void *mem, int size)
414 {
415 struct vnode *vp;
416 struct bufobj *bo;
417
418 vp = mem;
419 rangelock_destroy(&vp->v_rl);
420 lockdestroy(vp->v_vnlock);
421 mtx_destroy(&vp->v_interlock);
422 bo = &vp->v_bufobj;
423 rw_destroy(BO_LOCKPTR(bo));
424 }
425
426 /*
427 * Provide the size of NFS nclnode and NFS fh for calculation of the
428 * vnode memory consumption. The size is specified directly to
429 * eliminate dependency on NFS-private header.
430 *
431 * Other filesystems may use bigger or smaller (like UFS and ZFS)
432 * private inode data, but the NFS-based estimation is ample enough.
433 * Still, we care about differences in the size between 64- and 32-bit
434 * platforms.
435 *
436 * Namecache structure size is heuristically
437 * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
438 */
439 #ifdef _LP64
440 #define NFS_NCLNODE_SZ (528 + 64)
441 #define NC_SZ 148
442 #else
443 #define NFS_NCLNODE_SZ (360 + 32)
444 #define NC_SZ 92
445 #endif
446
447 static void
448 vntblinit(void *dummy __unused)
449 {
450 u_int i;
451 int physvnodes, virtvnodes;
452
453 /*
454 * Desiredvnodes is a function of the physical memory size and the
455 * kernel's heap size. Generally speaking, it scales with the
456 * physical memory size. The ratio of desiredvnodes to the physical
457 * memory size is 1:16 until desiredvnodes exceeds 98,304.
458 * Thereafter, the
459 * marginal ratio of desiredvnodes to the physical memory size is
460 * 1:64. However, desiredvnodes is limited by the kernel's heap
461 * size. The memory required by desiredvnodes vnodes and vm objects
462 * must not exceed 1/10th of the kernel's heap size.
463 */
464 physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
465 3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
466 virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
467 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
468 desiredvnodes = min(physvnodes, virtvnodes);
469 if (desiredvnodes > MAXVNODES_MAX) {
470 if (bootverbose)
471 printf("Reducing kern.maxvnodes %d -> %d\n",
472 desiredvnodes, MAXVNODES_MAX);
473 desiredvnodes = MAXVNODES_MAX;
474 }
475 wantfreevnodes = desiredvnodes / 4;
476 mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
477 TAILQ_INIT(&vnode_free_list);
478 mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
479 vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
480 vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
481 vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
482 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
483 /*
484 * Preallocate enough nodes to support one-per buf so that
485 * we can not fail an insert. reassignbuf() callers can not
486 * tolerate the insertion failure.
487 */
488 buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
489 NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
490 UMA_ZONE_NOFREE | UMA_ZONE_VM);
491 uma_prealloc(buf_trie_zone, nbuf);
492
493 vnodes_created = counter_u64_alloc(M_WAITOK);
494 recycles_count = counter_u64_alloc(M_WAITOK);
495 free_owe_inact = counter_u64_alloc(M_WAITOK);
496
497 /*
498 * Initialize the filesystem syncer.
499 */
500 syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
501 &syncer_mask);
502 syncer_maxdelay = syncer_mask + 1;
503 mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
504 cv_init(&sync_wakeup, "syncer");
505 for (i = 1; i <= sizeof(struct vnode); i <<= 1)
506 vnsz2log++;
507 vnsz2log--;
508 }
509 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
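A worked example of the sizing above, assuming a hypothetical machine with 16 GiB of RAM, so pgtok(vm_cnt.v_page_count) is about 16,777,216 KB, and ignoring the maxproc term:

	/*
	 * physvnodes ~= 16777216/64 + 3 * min(98304 * 16, 16777216)/64
	 *            =  262144     + 3 * 1572864/64
	 *            =  262144     + 73728 ~= 335,872 vnodes,
	 * further capped by virtvnodes (1/10 of the kernel heap) and by
	 * MAXVNODES_MAX (512*1024*1024/64 = 8,388,608).
	 */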
510
511
512 /*
513 * Mark a mount point as busy. Used to synchronize access and to delay
514 * unmounting. Note that mountlist_mtx is not released on failure.
515 *
516 * vfs_busy() is a custom lock, it can block the caller.
517 * vfs_busy() only sleeps if the unmount is active on the mount point.
518 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
519 * vnode belonging to mp.
520 *
521 * Lookup uses vfs_busy() to traverse mount points.
522 * root fs var fs
523 * / vnode lock A / vnode lock (/var) D
524 * /var vnode lock B /log vnode lock(/var/log) E
525 * vfs_busy lock C vfs_busy lock F
526 *
527 * Within each file system, the lock order is C->A->B and F->D->E.
528 *
529 * When traversing across mounts, the system follows that lock order:
530 *
531 * C->A->B
532 * |
533 * +->F->D->E
534 *
535 * The lookup() process for namei("/var") illustrates the process:
536 * VOP_LOOKUP() obtains B while A is held
537 * vfs_busy() obtains a shared lock on F while A and B are held
538 * vput() releases lock on B
539 * vput() releases lock on A
540 * VFS_ROOT() obtains lock on D while shared lock on F is held
541 * vfs_unbusy() releases shared lock on F
542 * vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
543 * Attempt to lock A (instead of vp_crossmp) while D is held would
544 * violate the global order, causing deadlocks.
545 *
546 * dounmount() locks B while F is drained.
547 */
548 int
549 vfs_busy(struct mount *mp, int flags)
550 {
551
552 MPASS((flags & ~MBF_MASK) == 0);
553 CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
554
555 MNT_ILOCK(mp);
556 MNT_REF(mp);
557 /*
558 * If mount point is currently being unmounted, sleep until the
559 * mount point fate is decided. If thread doing the unmounting fails,
560 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
561 * that this mount point has survived the unmount attempt and vfs_busy
562 * should retry. Otherwise the unmounter thread will set MNTK_REFEXPIRE
563 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
564 * about to be really destroyed. vfs_busy needs to release its
565 * reference on the mount point in this case and return with ENOENT,
566 * telling the caller that the mount point it tried to busy is no longer
567 * valid.
568 */
569 while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
570 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
571 MNT_REL(mp);
572 MNT_IUNLOCK(mp);
573 CTR1(KTR_VFS, "%s: failed busying before sleeping",
574 __func__);
575 return (ENOENT);
576 }
577 if (flags & MBF_MNTLSTLOCK)
578 mtx_unlock(&mountlist_mtx);
579 mp->mnt_kern_flag |= MNTK_MWAIT;
580 msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
581 if (flags & MBF_MNTLSTLOCK)
582 mtx_lock(&mountlist_mtx);
583 MNT_ILOCK(mp);
584 }
585 if (flags & MBF_MNTLSTLOCK)
586 mtx_unlock(&mountlist_mtx);
587 mp->mnt_lockref++;
588 MNT_IUNLOCK(mp);
589 return (0);
590 }
591
592 /*
593 * Free a busy filesystem.
594 */
595 void
596 vfs_unbusy(struct mount *mp)
597 {
598
599 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
600 MNT_ILOCK(mp);
601 MNT_REL(mp);
602 KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
603 mp->mnt_lockref--;
604 if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
605 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
606 CTR1(KTR_VFS, "%s: waking up waiters", __func__);
607 mp->mnt_kern_flag &= ~MNTK_DRAINING;
608 wakeup(&mp->mnt_lockref);
609 }
610 MNT_IUNLOCK(mp);
611 }
612
613 /*
614 * Lookup a mount point by filesystem identifier.
615 */
616 struct mount *
617 vfs_getvfs(fsid_t *fsid)
618 {
619 struct mount *mp;
620
621 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
622 mtx_lock(&mountlist_mtx);
623 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
624 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
625 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
626 vfs_ref(mp);
627 mtx_unlock(&mountlist_mtx);
628 return (mp);
629 }
630 }
631 mtx_unlock(&mountlist_mtx);
632 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
633 return ((struct mount *) 0);
634 }
635
636 /*
637 * Lookup a mount point by filesystem identifier, busying it before
638 * returning.
639 *
640 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
641 * cache for popular filesystem identifiers. The cache is lockless, using
642 * the fact that struct mounts are never freed. In the worst case we may
643 * get a pointer to an unmounted or even a different filesystem, so we
644 * have to check what we got, and take the slow path if so.
645 */
646 struct mount *
647 vfs_busyfs(fsid_t *fsid)
648 {
649 #define FSID_CACHE_SIZE 256
650 typedef struct mount * volatile vmp_t;
651 static vmp_t cache[FSID_CACHE_SIZE];
652 struct mount *mp;
653 int error;
654 uint32_t hash;
655
656 CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
657 hash = fsid->val[0] ^ fsid->val[1];
658 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
659 mp = cache[hash];
660 if (mp == NULL ||
661 mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
662 mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
663 goto slow;
664 if (vfs_busy(mp, 0) != 0) {
665 cache[hash] = NULL;
666 goto slow;
667 }
668 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
669 mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
670 return (mp);
671 else
672 vfs_unbusy(mp);
673
674 slow:
675 mtx_lock(&mountlist_mtx);
676 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
677 if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
678 mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
679 error = vfs_busy(mp, MBF_MNTLSTLOCK);
680 if (error) {
681 cache[hash] = NULL;
682 mtx_unlock(&mountlist_mtx);
683 return (NULL);
684 }
685 cache[hash] = mp;
686 return (mp);
687 }
688 }
689 CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
690 mtx_unlock(&mountlist_mtx);
691 return ((struct mount *) 0);
692 }
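An illustrative usage sketch of the lookup above, following the file-handle translation pattern used by NFS-style code; the fhandle_t pointer "fhp" and the surrounding error handling are hypothetical:

	struct mount *mp;
	struct vnode *vp;
	int error;

	mp = vfs_busyfs(&fhp->fh_fsid);
	if (mp == NULL)
		return (ESTALE);
	error = VFS_FHTOVP(mp, &fhp->fh_fid, LK_EXCLUSIVE, &vp);
	vfs_unbusy(mp);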
693
694 /*
695 * Check if a user can access privileged mount options.
696 */
697 int
698 vfs_suser(struct mount *mp, struct thread *td)
699 {
700 int error;
701
702 if (jailed(td->td_ucred)) {
703 /*
704 * If the jail of the calling thread lacks permission for
705 * this type of file system, deny immediately.
706 */
707 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
708 return (EPERM);
709
710 /*
711 * If the file system was mounted outside the jail of the
712 * calling thread, deny immediately.
713 */
714 if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
715 return (EPERM);
716 }
717
718 /*
719 * If file system supports delegated administration, we don't check
720 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
721 * by the file system itself.
722 * If this is not the user that did original mount, we check for
723 * the PRIV_VFS_MOUNT_OWNER privilege.
724 */
725 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
726 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
727 if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
728 return (error);
729 }
730 return (0);
731 }
732
733 /*
734 * Get a new unique fsid. Try to make its val[0] unique, since this value
735 * will be used to create fake device numbers for stat(). Also try (but
736 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
737 * support 16-bit device numbers. We end up with unique val[0]'s for the
738 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
739 *
740 * Keep in mind that several mounts may be running in parallel. Starting
741 * the search one past where the previous search terminated is both a
742 * micro-optimization and a defense against returning the same fsid to
743 * different mounts.
744 */
745 void
746 vfs_getnewfsid(struct mount *mp)
747 {
748 static uint16_t mntid_base;
749 struct mount *nmp;
750 fsid_t tfsid;
751 int mtype;
752
753 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
754 mtx_lock(&mntid_mtx);
755 mtype = mp->mnt_vfc->vfc_typenum;
756 tfsid.val[1] = mtype;
757 mtype = (mtype & 0xFF) << 24;
758 for (;;) {
759 tfsid.val[0] = makedev(255,
760 mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
761 mntid_base++;
762 if ((nmp = vfs_getvfs(&tfsid)) == NULL)
763 break;
764 vfs_rel(nmp);
765 }
766 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
767 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
768 mtx_unlock(&mntid_mtx);
769 }
770
771 /*
772 * Knob to control the precision of file timestamps:
773 *
774 * 0 = seconds only; nanoseconds zeroed.
775 * 1 = seconds and nanoseconds, accurate within 1/HZ.
776 * 2 = seconds and nanoseconds, truncated to microseconds.
777 * >=3 = seconds and nanoseconds, maximum precision.
778 */
779 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
780
781 static int timestamp_precision = TSP_USEC;
782 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
783 &timestamp_precision, 0, "File timestamp precision (0: seconds, "
784 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
785 "3+: sec + ns (max. precision))");
786
787 /*
788 * Get a current timestamp.
789 */
790 void
791 vfs_timestamp(struct timespec *tsp)
792 {
793 struct timeval tv;
794
795 switch (timestamp_precision) {
796 case TSP_SEC:
797 tsp->tv_sec = time_second;
798 tsp->tv_nsec = 0;
799 break;
800 case TSP_HZ:
801 getnanotime(tsp);
802 break;
803 case TSP_USEC:
804 microtime(&tv);
805 TIMEVAL_TO_TIMESPEC(&tv, tsp);
806 break;
807 case TSP_NSEC:
808 default:
809 nanotime(tsp);
810 break;
811 }
812 }
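A minimal usage sketch: a filesystem stamping a modification time at the configured precision before copying it into its own inode; "ip" and its on-disk fields are hypothetical:

	struct timespec ts;

	vfs_timestamp(&ts);
	ip->i_mtime = ts.tv_sec;	/* hypothetical on-disk fields */
	ip->i_mtimensec = ts.tv_nsec;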
813
814 /*
815 * Set vnode attributes to VNOVAL
816 */
817 void
818 vattr_null(struct vattr *vap)
819 {
820
821 vap->va_type = VNON;
822 vap->va_size = VNOVAL;
823 vap->va_bytes = VNOVAL;
824 vap->va_mode = VNOVAL;
825 vap->va_nlink = VNOVAL;
826 vap->va_uid = VNOVAL;
827 vap->va_gid = VNOVAL;
828 vap->va_fsid = VNOVAL;
829 vap->va_fileid = VNOVAL;
830 vap->va_blocksize = VNOVAL;
831 vap->va_rdev = VNOVAL;
832 vap->va_atime.tv_sec = VNOVAL;
833 vap->va_atime.tv_nsec = VNOVAL;
834 vap->va_mtime.tv_sec = VNOVAL;
835 vap->va_mtime.tv_nsec = VNOVAL;
836 vap->va_ctime.tv_sec = VNOVAL;
837 vap->va_ctime.tv_nsec = VNOVAL;
838 vap->va_birthtime.tv_sec = VNOVAL;
839 vap->va_birthtime.tv_nsec = VNOVAL;
840 vap->va_flags = VNOVAL;
841 vap->va_gen = VNOVAL;
842 vap->va_vaflags = 0;
843 }
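A typical caller uses the VATTR_NULL() wrapper for this routine so that a following VOP_SETATTR() changes only the fields set explicitly afterwards. A short sketch, assuming vp is locked and that "length" and the credential come from the hypothetical caller:

	struct vattr vattr;

	VATTR_NULL(&vattr);		/* every field becomes VNOVAL ("don't change") */
	vattr.va_size = length;		/* request only a size change */
	error = VOP_SETATTR(vp, &vattr, cred);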
844
845 /*
846 * This routine is called when we have too many vnodes. It attempts
847 * to free <count> vnodes and will potentially free vnodes that still
848 * have VM backing store (VM backing store is typically the cause
849 * of a vnode blowout so we want to do this). Therefore, this operation
850 * is not considered cheap.
851 *
852 * A number of conditions may prevent a vnode from being reclaimed:
853 * the buffer cache may have references on the vnode, a directory
854 * vnode may still have references due to the namei cache representing
855 * underlying files, or the vnode may be in active use. It is not
856 * desirable to reuse such vnodes. These conditions may cause the
857 * number of vnodes to reach some minimum value regardless of what
858 * you set kern.maxvnodes to. Do not set kern.maxvnodes too low.
859 */
860 static int
861 vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
862 {
863 struct vnode *vp;
864 int count, done, target;
865
866 done = 0;
867 vn_start_write(NULL, &mp, V_WAIT);
868 MNT_ILOCK(mp);
869 count = mp->mnt_nvnodelistsize;
870 target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
871 target = target / 10 + 1;
872 while (count != 0 && done < target) {
873 vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
874 while (vp != NULL && vp->v_type == VMARKER)
875 vp = TAILQ_NEXT(vp, v_nmntvnodes);
876 if (vp == NULL)
877 break;
878 /*
879 * XXX LRU is completely broken for non-free vnodes. First
880 * by calling here in mountpoint order, then by moving
881 * unselected vnodes to the end here, and most grossly by
882 * removing the vlruvp() function that was supposed to
883 * maintain the order. (This function was born broken
884 * since syncer problems prevented it doing anything.) The
885 * order is closer to LRC (C = Created).
886 *
887 * LRU reclaiming of vnodes seems to have last worked in
888 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
889 * Then there was no hold count, and inactive vnodes were
890 * simply put on the free list in LRU order. The separate
891 * lists also break LRU. We prefer to reclaim from the
892 * free list for technical reasons. This tends to thrash
893 * the free list to keep very unrecently used held vnodes.
894 * The problem is mitigated by keeping the free list large.
895 */
896 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
897 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
898 --count;
899 if (!VI_TRYLOCK(vp))
900 goto next_iter;
901 /*
902 * If it's been deconstructed already, it's still
903 * referenced, or it exceeds the trigger, skip it.
904 * Also skip free vnodes. We are trying to make space
905 * to expand the free list, not reduce it.
906 */
907 if (vp->v_usecount ||
908 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
909 ((vp->v_iflag & VI_FREE) != 0) ||
910 (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
911 vp->v_object->resident_page_count > trigger)) {
912 VI_UNLOCK(vp);
913 goto next_iter;
914 }
915 MNT_IUNLOCK(mp);
916 vholdl(vp);
917 if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
918 vdrop(vp);
919 goto next_iter_mntunlocked;
920 }
921 VI_LOCK(vp);
922 /*
923 * v_usecount may have been bumped after VOP_LOCK() dropped
924 * the vnode interlock and before it was locked again.
925 *
926 * It is not necessary to recheck VI_DOOMED because it can
927 * only be set by another thread that holds both the vnode
928 * lock and vnode interlock. If another thread has the
929 * vnode lock before we get to VOP_LOCK() and obtains the
930 * vnode interlock after VOP_LOCK() drops the vnode
931 * interlock, the other thread will be unable to drop the
932 * vnode lock before our VOP_LOCK() call fails.
933 */
934 if (vp->v_usecount ||
935 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
936 (vp->v_iflag & VI_FREE) != 0 ||
937 (vp->v_object != NULL &&
938 vp->v_object->resident_page_count > trigger)) {
939 VOP_UNLOCK(vp, LK_INTERLOCK);
940 vdrop(vp);
941 goto next_iter_mntunlocked;
942 }
943 KASSERT((vp->v_iflag & VI_DOOMED) == 0,
944 ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
945 counter_u64_add(recycles_count, 1);
946 vgonel(vp);
947 VOP_UNLOCK(vp, 0);
948 vdropl(vp);
949 done++;
950 next_iter_mntunlocked:
951 if (!should_yield())
952 goto relock_mnt;
953 goto yield;
954 next_iter:
955 if (!should_yield())
956 continue;
957 MNT_IUNLOCK(mp);
958 yield:
959 kern_yield(PRI_USER);
960 relock_mnt:
961 MNT_ILOCK(mp);
962 }
963 MNT_IUNLOCK(mp);
964 vn_finished_write(mp);
965 return done;
966 }
967
968 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
969 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
970 0,
971 "limit on vnode free requests per call to the vnlru_free routine");
972
973 /*
974 * Attempt to reduce the free list by the requested amount.
975 */
976 static void
977 vnlru_free_locked(int count, struct vfsops *mnt_op)
978 {
979 struct vnode *vp;
980 struct mount *mp;
981 bool tried_batches;
982
983 tried_batches = false;
984 mtx_assert(&vnode_free_list_mtx, MA_OWNED);
985 if (count > max_vnlru_free)
986 count = max_vnlru_free;
987 for (; count > 0; count--) {
988 vp = TAILQ_FIRST(&vnode_free_list);
989 /*
990 * The list can be modified while the free_list_mtx
991 * has been dropped and vp could be NULL here.
992 */
993 if (vp == NULL) {
994 if (tried_batches)
995 break;
996 mtx_unlock(&vnode_free_list_mtx);
997 vnlru_return_batches(mnt_op);
998 tried_batches = true;
999 mtx_lock(&vnode_free_list_mtx);
1000 continue;
1001 }
1002
1003 VNASSERT(vp->v_op != NULL, vp,
1004 ("vnlru_free: vnode already reclaimed."));
1005 KASSERT((vp->v_iflag & VI_FREE) != 0,
1006 ("Removing vnode not on freelist"));
1007 KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
1008 ("Mangling active vnode"));
1009 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
1010
1011 /*
1012 * Don't recycle if our vnode is from a different type
1013 * of mount point. Note that mp is type-stable, so the
1014 * check does not touch an unmapped address even if the
1015 * vnode is reclaimed.
1016 * Don't recycle if we can't get the interlock without
1017 * blocking.
1018 */
1019 if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1020 mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
1021 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
1022 continue;
1023 }
1024 VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
1025 vp, ("vp inconsistent on freelist"));
1026
1027 /*
1028 * The clear of VI_FREE prevents activation of the
1029 * vnode. There is no sense in putting the vnode on
1030 * the mount point active list, only to remove it
1031 * later during recycling. Inline the relevant part
1032 * of vholdl(), to avoid triggering assertions or
1033 * activating.
1034 */
1035 freevnodes--;
1036 vp->v_iflag &= ~VI_FREE;
1037 VNODE_REFCOUNT_FENCE_REL();
1038 refcount_acquire(&vp->v_holdcnt);
1039
1040 mtx_unlock(&vnode_free_list_mtx);
1041 VI_UNLOCK(vp);
1042 vtryrecycle(vp);
1043 /*
1044 * If the recycling succeeded, this vdrop will actually free
1045 * the vnode. If not it will simply place it back on
1046 * the free list.
1047 */
1048 vdrop(vp);
1049 mtx_lock(&vnode_free_list_mtx);
1050 }
1051 }
1052
1053 void
1054 vnlru_free(int count, struct vfsops *mnt_op)
1055 {
1056
1057 mtx_lock(&vnode_free_list_mtx);
1058 vnlru_free_locked(count, mnt_op);
1059 mtx_unlock(&vnode_free_list_mtx);
1060 }
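An illustrative sketch of the exported interface above: a filesystem that maintains large per-vnode caches can ask the VFS to recycle free vnodes belonging only to it; the count and the "myfs_vfsops" name are placeholders:

	vnlru_free(32, &myfs_vfsops);	/* recycle up to 32 free "myfs" vnodes */
	vnlru_free(32, NULL);		/* or: recycle from any filesystem */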
1061
1062
1063 /* XXX some names and initialization are bad for limits and watermarks. */
1064 static int
1065 vspace(void)
1066 {
1067 int space;
1068
1069 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
1070 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
1071 vlowat = vhiwat / 2;
1072 if (numvnodes > desiredvnodes)
1073 return (0);
1074 space = desiredvnodes - numvnodes;
1075 if (freevnodes > wantfreevnodes)
1076 space += freevnodes - wantfreevnodes;
1077 return (space);
1078 }
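A worked example of these watermarks, assuming a hypothetical desiredvnodes of 1,000,000 and the default wantfreevnodes = desiredvnodes / 4:

	/*
	 * gapvnodes = imax(1000000 - 250000, 100) = 750,000
	 * vhiwat    = 750000 / 11 ~= 68,181   (the ~9% watermark)
	 * vlowat    = vhiwat / 2  ~= 34,090   (the ~4% watermark)
	 * vspace() then returns the remaining headroom: desiredvnodes -
	 * numvnodes, plus any free vnodes in excess of wantfreevnodes.
	 */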
1079
1080 static void
1081 vnlru_return_batch_locked(struct mount *mp)
1082 {
1083 struct vnode *vp;
1084
1085 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
1086
1087 if (mp->mnt_tmpfreevnodelistsize == 0)
1088 return;
1089
1090 TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
1091 VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
1092 ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
1093 vp->v_mflag &= ~VMP_TMPMNTFREELIST;
1094 }
1095 mtx_lock(&vnode_free_list_mtx);
1096 TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
1097 freevnodes += mp->mnt_tmpfreevnodelistsize;
1098 mtx_unlock(&vnode_free_list_mtx);
1099 mp->mnt_tmpfreevnodelistsize = 0;
1100 }
1101
1102 static void
1103 vnlru_return_batch(struct mount *mp)
1104 {
1105
1106 mtx_lock(&mp->mnt_listmtx);
1107 vnlru_return_batch_locked(mp);
1108 mtx_unlock(&mp->mnt_listmtx);
1109 }
1110
1111 static void
1112 vnlru_return_batches(struct vfsops *mnt_op)
1113 {
1114 struct mount *mp, *nmp;
1115 bool need_unbusy;
1116
1117 mtx_lock(&mountlist_mtx);
1118 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1119 need_unbusy = false;
1120 if (mnt_op != NULL && mp->mnt_op != mnt_op)
1121 goto next;
1122 if (mp->mnt_tmpfreevnodelistsize == 0)
1123 goto next;
1124 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
1125 vnlru_return_batch(mp);
1126 need_unbusy = true;
1127 mtx_lock(&mountlist_mtx);
1128 }
1129 next:
1130 nmp = TAILQ_NEXT(mp, mnt_list);
1131 if (need_unbusy)
1132 vfs_unbusy(mp);
1133 }
1134 mtx_unlock(&mountlist_mtx);
1135 }
1136
1137 /*
1138 * Attempt to recycle vnodes in a context that is always safe to block.
1139 * Calling vlrureclaim() from the bowels of filesystem code has some
1140 * interesting deadlock problems.
1141 */
1142 static struct proc *vnlruproc;
1143 static int vnlruproc_sig;
1144
1145 static void
1146 vnlru_proc(void)
1147 {
1148 struct mount *mp, *nmp;
1149 unsigned long onumvnodes;
1150 int done, force, reclaim_nc_src, trigger, usevnodes;
1151
1152 EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1153 SHUTDOWN_PRI_FIRST);
1154
1155 force = 0;
1156 for (;;) {
1157 kproc_suspend_check(vnlruproc);
1158 mtx_lock(&vnode_free_list_mtx);
1159 /*
1160 * If numvnodes is too large (due to desiredvnodes being
1161 * adjusted using its sysctl, or emergency growth), first
1162 * try to reduce it by discarding from the free list.
1163 */
1164 if (numvnodes > desiredvnodes)
1165 vnlru_free_locked(numvnodes - desiredvnodes, NULL);
1166 /*
1167 * Sleep if the vnode cache is in a good state. This is
1168 * when it is not over-full and has space for about a 4%
1169 * or 9% expansion (by growing its size or inexcessively
1170 * reducing its free list). Otherwise, try to reclaim
1171 * space for a 10% expansion.
1172 */
1173 if (vstir && force == 0) {
1174 force = 1;
1175 vstir = 0;
1176 }
1177 if (vspace() >= vlowat && force == 0) {
1178 vnlruproc_sig = 0;
1179 wakeup(&vnlruproc_sig);
1180 msleep(vnlruproc, &vnode_free_list_mtx,
1181 PVFS|PDROP, "vlruwt", hz);
1182 continue;
1183 }
1184 mtx_unlock(&vnode_free_list_mtx);
1185 done = 0;
1186 onumvnodes = numvnodes;
1187 /*
1188 * Calculate parameters for recycling. These are the same
1189 * throughout the loop to give some semblance of fairness.
1190 * The trigger point is to avoid recycling vnodes with lots
1191 * of resident pages. We aren't trying to free memory; we
1192 * are trying to recycle or at least free vnodes.
1193 */
1194 if (numvnodes <= desiredvnodes)
1195 usevnodes = numvnodes - freevnodes;
1196 else
1197 usevnodes = numvnodes;
1198 if (usevnodes <= 0)
1199 usevnodes = 1;
1200 /*
1201 * The trigger value is chosen to give a conservatively
1202 * large value to ensure that it alone doesn't prevent
1203 * making progress. The value can easily be so large that
1204 * it is effectively infinite in some congested and
1205 * misconfigured cases, and this is necessary. Normally
1206 * it is about 8 to 100 (pages), which is quite large.
1207 */
1208 trigger = vm_cnt.v_page_count * 2 / usevnodes;
1209 if (force < 2)
1210 trigger = vsmalltrigger;
1211 reclaim_nc_src = force >= 3;
1212 mtx_lock(&mountlist_mtx);
1213 for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1214 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
1215 nmp = TAILQ_NEXT(mp, mnt_list);
1216 continue;
1217 }
1218 done += vlrureclaim(mp, reclaim_nc_src, trigger);
1219 mtx_lock(&mountlist_mtx);
1220 nmp = TAILQ_NEXT(mp, mnt_list);
1221 vfs_unbusy(mp);
1222 }
1223 mtx_unlock(&mountlist_mtx);
1224 if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
1225 uma_reclaim();
1226 if (done == 0) {
1227 if (force == 0 || force == 1) {
1228 force = 2;
1229 continue;
1230 }
1231 if (force == 2) {
1232 force = 3;
1233 continue;
1234 }
1235 force = 0;
1236 vnlru_nowhere++;
1237 tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1238 } else
1239 kern_yield(PRI_USER);
1240 /*
1241 * After becoming active to expand above low water, keep
1242 * active until above high water.
1243 */
1244 force = vspace() < vhiwat;
1245 }
1246 }
1247
1248 static struct kproc_desc vnlru_kp = {
1249 "vnlru",
1250 vnlru_proc,
1251 &vnlruproc
1252 };
1253 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1254 &vnlru_kp);
1255
1256 /*
1257 * Routines having to do with the management of the vnode table.
1258 */
1259
1260 /*
1261 * Try to recycle a freed vnode. We abort if anyone picks up a reference
1262 * before we actually vgone(). This function must be called with the vnode
1263 * held to prevent the vnode from being returned to the free list midway
1264 * through vgone().
1265 */
1266 static int
1267 vtryrecycle(struct vnode *vp)
1268 {
1269 struct mount *vnmp;
1270
1271 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1272 VNASSERT(vp->v_holdcnt, vp,
1273 ("vtryrecycle: Recycling vp %p without a reference.", vp));
1274 /*
1275 * This vnode may be found and locked via some other list; if so we
1276 * can't recycle it yet.
1277 */
1278 if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1279 CTR2(KTR_VFS,
1280 "%s: impossible to recycle, vp %p lock is already held",
1281 __func__, vp);
1282 return (EWOULDBLOCK);
1283 }
1284 /*
1285 * Don't recycle if its filesystem is being suspended.
1286 */
1287 if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1288 VOP_UNLOCK(vp, 0);
1289 CTR2(KTR_VFS,
1290 "%s: impossible to recycle, cannot start the write for %p",
1291 __func__, vp);
1292 return (EBUSY);
1293 }
1294 /*
1295 * If we got this far, we need to acquire the interlock and see if
1296 * anyone picked up this vnode from another list. If not, we will
1297 * mark it with DOOMED via vgonel() so that anyone who does find it
1298 * will skip over it.
1299 */
1300 VI_LOCK(vp);
1301 if (vp->v_usecount) {
1302 VOP_UNLOCK(vp, LK_INTERLOCK);
1303 vn_finished_write(vnmp);
1304 CTR2(KTR_VFS,
1305 "%s: impossible to recycle, %p is already referenced",
1306 __func__, vp);
1307 return (EBUSY);
1308 }
1309 if ((vp->v_iflag & VI_DOOMED) == 0) {
1310 counter_u64_add(recycles_count, 1);
1311 vgonel(vp);
1312 }
1313 VOP_UNLOCK(vp, LK_INTERLOCK);
1314 vn_finished_write(vnmp);
1315 return (0);
1316 }
1317
1318 static void
1319 vcheckspace(void)
1320 {
1321
1322 if (vspace() < vlowat && vnlruproc_sig == 0) {
1323 vnlruproc_sig = 1;
1324 wakeup(vnlruproc);
1325 }
1326 }
1327
1328 /*
1329 * Wait if necessary for space for a new vnode.
1330 */
1331 static int
1332 getnewvnode_wait(int suspended)
1333 {
1334
1335 mtx_assert(&vnode_free_list_mtx, MA_OWNED);
1336 if (numvnodes >= desiredvnodes) {
1337 if (suspended) {
1338 /*
1339 * The file system is being suspended. We cannot
1340 * risk a deadlock here, so allow allocation of
1341 * another vnode even if this would give too many.
1342 */
1343 return (0);
1344 }
1345 if (vnlruproc_sig == 0) {
1346 vnlruproc_sig = 1; /* avoid unnecessary wakeups */
1347 wakeup(vnlruproc);
1348 }
1349 msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
1350 "vlruwk", hz);
1351 }
1352 /* Post-adjust like the pre-adjust in getnewvnode(). */
1353 if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
1354 vnlru_free_locked(1, NULL);
1355 return (numvnodes >= desiredvnodes ? ENFILE : 0);
1356 }
1357
1358 /*
1359 * This hack is fragile, and probably not needed any more now that the
1360 * watermark handling works.
1361 */
1362 void
1363 getnewvnode_reserve(u_int count)
1364 {
1365 struct thread *td;
1366
1367 /* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
1368 /* XXX no longer so quick, but this part is not racy. */
1369 mtx_lock(&vnode_free_list_mtx);
1370 if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
1371 vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
1372 freevnodes - wantfreevnodes), NULL);
1373 mtx_unlock(&vnode_free_list_mtx);
1374
1375 td = curthread;
1376 /* First try to be quick and racy. */
1377 if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
1378 td->td_vp_reserv += count;
1379 vcheckspace(); /* XXX no longer so quick, but more racy */
1380 return;
1381 } else
1382 atomic_subtract_long(&numvnodes, count);
1383
1384 mtx_lock(&vnode_free_list_mtx);
1385 while (count > 0) {
1386 if (getnewvnode_wait(0) == 0) {
1387 count--;
1388 td->td_vp_reserv++;
1389 atomic_add_long(&numvnodes, 1);
1390 }
1391 }
1392 vcheckspace();
1393 mtx_unlock(&vnode_free_list_mtx);
1394 }
1395
1396 /*
1397 * This hack is fragile, especially if desiredvnodes or wantfreevnodes are
1398 * misconfigured or changed significantly. Reducing desiredvnodes below
1399 * the reserved amount should cause bizarre behaviour like reducing it
1400 * below the number of active vnodes -- the system will try to reduce
1401 * numvnodes to match, but should fail, so the subtraction below should
1402 * not overflow.
1403 */
1404 void
1405 getnewvnode_drop_reserve(void)
1406 {
1407 struct thread *td;
1408
1409 td = curthread;
1410 atomic_subtract_long(&numvnodes, td->td_vp_reserv);
1411 td->td_vp_reserv = 0;
1412 }
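An illustrative sketch of the reserve/drop pair above: a filesystem reserves a vnode before entering a region where sleeping in getnewvnode() would be unsafe (ZFS uses this pattern when creating new nodes); the tag, vop vector, and surrounding code are hypothetical:

	getnewvnode_reserve(1);
	/* ...acquire locks that must not be held while getnewvnode() sleeps... */
	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
	/* ...finish setting up the vnode... */
	getnewvnode_drop_reserve();	/* return any unused part of the reservation */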
1413
1414 /*
1415 * Return the next vnode from the free list.
1416 */
1417 int
1418 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
1419 struct vnode **vpp)
1420 {
1421 struct vnode *vp;
1422 struct thread *td;
1423 struct lock_object *lo;
1424 static int cyclecount;
1425 int error __unused;
1426
1427 CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1428 vp = NULL;
1429 td = curthread;
1430 if (td->td_vp_reserv > 0) {
1431 td->td_vp_reserv -= 1;
1432 goto alloc;
1433 }
1434 mtx_lock(&vnode_free_list_mtx);
1435 if (numvnodes < desiredvnodes)
1436 cyclecount = 0;
1437 else if (cyclecount++ >= freevnodes) {
1438 cyclecount = 0;
1439 vstir = 1;
1440 }
1441 /*
1442 * Grow the vnode cache if it will not be above its target max
1443 * after growing. Otherwise, if the free list is nonempty, try
1444 * to reclaim 1 item from it before growing the cache (possibly
1445 * above its target max if the reclamation failed or is delayed).
1446 * Otherwise, wait for some space. In all cases, schedule
1447 * vnlru_proc() if we are getting short of space. The watermarks
1448 * should be chosen so that we never wait or even reclaim from
1449 * the free list to below its target minimum.
1450 */
1451 if (numvnodes + 1 <= desiredvnodes)
1452 ;
1453 else if (freevnodes > 0)
1454 vnlru_free_locked(1, NULL);
1455 else {
1456 error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
1457 MNTK_SUSPEND));
1458 #if 0 /* XXX Not all VFS_VGET/ffs_vget callers check returns. */
1459 if (error != 0) {
1460 mtx_unlock(&vnode_free_list_mtx);
1461 return (error);
1462 }
1463 #endif
1464 }
1465 vcheckspace();
1466 atomic_add_long(&numvnodes, 1);
1467 mtx_unlock(&vnode_free_list_mtx);
1468 alloc:
1469 counter_u64_add(vnodes_created, 1);
1470 vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
1471 /*
1472 * Locks are given the generic name "vnode" when created.
1473 * Follow the historic practice of using the filesystem
1474 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
1475 *
1476 * Locks live in a witness group keyed on their name. Thus,
1477 * when a lock is renamed, it must also move from the witness
1478 * group of its old name to the witness group of its new name.
1479 *
1480 * The change only needs to be made when the vnode moves
1481 * from one filesystem type to another. We ensure that each
1482 * filesystem uses a single static name pointer for its tag so
1483 * that we can compare pointers rather than doing a strcmp().
1484 */
1485 lo = &vp->v_vnlock->lock_object;
1486 if (lo->lo_name != tag) {
1487 lo->lo_name = tag;
1488 WITNESS_DESTROY(lo);
1489 WITNESS_INIT(lo, tag);
1490 }
1491 /*
1492 * By default, don't allow shared locks unless filesystems opt-in.
1493 */
1494 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
1495 /*
1496 * Finalize various vnode identity bits.
1497 */
1498 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
1499 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
1500 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
1501 vp->v_type = VNON;
1502 vp->v_tag = tag;
1503 vp->v_op = vops;
1504 v_init_counters(vp);
1505 vp->v_bufobj.bo_ops = &buf_ops_bio;
1506 #ifdef DIAGNOSTIC
1507 if (mp == NULL && vops != &dead_vnodeops)
1508 printf("NULL mp in getnewvnode(9), tag %s\n", tag);
1509 #endif
1510 #ifdef MAC
1511 mac_vnode_init(vp);
1512 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1513 mac_vnode_associate_singlelabel(mp, vp);
1514 #endif
1515 if (mp != NULL) {
1516 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
1517 if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1518 vp->v_vflag |= VV_NOKNOTE;
1519 }
1520
1521 /*
1522 * For the filesystems which do not use vfs_hash_insert(),
1523 * still initialize v_hash so that vfs_hash_index() stays useful.
1524 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
1525 * its own hashing.
1526 */
1527 vp->v_hash = (uintptr_t)vp >> vnsz2log;
1528
1529 *vpp = vp;
1530 return (0);
1531 }
1532
1533 /*
1534 * Delete from old mount point vnode list, if on one.
1535 */
1536 static void
1537 delmntque(struct vnode *vp)
1538 {
1539 struct mount *mp;
1540 int active;
1541
1542 mp = vp->v_mount;
1543 if (mp == NULL)
1544 return;
1545 MNT_ILOCK(mp);
1546 VI_LOCK(vp);
1547 KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
1548 ("Active vnode list size %d > Vnode list size %d",
1549 mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
1550 active = vp->v_iflag & VI_ACTIVE;
1551 vp->v_iflag &= ~VI_ACTIVE;
1552 if (active) {
1553 mtx_lock(&mp->mnt_listmtx);
1554 TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
1555 mp->mnt_activevnodelistsize--;
1556 mtx_unlock(&mp->mnt_listmtx);
1557 }
1558 vp->v_mount = NULL;
1559 VI_UNLOCK(vp);
1560 VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1561 ("bad mount point vnode list size"));
1562 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1563 mp->mnt_nvnodelistsize--;
1564 MNT_REL(mp);
1565 MNT_IUNLOCK(mp);
1566 }
1567
1568 static void
1569 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1570 {
1571
1572 vp->v_data = NULL;
1573 vp->v_op = &dead_vnodeops;
1574 vgone(vp);
1575 vput(vp);
1576 }
1577
1578 /*
1579 * Insert into list of vnodes for the new mount point, if available.
1580 */
1581 int
1582 insmntque1(struct vnode *vp, struct mount *mp,
1583 void (*dtr)(struct vnode *, void *), void *dtr_arg)
1584 {
1585
1586 KASSERT(vp->v_mount == NULL,
1587 ("insmntque: vnode already on per mount vnode list"));
1588 VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1589 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
1590
1591 /*
1592 * We acquire the vnode interlock early to ensure that the
1593 * vnode cannot be recycled by another process releasing a
1594 * holdcnt on it before we get it on both the vnode list
1595 * and the active vnode list. The mount mutex protects only
1596 * manipulation of the vnode list, while the per-mount list
1597 * mutex (mnt_listmtx) protects only manipulation of the active vnode list.
1598 * Hence the need to hold the vnode interlock throughout.
1599 */
1600 MNT_ILOCK(mp);
1601 VI_LOCK(vp);
1602 if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1603 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1604 mp->mnt_nvnodelistsize == 0)) &&
1605 (vp->v_vflag & VV_FORCEINSMQ) == 0) {
1606 VI_UNLOCK(vp);
1607 MNT_IUNLOCK(mp);
1608 if (dtr != NULL)
1609 dtr(vp, dtr_arg);
1610 return (EBUSY);
1611 }
1612 vp->v_mount = mp;
1613 MNT_REF(mp);
1614 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1615 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1616 ("neg mount point vnode list size"));
1617 mp->mnt_nvnodelistsize++;
1618 KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
1619 ("Activating already active vnode"));
1620 vp->v_iflag |= VI_ACTIVE;
1621 mtx_lock(&mp->mnt_listmtx);
1622 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
1623 mp->mnt_activevnodelistsize++;
1624 mtx_unlock(&mp->mnt_listmtx);
1625 VI_UNLOCK(vp);
1626 MNT_IUNLOCK(mp);
1627 return (0);
1628 }
1629
1630 int
1631 insmntque(struct vnode *vp, struct mount *mp)
1632 {
1633
1634 return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1635 }
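An illustrative allocation pattern showing how a filesystem's VFS_VGET-style code typically pairs getnewvnode() with insmntque(); the tag, vop vector, and private data "ip" are hypothetical:

	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
	if (error != 0)
		return (error);
	vp->v_data = ip;			/* hypothetical per-vnode private data */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* insmntque() expects vp locked */
	error = insmntque(vp, mp);
	if (error != 0) {
		/* The default destructor has already vgone'd and vput'd vp. */
		*vpp = NULL;
		return (error);
	}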
1636
1637 /*
1638 * Flush out and invalidate all buffers associated with a bufobj
1639 * Called with the underlying object locked.
1640 */
1641 int
1642 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1643 {
1644 int error;
1645
1646 BO_LOCK(bo);
1647 if (flags & V_SAVE) {
1648 error = bufobj_wwait(bo, slpflag, slptimeo);
1649 if (error) {
1650 BO_UNLOCK(bo);
1651 return (error);
1652 }
1653 if (bo->bo_dirty.bv_cnt > 0) {
1654 BO_UNLOCK(bo);
1655 if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1656 return (error);
1657 /*
1658 * XXX We could save a lock/unlock if this was only
1659 * enabled under INVARIANTS
1660 */
1661 BO_LOCK(bo);
1662 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1663 panic("vinvalbuf: dirty bufs");
1664 }
1665 }
1666 /*
1667 * If you alter this loop please notice that interlock is dropped and
1668 * reacquired in flushbuflist. Special care is needed to ensure that
1669 * no race conditions occur from this.
1670 */
1671 do {
1672 error = flushbuflist(&bo->bo_clean,
1673 flags, bo, slpflag, slptimeo);
1674 if (error == 0 && !(flags & V_CLEANONLY))
1675 error = flushbuflist(&bo->bo_dirty,
1676 flags, bo, slpflag, slptimeo);
1677 if (error != 0 && error != EAGAIN) {
1678 BO_UNLOCK(bo);
1679 return (error);
1680 }
1681 } while (error != 0);
1682
1683 /*
1684 * Wait for I/O to complete. XXX needs cleaning up. The vnode can
1685 * have write I/O in-progress but if there is a VM object then the
1686 * VM object can also have read-I/O in-progress.
1687 */
1688 do {
1689 bufobj_wwait(bo, 0, 0);
1690 if ((flags & V_VMIO) == 0) {
1691 BO_UNLOCK(bo);
1692 if (bo->bo_object != NULL) {
1693 VM_OBJECT_WLOCK(bo->bo_object);
1694 vm_object_pip_wait(bo->bo_object, "bovlbx");
1695 VM_OBJECT_WUNLOCK(bo->bo_object);
1696 }
1697 BO_LOCK(bo);
1698 }
1699 } while (bo->bo_numoutput > 0);
1700 BO_UNLOCK(bo);
1701
1702 /*
1703 * Destroy the copy in the VM cache, too.
1704 */
1705 if (bo->bo_object != NULL &&
1706 (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
1707 VM_OBJECT_WLOCK(bo->bo_object);
1708 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1709 OBJPR_CLEANONLY : 0);
1710 VM_OBJECT_WUNLOCK(bo->bo_object);
1711 }
1712
1713 #ifdef INVARIANTS
1714 BO_LOCK(bo);
1715 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
1716 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
1717 bo->bo_clean.bv_cnt > 0))
1718 panic("vinvalbuf: flush failed");
1719 if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
1720 bo->bo_dirty.bv_cnt > 0)
1721 panic("vinvalbuf: flush dirty failed");
1722 BO_UNLOCK(bo);
1723 #endif
1724 return (0);
1725 }
1726
1727 /*
1728 * Flush out and invalidate all buffers associated with a vnode.
1729 * Called with the underlying object locked.
1730 */
1731 int
1732 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1733 {
1734
1735 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1736 ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1737 if (vp->v_object != NULL && vp->v_object->handle != vp)
1738 return (0);
1739 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1740 }
1741
1742 /*
1743  * Flush out buffers on the specified list.
1745  */
1746 static int
1747 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1748 int slptimeo)
1749 {
1750 struct buf *bp, *nbp;
1751 int retval, error;
1752 daddr_t lblkno;
1753 b_xflags_t xflags;
1754
1755 ASSERT_BO_WLOCKED(bo);
1756
1757 retval = 0;
1758 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1759 if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1760 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1761 continue;
1762 }
1763 if (nbp != NULL) {
1764 lblkno = nbp->b_lblkno;
1765 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
1766 }
1767 retval = EAGAIN;
1768 error = BUF_TIMELOCK(bp,
1769 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
1770 "flushbuf", slpflag, slptimeo);
1771 if (error) {
1772 BO_LOCK(bo);
1773 return (error != ENOLCK ? error : EAGAIN);
1774 }
1775 KASSERT(bp->b_bufobj == bo,
1776 ("bp %p wrong b_bufobj %p should be %p",
1777 bp, bp->b_bufobj, bo));
1778 /*
1779 * XXX Since there are no node locks for NFS, I
1780 * believe there is a slight chance that a delayed
1781 * write will occur while sleeping just above, so
1782 * check for it.
1783 */
1784 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1785 (flags & V_SAVE)) {
1786 bremfree(bp);
1787 bp->b_flags |= B_ASYNC;
1788 bwrite(bp);
1789 BO_LOCK(bo);
1790 return (EAGAIN); /* XXX: why not loop ? */
1791 }
1792 bremfree(bp);
1793 bp->b_flags |= (B_INVAL | B_RELBUF);
1794 bp->b_flags &= ~B_ASYNC;
1795 brelse(bp);
1796 BO_LOCK(bo);
1797 if (nbp == NULL)
1798 break;
1799 nbp = gbincore(bo, lblkno);
1800 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1801 != xflags)
1802 break; /* nbp invalid */
1803 }
1804 return (retval);
1805 }
1806
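/*
 * Release the buffers in the range [startn, endn) back to the system,
 * marking them B_RELBUF and, for vnodes with a VM object, B_NOREUSE so
 * that the backing pages are treated as unlikely to be reused.
 */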
1807 int
1808 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
1809 {
1810 struct buf *bp;
1811 int error;
1812 daddr_t lblkno;
1813
1814 ASSERT_BO_LOCKED(bo);
1815
1816 for (lblkno = startn;;) {
1817 again:
1818 bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno);
1819 if (bp == NULL || bp->b_lblkno >= endn ||
1820 bp->b_lblkno < startn)
1821 break;
1822 error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
1823 LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
1824 if (error != 0) {
1825 BO_RLOCK(bo);
1826 if (error == ENOLCK)
1827 goto again;
1828 return (error);
1829 }
1830 KASSERT(bp->b_bufobj == bo,
1831 ("bp %p wrong b_bufobj %p should be %p",
1832 bp, bp->b_bufobj, bo));
1833 lblkno = bp->b_lblkno + 1;
1834 if ((bp->b_flags & B_MANAGED) == 0)
1835 bremfree(bp);
1836 bp->b_flags |= B_RELBUF;
1837 /*
1838 * In the VMIO case, use the B_NOREUSE flag to hint that the
1839 * pages backing each buffer in the range are unlikely to be
1840 * reused. Dirty buffers will have the hint applied once
1841 * they've been written.
1842 */
1843 if (bp->b_vp->v_object != NULL)
1844 bp->b_flags |= B_NOREUSE;
1845 brelse(bp);
1846 BO_RLOCK(bo);
1847 }
1848 return (0);
1849 }
1850
1851 /*
1852 * Truncate a file's buffer and pages to a specified length. This
1853 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1854 * sync activity.
1855 */
1856 int
1857 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize)
1858 {
1859 struct buf *bp, *nbp;
1860 int anyfreed;
1861 int trunclbn;
1862 struct bufobj *bo;
1863
1864 CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1865 vp, cred, blksize, (uintmax_t)length);
1866
1867 /*
1868 * Round up to the *next* lbn.
1869 */
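	/*
	 * For example, length 4097 with blksize 4096 yields trunclbn 2
	 * (howmany() rounds up), so buffers at lbn >= 2 are invalidated
	 * below while lbn 0 and 1 are kept.
	 */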
1870 trunclbn = howmany(length, blksize);
1871
1872 ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1873 restart:
1874 bo = &vp->v_bufobj;
1875 BO_LOCK(bo);
1876 anyfreed = 1;
1877 for (;anyfreed;) {
1878 anyfreed = 0;
1879 TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1880 if (bp->b_lblkno < trunclbn)
1881 continue;
1882 if (BUF_LOCK(bp,
1883 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1884 BO_LOCKPTR(bo)) == ENOLCK)
1885 goto restart;
1886
1887 bremfree(bp);
1888 bp->b_flags |= (B_INVAL | B_RELBUF);
1889 bp->b_flags &= ~B_ASYNC;
1890 brelse(bp);
1891 anyfreed = 1;
1892
1893 BO_LOCK(bo);
1894 if (nbp != NULL &&
1895 (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1896 (nbp->b_vp != vp) ||
1897 (nbp->b_flags & B_DELWRI))) {
1898 BO_UNLOCK(bo);
1899 goto restart;
1900 }
1901 }
1902
1903 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1904 if (bp->b_lblkno < trunclbn)
1905 continue;
1906 if (BUF_LOCK(bp,
1907 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1908 BO_LOCKPTR(bo)) == ENOLCK)
1909 goto restart;
1910 bremfree(bp);
1911 bp->b_flags |= (B_INVAL | B_RELBUF);
1912 bp->b_flags &= ~B_ASYNC;
1913 brelse(bp);
1914 anyfreed = 1;
1915
1916 BO_LOCK(bo);
1917 if (nbp != NULL &&
1918 (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1919 (nbp->b_vp != vp) ||
1920 (nbp->b_flags & B_DELWRI) == 0)) {
1921 BO_UNLOCK(bo);
1922 goto restart;
1923 }
1924 }
1925 }
1926
1927 if (length > 0) {
1928 restartsync:
1929 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1930 if (bp->b_lblkno > 0)
1931 continue;
1932 /*
1933 * Since we hold the vnode lock this should only
1934 * fail if we're racing with the buf daemon.
1935 */
1936 if (BUF_LOCK(bp,
1937 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1938 BO_LOCKPTR(bo)) == ENOLCK) {
1939 goto restart;
1940 }
1941 VNASSERT((bp->b_flags & B_DELWRI), vp,
1942 ("buf(%p) on dirty queue without DELWRI", bp));
1943
1944 bremfree(bp);
1945 bawrite(bp);
1946 BO_LOCK(bo);
1947 goto restartsync;
1948 }
1949 }
1950
1951 bufobj_wwait(bo, 0, 0);
1952 BO_UNLOCK(bo);
1953 vnode_pager_setsize(vp, length);
1954
1955 return (0);
1956 }
1957
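/*
 * Remove the buffer from whichever of the bufobj's clean or dirty lists
 * it is on, dropping it from both the list's tail queue and its pctrie
 * and clearing the corresponding BX_VNCLEAN/BX_VNDIRTY flag.
 */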
1958 static void
1959 buf_vlist_remove(struct buf *bp)
1960 {
1961 struct bufv *bv;
1962
1963 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1964 ASSERT_BO_WLOCKED(bp->b_bufobj);
1965 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1966 (BX_VNDIRTY|BX_VNCLEAN),
1967 ("buf_vlist_remove: Buf %p is on two lists", bp));
1968 if (bp->b_xflags & BX_VNDIRTY)
1969 bv = &bp->b_bufobj->bo_dirty;
1970 else
1971 bv = &bp->b_bufobj->bo_clean;
1972 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
1973 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1974 bv->bv_cnt--;
1975 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1976 }
1977
1978 /*
1979 * Add the buffer to the sorted clean or dirty block list.
1980 *
1981 * NOTE: xflags is passed as a constant, optimizing this inline function!
1982 */
1983 static void
1984 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1985 {
1986 struct bufv *bv;
1987 struct buf *n;
1988 int error;
1989
1990 ASSERT_BO_WLOCKED(bo);
1991 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
1992 ("dead bo %p", bo));
1993 KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1994 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1995 bp->b_xflags |= xflags;
1996 if (xflags & BX_VNDIRTY)
1997 bv = &bo->bo_dirty;
1998 else
1999 bv = &bo->bo_clean;
2000
2001 /*
2002 * Keep the list ordered. Optimize empty list insertion. Assume
2003 * we tend to grow at the tail so lookup_le should usually be cheaper
2004 * than _ge.
2005 */
2006 if (bv->bv_cnt == 0 ||
2007 bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno)
2008 TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
2009 else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL)
2010 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
2011 else
2012 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
2013 error = BUF_PCTRIE_INSERT(&bv->bv_root, bp);
2014 if (error)
2015 panic("buf_vlist_add: Preallocated nodes insufficient.");
2016 bv->bv_cnt++;
2017 }
2018
2019 /*
2020 * Look up a buffer using the buffer tries.
2021 */
2022 struct buf *
2023 gbincore(struct bufobj *bo, daddr_t lblkno)
2024 {
2025 struct buf *bp;
2026
2027 ASSERT_BO_LOCKED(bo);
2028 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
2029 if (bp != NULL)
2030 return (bp);
2031 return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno);
2032 }
2033
2034 /*
2035 * Associate a buffer with a vnode.
2036 */
2037 void
2038 bgetvp(struct vnode *vp, struct buf *bp)
2039 {
2040 struct bufobj *bo;
2041
2042 bo = &vp->v_bufobj;
2043 ASSERT_BO_WLOCKED(bo);
2044 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
2045
2046 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2047 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2048 ("bgetvp: bp already attached! %p", bp));
2049
2050 vhold(vp);
2051 bp->b_vp = vp;
2052 bp->b_bufobj = bo;
2053 /*
2054 * Insert onto list for new vnode.
2055 */
2056 buf_vlist_add(bp, bo, BX_VNCLEAN);
2057 }
2058
2059 /*
2060 * Disassociate a buffer from a vnode.
2061 */
2062 void
2063 brelvp(struct buf *bp)
2064 {
2065 struct bufobj *bo;
2066 struct vnode *vp;
2067
2068 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2069 KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
2070
2071 /*
2072 * Delete from old vnode list, if on one.
2073 */
2074 vp = bp->b_vp; /* XXX */
2075 bo = bp->b_bufobj;
2076 BO_LOCK(bo);
2077 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2078 buf_vlist_remove(bp);
2079 else
2080 panic("brelvp: Buffer %p not on queue.", bp);
2081 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2082 bo->bo_flag &= ~BO_ONWORKLST;
2083 mtx_lock(&sync_mtx);
2084 LIST_REMOVE(bo, bo_synclist);
2085 syncer_worklist_len--;
2086 mtx_unlock(&sync_mtx);
2087 }
2088 bp->b_vp = NULL;
2089 bp->b_bufobj = NULL;
2090 BO_UNLOCK(bo);
2091 vdrop(vp);
2092 }
2093
2094 /*
2095 * Add an item to the syncer work queue.
2096 */
2097 static void
2098 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
2099 {
2100 int slot;
2101
2102 ASSERT_BO_WLOCKED(bo);
2103
2104 mtx_lock(&sync_mtx);
2105 if (bo->bo_flag & BO_ONWORKLST)
2106 LIST_REMOVE(bo, bo_synclist);
2107 else {
2108 bo->bo_flag |= BO_ONWORKLST;
2109 syncer_worklist_len++;
2110 }
2111
2112 if (delay > syncer_maxdelay - 2)
2113 delay = syncer_maxdelay - 2;
2114 slot = (syncer_delayno + delay) & syncer_mask;
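	/*
	 * The workitem array is used as a wheel: masking with syncer_mask
	 * keeps the index in range, so the bufobj lands in the slot that
	 * will be processed roughly "delay" seconds from the wheel's
	 * current position.
	 */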
2115
2116 LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
2117 mtx_unlock(&sync_mtx);
2118 }
2119
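/*
 * Report the number of vnodes on the syncer worklist, excluding the
 * syncer vnodes themselves, for the vfs.worklist_len sysctl below.
 */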
2120 static int
2121 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2122 {
2123 int error, len;
2124
2125 mtx_lock(&sync_mtx);
2126 len = syncer_worklist_len - sync_vnode_count;
2127 mtx_unlock(&sync_mtx);
2128 error = SYSCTL_OUT(req, &len, sizeof(len));
2129 return (error);
2130 }
2131
2132 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
2133 sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2134
2135 static struct proc *updateproc;
2136 static void sched_sync(void);
2137 static struct kproc_desc up_kp = {
2138 "syncer",
2139 sched_sync,
2140 &updateproc
2141 };
2142 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2143
2144 static int
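/*
 * Sync the vnode at the head of the given worklist slot, if it can be
 * locked and written.  A return value of 1 tells the caller that the
 * bufobj is still at the head of the slot and should be pushed into the
 * next slot; 0 means it was handled (or the slot was empty).
 */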
2145 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2146 {
2147 struct vnode *vp;
2148 struct mount *mp;
2149
2150 *bo = LIST_FIRST(slp);
2151 if (*bo == NULL)
2152 return (0);
2153 vp = bo2vnode(*bo);
2154 if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2155 return (1);
2156 /*
2157 * We use vhold in case the vnode does not
2158 * successfully sync. vhold prevents the vnode from
2159 * going away when we unlock the sync_mtx so that
2160 * we can acquire the vnode interlock.
2161 */
2162 vholdl(vp);
2163 mtx_unlock(&sync_mtx);
2164 VI_UNLOCK(vp);
2165 if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2166 vdrop(vp);
2167 mtx_lock(&sync_mtx);
2168 return (*bo == LIST_FIRST(slp));
2169 }
2170 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2171 (void) VOP_FSYNC(vp, MNT_LAZY, td);
2172 VOP_UNLOCK(vp, 0);
2173 vn_finished_write(mp);
2174 BO_LOCK(*bo);
2175 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2176 /*
2177 * Put us back on the worklist. The worklist
2178 * routine will remove us from our current
2179 * position and then add us back in at a later
2180 * position.
2181 */
2182 vn_syncer_add_to_worklist(*bo, syncdelay);
2183 }
2184 BO_UNLOCK(*bo);
2185 vdrop(vp);
2186 mtx_lock(&sync_mtx);
2187 return (0);
2188 }
2189
2190 static int first_printf = 1;
2191
2192 /*
2193 * System filesystem synchronizer daemon.
2194 */
2195 static void
2196 sched_sync(void)
2197 {
2198 struct synclist *next, *slp;
2199 struct bufobj *bo;
2200 long starttime;
2201 struct thread *td = curthread;
2202 int last_work_seen;
2203 int net_worklist_len;
2204 int syncer_final_iter;
2205 int error;
2206
2207 last_work_seen = 0;
2208 syncer_final_iter = 0;
2209 syncer_state = SYNCER_RUNNING;
2210 starttime = time_uptime;
2211 td->td_pflags |= TDP_NORUNNINGBUF;
2212
2213 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2214 SHUTDOWN_PRI_LAST);
2215
2216 mtx_lock(&sync_mtx);
2217 for (;;) {
2218 if (syncer_state == SYNCER_FINAL_DELAY &&
2219 syncer_final_iter == 0) {
2220 mtx_unlock(&sync_mtx);
2221 kproc_suspend_check(td->td_proc);
2222 mtx_lock(&sync_mtx);
2223 }
2224 net_worklist_len = syncer_worklist_len - sync_vnode_count;
2225 if (syncer_state != SYNCER_RUNNING &&
2226 starttime != time_uptime) {
2227 if (first_printf) {
2228 printf("\nSyncing disks, vnodes remaining... ");
2229 first_printf = 0;
2230 }
2231 printf("%d ", net_worklist_len);
2232 }
2233 starttime = time_uptime;
2234
2235 /*
2236 * Push files whose dirty time has expired. Be careful
2237 * of interrupt race on slp queue.
2238 *
2239 * Skip over empty worklist slots when shutting down.
2240 */
2241 do {
2242 slp = &syncer_workitem_pending[syncer_delayno];
2243 syncer_delayno += 1;
2244 if (syncer_delayno == syncer_maxdelay)
2245 syncer_delayno = 0;
2246 next = &syncer_workitem_pending[syncer_delayno];
2247 /*
2248 			 * If the worklist has wrapped since the time
2249 			 * it was emptied of all but syncer vnodes,
2250 * switch to the FINAL_DELAY state and run
2251 * for one more second.
2252 */
2253 if (syncer_state == SYNCER_SHUTTING_DOWN &&
2254 net_worklist_len == 0 &&
2255 last_work_seen == syncer_delayno) {
2256 syncer_state = SYNCER_FINAL_DELAY;
2257 syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2258 }
2259 } while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2260 syncer_worklist_len > 0);
2261
2262 /*
2263 * Keep track of the last time there was anything
2264 * on the worklist other than syncer vnodes.
2265 * Return to the SHUTTING_DOWN state if any
2266 * new work appears.
2267 */
2268 if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2269 last_work_seen = syncer_delayno;
2270 if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2271 syncer_state = SYNCER_SHUTTING_DOWN;
2272 while (!LIST_EMPTY(slp)) {
2273 error = sync_vnode(slp, &bo, td);
2274 if (error == 1) {
2275 LIST_REMOVE(bo, bo_synclist);
2276 LIST_INSERT_HEAD(next, bo, bo_synclist);
2277 continue;
2278 }
2279
2280 if (first_printf == 0) {
2281 /*
2282 * Drop the sync mutex, because some watchdog
2283 				 * drivers need to sleep while patting the watchdog.
2284 */
2285 mtx_unlock(&sync_mtx);
2286 wdog_kern_pat(WD_LASTVAL);
2287 mtx_lock(&sync_mtx);
2288 }
2289
2290 }
2291 if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2292 syncer_final_iter--;
2293 /*
2294 * The variable rushjob allows the kernel to speed up the
2295 * processing of the filesystem syncer process. A rushjob
2296 * value of N tells the filesystem syncer to process the next
2297 * N seconds worth of work on its queue ASAP. Currently rushjob
2298 * is used by the soft update code to speed up the filesystem
2299 * syncer process when the incore state is getting so far
2300 * ahead of the disk that the kernel memory pool is being
2301 * threatened with exhaustion.
2302 */
2303 if (rushjob > 0) {
2304 rushjob -= 1;
2305 continue;
2306 }
2307 /*
2308 * Just sleep for a short period of time between
2309 * iterations when shutting down to allow some I/O
2310 * to happen.
2311 *
2312 * If it has taken us less than a second to process the
2313 * current work, then wait. Otherwise start right over
2314 * again. We can still lose time if any single round
2315 * takes more than two seconds, but it does not really
2316 * matter as we are just trying to generally pace the
2317 * filesystem activity.
2318 */
2319 if (syncer_state != SYNCER_RUNNING ||
2320 time_uptime == starttime) {
2321 thread_lock(td);
2322 sched_prio(td, PPAUSE);
2323 thread_unlock(td);
2324 }
2325 if (syncer_state != SYNCER_RUNNING)
2326 cv_timedwait(&sync_wakeup, &sync_mtx,
2327 hz / SYNCER_SHUTDOWN_SPEEDUP);
2328 else if (time_uptime == starttime)
2329 cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2330 }
2331 }
2332
2333 /*
2334 * Request the syncer daemon to speed up its work.
2335 * We never push it to speed up more than half of its
2336  * normal turn time; otherwise it could take over the CPU.
2337 */
2338 int
2339 speedup_syncer(void)
2340 {
2341 int ret = 0;
2342
2343 mtx_lock(&sync_mtx);
2344 if (rushjob < syncdelay / 2) {
2345 rushjob += 1;
2346 stat_rush_requests += 1;
2347 ret = 1;
2348 }
2349 mtx_unlock(&sync_mtx);
2350 cv_broadcast(&sync_wakeup);
2351 return (ret);
2352 }
2353
2354 /*
2355  * Tell the syncer to speed up its work and run through its work
2356 * list several times, then tell it to shut down.
2357 */
2358 static void
2359 syncer_shutdown(void *arg, int howto)
2360 {
2361
2362 if (howto & RB_NOSYNC)
2363 return;
2364 mtx_lock(&sync_mtx);
2365 syncer_state = SYNCER_SHUTTING_DOWN;
2366 rushjob = 0;
2367 mtx_unlock(&sync_mtx);
2368 cv_broadcast(&sync_wakeup);
2369 kproc_shutdown(arg, howto);
2370 }
2371
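/*
 * Wind the syncer down before system suspend by running it through its
 * shutdown sequence; syncer_resume() below restarts it.
 */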
2372 void
2373 syncer_suspend(void)
2374 {
2375
2376 syncer_shutdown(updateproc, 0);
2377 }
2378
2379 void
2380 syncer_resume(void)
2381 {
2382
2383 mtx_lock(&sync_mtx);
2384 first_printf = 1;
2385 syncer_state = SYNCER_RUNNING;
2386 mtx_unlock(&sync_mtx);
2387 cv_broadcast(&sync_wakeup);
2388 kproc_resume(updateproc);
2389 }
2390
2391 /*
2392 * Reassign a buffer from one vnode to another.
2393 * Used to assign file specific control information
2394 * (indirect blocks) to the vnode to which they belong.
2395 */
2396 void
2397 reassignbuf(struct buf *bp)
2398 {
2399 struct vnode *vp;
2400 struct bufobj *bo;
2401 int delay;
2402 #ifdef INVARIANTS
2403 struct bufv *bv;
2404 #endif
2405
2406 vp = bp->b_vp;
2407 bo = bp->b_bufobj;
2408 ++reassignbufcalls;
2409
2410 CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2411 bp, bp->b_vp, bp->b_flags);
2412 /*
2413 * B_PAGING flagged buffers cannot be reassigned because their vp
2414 * is not fully linked in.
2415 */
2416 if (bp->b_flags & B_PAGING)
2417 panic("cannot reassign paging buffer");
2418
2419 /*
2420 * Delete from old vnode list, if on one.
2421 */
2422 BO_LOCK(bo);
2423 if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2424 buf_vlist_remove(bp);
2425 else
2426 panic("reassignbuf: Buffer %p not on queue.", bp);
2427 /*
2428 * If dirty, put on list of dirty buffers; otherwise insert onto list
2429 * of clean buffers.
2430 */
2431 if (bp->b_flags & B_DELWRI) {
2432 if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2433 switch (vp->v_type) {
2434 case VDIR:
2435 delay = dirdelay;
2436 break;
2437 case VCHR:
2438 delay = metadelay;
2439 break;
2440 default:
2441 delay = filedelay;
2442 }
2443 vn_syncer_add_to_worklist(bo, delay);
2444 }
2445 buf_vlist_add(bp, bo, BX_VNDIRTY);
2446 } else {
2447 buf_vlist_add(bp, bo, BX_VNCLEAN);
2448
2449 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2450 mtx_lock(&sync_mtx);
2451 LIST_REMOVE(bo, bo_synclist);
2452 syncer_worklist_len--;
2453 mtx_unlock(&sync_mtx);
2454 bo->bo_flag &= ~BO_ONWORKLST;
2455 }
2456 }
2457 #ifdef INVARIANTS
2458 bv = &bo->bo_clean;
2459 bp = TAILQ_FIRST(&bv->bv_hd);
2460 KASSERT(bp == NULL || bp->b_bufobj == bo,
2461 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2462 bp = TAILQ_LAST(&bv->bv_hd, buflists);
2463 KASSERT(bp == NULL || bp->b_bufobj == bo,
2464 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2465 bv = &bo->bo_dirty;
2466 bp = TAILQ_FIRST(&bv->bv_hd);
2467 KASSERT(bp == NULL || bp->b_bufobj == bo,
2468 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2469 bp = TAILQ_LAST(&bv->bv_hd, buflists);
2470 KASSERT(bp == NULL || bp->b_bufobj == bo,
2471 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2472 #endif
2473 BO_UNLOCK(bo);
2474 }
2475
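/*
 * Initialize the reference counters of a newly constructed vnode; the
 * hold and use counts both start at 1 for the caller's reference.
 */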
2476 static void
2477 v_init_counters(struct vnode *vp)
2478 {
2479
2480 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
2481 vp, ("%s called for an initialized vnode", __FUNCTION__));
2482 ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
2483
2484 refcount_init(&vp->v_holdcnt, 1);
2485 refcount_init(&vp->v_usecount, 1);
2486 }
2487
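/*
 * Increment the use count of a vnode whose interlock is already held,
 * clearing any pending VI_OWEINACT request along the way.
 */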
2488 static void
2489 v_incr_usecount_locked(struct vnode *vp)
2490 {
2491
2492 ASSERT_VI_LOCKED(vp, __func__);
2493 if ((vp->v_iflag & VI_OWEINACT) != 0) {
2494 VNASSERT(vp->v_usecount == 0, vp,
2495 ("vnode with usecount and VI_OWEINACT set"));
2496 vp->v_iflag &= ~VI_OWEINACT;
2497 }
2498 refcount_acquire(&vp->v_usecount);
2499 v_incr_devcount(vp);
2500 }
2501
2502 /*
2503 * Increment the use count on the vnode, taking care to reference
2504 * the driver's usecount if this is a chardev.
2505 */
2506 static void
2507 v_incr_usecount(struct vnode *vp)
2508 {
2509
2510 ASSERT_VI_UNLOCKED(vp, __func__);
2511 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2512
2513 if (vp->v_type != VCHR &&
2514 refcount_acquire_if_not_zero(&vp->v_usecount)) {
2515 VNODE_REFCOUNT_FENCE_ACQ();
2516 VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2517 ("vnode with usecount and VI_OWEINACT set"));
2518 } else {
2519 VI_LOCK(vp);
2520 v_incr_usecount_locked(vp);
2521 VI_UNLOCK(vp);
2522 }
2523 }
2524
2525 /*
2526 * Increment si_usecount of the associated device, if any.
2527 */
2528 static void
2529 v_incr_devcount(struct vnode *vp)
2530 {
2531
2532 ASSERT_VI_LOCKED(vp, __FUNCTION__);
2533 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2534 dev_lock();
2535 vp->v_rdev->si_usecount++;
2536 dev_unlock();
2537 }
2538 }
2539
2540 /*
2541 * Decrement si_usecount of the associated device, if any.
2542 */
2543 static void
2544 v_decr_devcount(struct vnode *vp)
2545 {
2546
2547 ASSERT_VI_LOCKED(vp, __FUNCTION__);
2548 if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2549 dev_lock();
2550 vp->v_rdev->si_usecount--;
2551 dev_unlock();
2552 }
2553 }
2554
2555 /*
2556 * Grab a particular vnode from the free list, increment its
2557 * reference count and lock it. VI_DOOMED is set if the vnode
2558 * is being destroyed. Only callers who specify LK_RETRY will
2559 * see doomed vnodes. If inactive processing was delayed in
2560  * vput, try to do it here.
2561 *
2562 * Notes on lockless counter manipulation:
2563 * _vhold, vputx and other routines make various decisions based
2564 * on either holdcnt or usecount being 0. As long as either counter
2565 * is not transitioning 0->1 nor 1->0, the manipulation can be done
2566 * with atomic operations. Otherwise the interlock is taken covering
2567 * both the atomic and additional actions.
2568 */
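/*
 * As an illustration of the note above, _vhold() first tries
 * refcount_acquire_if_not_zero() on v_holdcnt and only falls back to
 * taking the vnode interlock when the count may be doing a 0->1
 * transition (i.e. the vnode may be sitting on a free list).
 */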
2569 int
2570 vget(struct vnode *vp, int flags, struct thread *td)
2571 {
2572 int error, oweinact;
2573
2574 VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2575 ("vget: invalid lock operation"));
2576
2577 if ((flags & LK_INTERLOCK) != 0)
2578 ASSERT_VI_LOCKED(vp, __func__);
2579 else
2580 ASSERT_VI_UNLOCKED(vp, __func__);
2581 if ((flags & LK_VNHELD) != 0)
2582 VNASSERT((vp->v_holdcnt > 0), vp,
2583 ("vget: LK_VNHELD passed but vnode not held"));
2584
2585 CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2586
2587 if ((flags & LK_VNHELD) == 0)
2588 _vhold(vp, (flags & LK_INTERLOCK) != 0);
2589
2590 if ((error = vn_lock(vp, flags)) != 0) {
2591 vdrop(vp);
2592 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2593 vp);
2594 return (error);
2595 }
2596 if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2597 panic("vget: vn_lock failed to return ENOENT\n");
2598 /*
2599 * We don't guarantee that any particular close will
2600 * trigger inactive processing so just make a best effort
2601 * here at preventing a reference to a removed file. If
2602 * we don't succeed no harm is done.
2603 *
2604 * Upgrade our holdcnt to a usecount.
2605 */
2606 if (vp->v_type == VCHR ||
2607 !refcount_acquire_if_not_zero(&vp->v_usecount)) {
2608 VI_LOCK(vp);
2609 if ((vp->v_iflag & VI_OWEINACT) == 0) {
2610 oweinact = 0;
2611 } else {
2612 oweinact = 1;
2613 vp->v_iflag &= ~VI_OWEINACT;
2614 VNODE_REFCOUNT_FENCE_REL();
2615 }
2616 refcount_acquire(&vp->v_usecount);
2617 v_incr_devcount(vp);
2618 if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2619 (flags & LK_NOWAIT) == 0)
2620 vinactive(vp, td);
2621 VI_UNLOCK(vp);
2622 }
2623 return (0);
2624 }
2625
2626 /*
2627 * Increase the reference (use) and hold count of a vnode.
2628 * This will also remove the vnode from the free list if it is presently free.
2629 */
2630 void
2631 vref(struct vnode *vp)
2632 {
2633
2634 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2635 _vhold(vp, false);
2636 v_incr_usecount(vp);
2637 }
2638
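/*
 * Same as vref(), but the vnode interlock must already be held.
 */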
2639 void
2640 vrefl(struct vnode *vp)
2641 {
2642
2643 ASSERT_VI_LOCKED(vp, __func__);
2644 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2645 _vhold(vp, true);
2646 v_incr_usecount_locked(vp);
2647 }
2648
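/*
 * Add use and hold references to a vnode the caller already knows to be
 * referenced, avoiding the interlock for non-character devices.
 */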
2649 void
2650 vrefact(struct vnode *vp)
2651 {
2652
2653 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2654 if (__predict_false(vp->v_type == VCHR)) {
2655 VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
2656 ("%s: wrong ref counts", __func__));
2657 vref(vp);
2658 return;
2659 }
2660 #ifdef INVARIANTS
2661 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
2662 VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
2663 old = atomic_fetchadd_int(&vp->v_usecount, 1);
2664 VNASSERT(old > 0, vp, ("%s: wrong use count", __func__));
2665 #else
2666 refcount_acquire(&vp->v_holdcnt);
2667 refcount_acquire(&vp->v_usecount);
2668 #endif
2669 }
2670
2671 /*
2672 * Return reference count of a vnode.
2673 *
2674 * The results of this call are only guaranteed when some mechanism is used to
2675 * stop other processes from gaining references to the vnode. This may be the
2676 * case if the caller holds the only reference. This is also useful when stale
2677 * data is acceptable as race conditions may be accounted for by some other
2678 * means.
2679 */
2680 int
2681 vrefcnt(struct vnode *vp)
2682 {
2683
2684 return (vp->v_usecount);
2685 }
2686
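/*
 * The three callers of vputx() below differ only in locking protocol:
 * vrele() is called with the vnode unlocked, vput() with the vnode
 * locked (and unlocks it), and vunref() with the vnode locked (and
 * leaves it locked).
 */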
2687 #define VPUTX_VRELE 1
2688 #define VPUTX_VPUT 2
2689 #define VPUTX_VUNREF 3
2690
2691 /*
2692 * Decrement the use and hold counts for a vnode.
2693 *
2694 * See an explanation near vget() as to why atomic operation is safe.
2695 */
2696 static void
2697 vputx(struct vnode *vp, int func)
2698 {
2699 int error;
2700
2701 KASSERT(vp != NULL, ("vputx: null vp"));
2702 if (func == VPUTX_VUNREF)
2703 ASSERT_VOP_LOCKED(vp, "vunref");
2704 else if (func == VPUTX_VPUT)
2705 ASSERT_VOP_LOCKED(vp, "vput");
2706 else
2707 KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
2708 ASSERT_VI_UNLOCKED(vp, __func__);
2709 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2710
2711 if (vp->v_type != VCHR &&
2712 refcount_release_if_not_last(&vp->v_usecount)) {
2713 if (func == VPUTX_VPUT)
2714 VOP_UNLOCK(vp, 0);
2715 vdrop(vp);
2716 return;
2717 }
2718
2719 VI_LOCK(vp);
2720
2721 /*
2722 * We want to hold the vnode until the inactive finishes to
2723 * prevent vgone() races. We drop the use count here and the
2724 * hold count below when we're done.
2725 */
2726 if (!refcount_release(&vp->v_usecount) ||
2727 (vp->v_iflag & VI_DOINGINACT)) {
2728 if (func == VPUTX_VPUT)
2729 VOP_UNLOCK(vp, 0);
2730 v_decr_devcount(vp);
2731 vdropl(vp);
2732 return;
2733 }
2734
2735 v_decr_devcount(vp);
2736
2737 error = 0;
2738
2739 if (vp->v_usecount != 0) {
2740 vn_printf(vp, "vputx: usecount not zero for vnode ");
2741 panic("vputx: usecount not zero");
2742 }
2743
2744 CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2745
2746 /*
2747 * We must call VOP_INACTIVE with the node locked. Mark
2748 * as VI_DOINGINACT to avoid recursion.
2749 */
2750 vp->v_iflag |= VI_OWEINACT;
2751 switch (func) {
2752 case VPUTX_VRELE:
2753 error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
2754 VI_LOCK(vp);
2755 break;
2756 case VPUTX_VPUT:
2757 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2758 error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
2759 LK_NOWAIT);
2760 VI_LOCK(vp);
2761 }
2762 break;
2763 case VPUTX_VUNREF:
2764 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2765 error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
2766 VI_LOCK(vp);
2767 }
2768 break;
2769 }
2770 VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
2771 ("vnode with usecount and VI_OWEINACT set"));
2772 if (error == 0) {
2773 if (vp->v_iflag & VI_OWEINACT)
2774 vinactive(vp, curthread);
2775 if (func != VPUTX_VUNREF)
2776 VOP_UNLOCK(vp, 0);
2777 }
2778 vdropl(vp);
2779 }
2780
2781 /*
2782 * Vnode put/release.
2783 * If count drops to zero, call inactive routine and return to freelist.
2784 */
2785 void
2786 vrele(struct vnode *vp)
2787 {
2788
2789 vputx(vp, VPUTX_VRELE);
2790 }
2791
2792 /*
2793  * Release an already locked vnode. This gives the same effect as
2794  * unlock+vrele(), but takes less time and avoids releasing and
2795  * re-acquiring the lock (as vrele() acquires the lock internally).
2796 */
2797 void
2798 vput(struct vnode *vp)
2799 {
2800
2801 vputx(vp, VPUTX_VPUT);
2802 }
2803
2804 /*
2805 * Release an exclusively locked vnode. Do not unlock the vnode lock.
2806 */
2807 void
2808 vunref(struct vnode *vp)
2809 {
2810
2811 vputx(vp, VPUTX_VUNREF);
2812 }
2813
2814 /*
2815 * Increase the hold count and activate if this is the first reference.
2816 */
2817 void
2818 _vhold(struct vnode *vp, bool locked)
2819 {
2820 struct mount *mp;
2821
2822 if (locked)
2823 ASSERT_VI_LOCKED(vp, __func__);
2824 else
2825 ASSERT_VI_UNLOCKED(vp, __func__);
2826 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2827 if (!locked) {
2828 if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
2829 VNODE_REFCOUNT_FENCE_ACQ();
2830 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2831 ("_vhold: vnode with holdcnt is free"));
2832 return;
2833 }
2834 VI_LOCK(vp);
2835 }
2836 if ((vp->v_iflag & VI_FREE) == 0) {
2837 refcount_acquire(&vp->v_holdcnt);
2838 if (!locked)
2839 VI_UNLOCK(vp);
2840 return;
2841 }
2842 VNASSERT(vp->v_holdcnt == 0, vp,
2843 ("%s: wrong hold count", __func__));
2844 VNASSERT(vp->v_op != NULL, vp,
2845 ("%s: vnode already reclaimed.", __func__));
2846 /*
2847 * Remove a vnode from the free list, mark it as in use,
2848 * and put it on the active list.
2849 */
2850 VNASSERT(vp->v_mount != NULL, vp,
2851 ("_vhold: vnode not on per mount vnode list"));
2852 mp = vp->v_mount;
2853 mtx_lock(&mp->mnt_listmtx);
2854 if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) {
2855 TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist);
2856 mp->mnt_tmpfreevnodelistsize--;
2857 vp->v_mflag &= ~VMP_TMPMNTFREELIST;
2858 } else {
2859 mtx_lock(&vnode_free_list_mtx);
2860 TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
2861 freevnodes--;
2862 mtx_unlock(&vnode_free_list_mtx);
2863 }
2864 KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
2865 ("Activating already active vnode"));
2866 vp->v_iflag &= ~VI_FREE;
2867 vp->v_iflag |= VI_ACTIVE;
2868 TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
2869 mp->mnt_activevnodelistsize++;
2870 mtx_unlock(&mp->mnt_listmtx);
2871 refcount_acquire(&vp->v_holdcnt);
2872 if (!locked)
2873 VI_UNLOCK(vp);
2874 }
2875
2876 /*
2877 * Drop the hold count of the vnode. If this is the last reference to
2878 * the vnode we place it on the free list unless it has been vgone'd
2879 * (marked VI_DOOMED) in which case we will free it.
2880 *
2881 * Because the vnode vm object keeps a hold reference on the vnode if
2882 * there is at least one resident non-cached page, the vnode cannot
2883  * leave the active list until that page cleanup has been done.
2884 */
2885 void
2886 _vdrop(struct vnode *vp, bool locked)
2887 {
2888 struct bufobj *bo;
2889 struct mount *mp;
2890 int active;
2891
2892 if (locked)
2893 ASSERT_VI_LOCKED(vp, __func__);
2894 else
2895 ASSERT_VI_UNLOCKED(vp, __func__);
2896 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2897 if ((int)vp->v_holdcnt <= 0)
2898 panic("vdrop: holdcnt %d", vp->v_holdcnt);
2899 if (!locked) {
2900 if (refcount_release_if_not_last(&vp->v_holdcnt))
2901 return;
2902 VI_LOCK(vp);
2903 }
2904 if (refcount_release(&vp->v_holdcnt) == 0) {
2905 VI_UNLOCK(vp);
2906 return;
2907 }
2908 if ((vp->v_iflag & VI_DOOMED) == 0) {
2909 /*
2910 * Mark a vnode as free: remove it from its active list
2911 * and put it up for recycling on the freelist.
2912 */
2913 VNASSERT(vp->v_op != NULL, vp,
2914 ("vdropl: vnode already reclaimed."));
2915 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2916 ("vnode already free"));
2917 VNASSERT(vp->v_holdcnt == 0, vp,
2918 ("vdropl: freeing when we shouldn't"));
2919 active = vp->v_iflag & VI_ACTIVE;
2920 if ((vp->v_iflag & VI_OWEINACT) == 0) {
2921 vp->v_iflag &= ~VI_ACTIVE;
2922 mp = vp->v_mount;
2923 if (mp != NULL) {
2924 mtx_lock(&mp->mnt_listmtx);
2925 if (active) {
2926 TAILQ_REMOVE(&mp->mnt_activevnodelist,
2927 vp, v_actfreelist);
2928 mp->mnt_activevnodelistsize--;
2929 }
2930 TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist,
2931 vp, v_actfreelist);
2932 mp->mnt_tmpfreevnodelistsize++;
2933 vp->v_iflag |= VI_FREE;
2934 vp->v_mflag |= VMP_TMPMNTFREELIST;
2935 VI_UNLOCK(vp);
2936 if (mp->mnt_tmpfreevnodelistsize >=
2937 mnt_free_list_batch)
2938 vnlru_return_batch_locked(mp);
2939 mtx_unlock(&mp->mnt_listmtx);
2940 } else {
2941 VNASSERT(active == 0, vp,
2942 ("vdropl: active vnode not on per mount "
2943 "vnode list"));
2944 mtx_lock(&vnode_free_list_mtx);
2945 TAILQ_INSERT_TAIL(&vnode_free_list, vp,
2946 v_actfreelist);
2947 freevnodes++;
2948 vp->v_iflag |= VI_FREE;
2949 VI_UNLOCK(vp);
2950 mtx_unlock(&vnode_free_list_mtx);
2951 }
2952 } else {
2953 VI_UNLOCK(vp);
2954 counter_u64_add(free_owe_inact, 1);
2955 }
2956 return;
2957 }
2958 /*
2959 * The vnode has been marked for destruction, so free it.
2960 *
2961 * The vnode will be returned to the zone where it will
2962 * normally remain until it is needed for another vnode. We
2963 	 * need to clean up (or verify that the cleanup has already
2964 * been done) any residual data left from its current use
2965 * so as not to contaminate the freshly allocated vnode.
2966 */
2967 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2968 atomic_subtract_long(&numvnodes, 1);
2969 bo = &vp->v_bufobj;
2970 VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2971 ("cleaned vnode still on the free list."));
2972 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2973 VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
2974 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2975 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2976 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2977 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2978 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2979 ("clean blk trie not empty"));
2980 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2981 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2982 ("dirty blk trie not empty"));
2983 VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
2984 VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
2985 VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
2986 VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
2987 ("Dangling rangelock waiters"));
2988 VI_UNLOCK(vp);
2989 #ifdef MAC
2990 mac_vnode_destroy(vp);
2991 #endif
2992 if (vp->v_pollinfo != NULL) {
2993 destroy_vpollinfo(vp->v_pollinfo);
2994 vp->v_pollinfo = NULL;
2995 }
2996 #ifdef INVARIANTS
2997 /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
2998 vp->v_op = NULL;
2999 #endif
3000 vp->v_mountedhere = NULL;
3001 vp->v_unpcb = NULL;
3002 vp->v_rdev = NULL;
3003 vp->v_fifoinfo = NULL;
3004 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
3005 vp->v_iflag = 0;
3006 vp->v_vflag = 0;
3007 bo->bo_flag = 0;
3008 uma_zfree(vnode_zone, vp);
3009 }
3010
3011 /*
3012 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
3013 * flags. DOINGINACT prevents us from recursing in calls to vinactive.
3014 * OWEINACT tracks whether a vnode missed a call to inactive due to a
3015 * failed lock upgrade.
3016 */
3017 void
3018 vinactive(struct vnode *vp, struct thread *td)
3019 {
3020 struct vm_object *obj;
3021
3022 ASSERT_VOP_ELOCKED(vp, "vinactive");
3023 ASSERT_VI_LOCKED(vp, "vinactive");
3024 VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
3025 ("vinactive: recursed on VI_DOINGINACT"));
3026 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3027 vp->v_iflag |= VI_DOINGINACT;
3028 vp->v_iflag &= ~VI_OWEINACT;
3029 VI_UNLOCK(vp);
3030 /*
3031 * Before moving off the active list, we must be sure that any
3032 * modified pages are converted into the vnode's dirty
3033 * buffers, since these will no longer be checked once the
3034 * vnode is on the inactive list.
3035 *
3036 * The write-out of the dirty pages is asynchronous. At the
3037 * point that VOP_INACTIVE() is called, there could still be
3038 * pending I/O and dirty pages in the object.
3039 */
3040 if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
3041 (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
3042 VM_OBJECT_WLOCK(obj);
3043 vm_object_page_clean(obj, 0, 0, 0);
3044 VM_OBJECT_WUNLOCK(obj);
3045 }
3046 VOP_INACTIVE(vp, td);
3047 VI_LOCK(vp);
3048 VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
3049 ("vinactive: lost VI_DOINGINACT"));
3050 vp->v_iflag &= ~VI_DOINGINACT;
3051 }
3052
3053 /*
3054 * Remove any vnodes in the vnode table belonging to mount point mp.
3055 *
3056 * If FORCECLOSE is not specified, there should not be any active ones,
3057 * return error if any are found (nb: this is a user error, not a
3058 * system error). If FORCECLOSE is specified, detach any active vnodes
3059 * that are found.
3060 *
3061 * If WRITECLOSE is set, only flush out regular file vnodes open for
3062 * writing.
3063 *
3064 * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
3065 *
3066 * `rootrefs' specifies the base reference count for the root vnode
3067 * of this filesystem. The root vnode is considered busy if its
3068 * v_usecount exceeds this value. On a successful return, vflush(, td)
3069 * will call vrele() on the root vnode exactly rootrefs times.
3070 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
3071 * be zero.
3072 */
3073 #ifdef DIAGNOSTIC
3074 static int busyprt = 0; /* print out busy vnodes */
3075 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
3076 #endif
3077
3078 int
3079 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
3080 {
3081 struct vnode *vp, *mvp, *rootvp = NULL;
3082 struct vattr vattr;
3083 int busy = 0, error;
3084
3085 CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
3086 rootrefs, flags);
3087 if (rootrefs > 0) {
3088 KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
3089 ("vflush: bad args"));
3090 /*
3091 * Get the filesystem root vnode. We can vput() it
3092 * immediately, since with rootrefs > 0, it won't go away.
3093 */
3094 if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
3095 CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
3096 __func__, error);
3097 return (error);
3098 }
3099 vput(rootvp);
3100 }
3101 loop:
3102 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
3103 vholdl(vp);
3104 error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
3105 if (error) {
3106 vdrop(vp);
3107 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3108 goto loop;
3109 }
3110 /*
3111 * Skip over a vnodes marked VV_SYSTEM.
3112 */
3113 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
3114 VOP_UNLOCK(vp, 0);
3115 vdrop(vp);
3116 continue;
3117 }
3118 /*
3119 * If WRITECLOSE is set, flush out unlinked but still open
3120 * files (even if open only for reading) and regular file
3121 * vnodes open for writing.
3122 */
3123 if (flags & WRITECLOSE) {
3124 if (vp->v_object != NULL) {
3125 VM_OBJECT_WLOCK(vp->v_object);
3126 vm_object_page_clean(vp->v_object, 0, 0, 0);
3127 VM_OBJECT_WUNLOCK(vp->v_object);
3128 }
3129 error = VOP_FSYNC(vp, MNT_WAIT, td);
3130 if (error != 0) {
3131 VOP_UNLOCK(vp, 0);
3132 vdrop(vp);
3133 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3134 return (error);
3135 }
3136 error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3137 VI_LOCK(vp);
3138
3139 if ((vp->v_type == VNON ||
3140 (error == 0 && vattr.va_nlink > 0)) &&
3141 (vp->v_writecount == 0 || vp->v_type != VREG)) {
3142 VOP_UNLOCK(vp, 0);
3143 vdropl(vp);
3144 continue;
3145 }
3146 } else
3147 VI_LOCK(vp);
3148 /*
3149 * With v_usecount == 0, all we need to do is clear out the
3150 * vnode data structures and we are done.
3151 *
3152 * If FORCECLOSE is set, forcibly close the vnode.
3153 */
3154 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
3155 vgonel(vp);
3156 } else {
3157 busy++;
3158 #ifdef DIAGNOSTIC
3159 if (busyprt)
3160 vn_printf(vp, "vflush: busy vnode ");
3161 #endif
3162 }
3163 VOP_UNLOCK(vp, 0);
3164 vdropl(vp);
3165 }
3166 if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
3167 /*
3168 * If just the root vnode is busy, and if its refcount
3169 * is equal to `rootrefs', then go ahead and kill it.
3170 */
3171 VI_LOCK(rootvp);
3172 KASSERT(busy > 0, ("vflush: not busy"));
3173 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
3174 ("vflush: usecount %d < rootrefs %d",
3175 rootvp->v_usecount, rootrefs));
3176 if (busy == 1 && rootvp->v_usecount == rootrefs) {
3177 VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
3178 vgone(rootvp);
3179 VOP_UNLOCK(rootvp, 0);
3180 busy = 0;
3181 } else
3182 VI_UNLOCK(rootvp);
3183 }
3184 if (busy) {
3185 CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
3186 busy);
3187 return (EBUSY);
3188 }
3189 for (; rootrefs > 0; rootrefs--)
3190 vrele(rootvp);
3191 return (0);
3192 }
3193
3194 /*
3195 * Recycle an unused vnode to the front of the free list.
3196 */
3197 int
3198 vrecycle(struct vnode *vp)
3199 {
3200 int recycled;
3201
3202 VI_LOCK(vp);
3203 recycled = vrecyclel(vp);
3204 VI_UNLOCK(vp);
3205 return (recycled);
3206 }
3207
3208 /*
3209 * vrecycle, with the vp interlock held.
3210 */
3211 int
3212 vrecyclel(struct vnode *vp)
3213 {
3214 int recycled;
3215
3216 ASSERT_VOP_ELOCKED(vp, __func__);
3217 ASSERT_VI_LOCKED(vp, __func__);
3218 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3219 recycled = 0;
3220 if (vp->v_usecount == 0) {
3221 recycled = 1;
3222 vgonel(vp);
3223 }
3224 return (recycled);
3225 }
3226
3227 /*
3228 * Eliminate all activity associated with a vnode
3229 * in preparation for reuse.
3230 */
3231 void
3232 vgone(struct vnode *vp)
3233 {
3234 VI_LOCK(vp);
3235 vgonel(vp);
3236 VI_UNLOCK(vp);
3237 }
3238
3239 static void
3240 notify_lowervp_vfs_dummy(struct mount *mp __unused,
3241 struct vnode *lowervp __unused)
3242 {
3243 }
3244
3245 /*
3246  * Notify upper mounts about a reclaimed or unlinked vnode.
3247 */
3248 void
3249 vfs_notify_upper(struct vnode *vp, int event)
3250 {
3251 static struct vfsops vgonel_vfsops = {
3252 .vfs_reclaim_lowervp = notify_lowervp_vfs_dummy,
3253 .vfs_unlink_lowervp = notify_lowervp_vfs_dummy,
3254 };
3255 struct mount *mp, *ump, *mmp;
3256
3257 mp = vp->v_mount;
3258 if (mp == NULL)
3259 return;
3260
3261 MNT_ILOCK(mp);
3262 if (TAILQ_EMPTY(&mp->mnt_uppers))
3263 goto unlock;
3264 MNT_IUNLOCK(mp);
3265 mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO);
3266 mmp->mnt_op = &vgonel_vfsops;
3267 mmp->mnt_kern_flag |= MNTK_MARKER;
3268 MNT_ILOCK(mp);
3269 mp->mnt_kern_flag |= MNTK_VGONE_UPPER;
3270 for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) {
3271 if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) {
3272 ump = TAILQ_NEXT(ump, mnt_upper_link);
3273 continue;
3274 }
3275 TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link);
3276 MNT_IUNLOCK(mp);
3277 switch (event) {
3278 case VFS_NOTIFY_UPPER_RECLAIM:
3279 VFS_RECLAIM_LOWERVP(ump, vp);
3280 break;
3281 case VFS_NOTIFY_UPPER_UNLINK:
3282 VFS_UNLINK_LOWERVP(ump, vp);
3283 break;
3284 default:
3285 KASSERT(0, ("invalid event %d", event));
3286 break;
3287 }
3288 MNT_ILOCK(mp);
3289 ump = TAILQ_NEXT(mmp, mnt_upper_link);
3290 TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link);
3291 }
3292 free(mmp, M_TEMP);
3293 mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER;
3294 if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) {
3295 mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER;
3296 wakeup(&mp->mnt_uppers);
3297 }
3298 unlock:
3299 MNT_IUNLOCK(mp);
3300 }
3301
3302 /*
3303 * vgone, with the vp interlock held.
3304 */
3305 static void
3306 vgonel(struct vnode *vp)
3307 {
3308 struct thread *td;
3309 int oweinact;
3310 int active;
3311 struct mount *mp;
3312
3313 ASSERT_VOP_ELOCKED(vp, "vgonel");
3314 ASSERT_VI_LOCKED(vp, "vgonel");
3315 VNASSERT(vp->v_holdcnt, vp,
3316 ("vgonel: vp %p has no reference.", vp));
3317 CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3318 td = curthread;
3319
3320 /*
3321 * Don't vgonel if we're already doomed.
3322 */
3323 if (vp->v_iflag & VI_DOOMED)
3324 return;
3325 vp->v_iflag |= VI_DOOMED;
3326
3327 /*
3328 * Check to see if the vnode is in use. If so, we have to call
3329 * VOP_CLOSE() and VOP_INACTIVE().
3330 */
3331 active = vp->v_usecount;
3332 oweinact = (vp->v_iflag & VI_OWEINACT);
3333 VI_UNLOCK(vp);
3334 vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
3335
3336 /*
3337 * If purging an active vnode, it must be closed and
3338 * deactivated before being reclaimed.
3339 */
3340 if (active)
3341 VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
3342 if (oweinact || active) {
3343 VI_LOCK(vp);
3344 if ((vp->v_iflag & VI_DOINGINACT) == 0)
3345 vinactive(vp, td);
3346 VI_UNLOCK(vp);
3347 }
3348 if (vp->v_type == VSOCK)
3349 vfs_unp_reclaim(vp);
3350
3351 /*
3352 * Clean out any buffers associated with the vnode.
3353 * If the flush fails, just toss the buffers.
3354 */
3355 mp = NULL;
3356 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
3357 (void) vn_start_secondary_write(vp, &mp, V_WAIT);
3358 if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
3359 while (vinvalbuf(vp, 0, 0, 0) != 0)
3360 ;
3361 }
3362
3363 BO_LOCK(&vp->v_bufobj);
3364 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
3365 vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
3366 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
3367 vp->v_bufobj.bo_clean.bv_cnt == 0,
3368 ("vp %p bufobj not invalidated", vp));
3369
3370 /*
3371 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate()
3372 * after the object's page queue is flushed.
3373 */
3374 if (vp->v_bufobj.bo_object == NULL)
3375 vp->v_bufobj.bo_flag |= BO_DEAD;
3376 BO_UNLOCK(&vp->v_bufobj);
3377
3378 /*
3379 * Reclaim the vnode.
3380 */
3381 if (VOP_RECLAIM(vp, td))
3382 panic("vgone: cannot reclaim");
3383 if (mp != NULL)
3384 vn_finished_secondary_write(mp);
3385 VNASSERT(vp->v_object == NULL, vp,
3386 ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
3387 /*
3388 * Clear the advisory locks and wake up waiting threads.
3389 */
3390 (void)VOP_ADVLOCKPURGE(vp);
3391 vp->v_lockf = NULL;
3392 /*
3393 * Delete from old mount point vnode list.
3394 */
3395 delmntque(vp);
3396 cache_purge(vp);
3397 /*
3398 * Done with purge, reset to the standard lock and invalidate
3399 * the vnode.
3400 */
3401 VI_LOCK(vp);
3402 vp->v_vnlock = &vp->v_lock;
3403 vp->v_op = &dead_vnodeops;
3404 vp->v_tag = "none";
3405 vp->v_type = VBAD;
3406 }
3407
3408 /*
3409 * Calculate the total number of references to a special device.
3410 */
3411 int
3412 vcount(struct vnode *vp)
3413 {
3414 int count;
3415
3416 dev_lock();
3417 count = vp->v_rdev->si_usecount;
3418 dev_unlock();
3419 return (count);
3420 }
3421
3422 /*
3423  * Same as above, but taking a struct cdev * as the argument.
3424 */
3425 int
3426 count_dev(struct cdev *dev)
3427 {
3428 int count;
3429
3430 dev_lock();
3431 count = dev->si_usecount;
3432 dev_unlock();
3433 return(count);
3434 }
3435
3436 /*
3437 * Print out a description of a vnode.
3438 */
3439 static char *typename[] =
3440 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
3441 "VMARKER"};
3442
3443 void
3444 vn_printf(struct vnode *vp, const char *fmt, ...)
3445 {
3446 va_list ap;
3447 char buf[256], buf2[16];
3448 u_long flags;
3449
3450 va_start(ap, fmt);
3451 vprintf(fmt, ap);
3452 va_end(ap);
3453 printf("%p: ", (void *)vp);
3454 printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
3455 printf(" usecount %d, writecount %d, refcount %d",
3456 vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
3457 switch (vp->v_type) {
3458 case VDIR:
3459 printf(" mountedhere %p\n", vp->v_mountedhere);
3460 break;
3461 case VCHR:
3462 printf(" rdev %p\n", vp->v_rdev);
3463 break;
3464 case VSOCK:
3465 printf(" socket %p\n", vp->v_unpcb);
3466 break;
3467 case VFIFO:
3468 printf(" fifoinfo %p\n", vp->v_fifoinfo);
3469 break;
3470 default:
3471 printf("\n");
3472 break;
3473 }
3474 buf[0] = '\0';
3475 buf[1] = '\0';
3476 if (vp->v_vflag & VV_ROOT)
3477 strlcat(buf, "|VV_ROOT", sizeof(buf));
3478 if (vp->v_vflag & VV_ISTTY)
3479 strlcat(buf, "|VV_ISTTY", sizeof(buf));
3480 if (vp->v_vflag & VV_NOSYNC)
3481 strlcat(buf, "|VV_NOSYNC", sizeof(buf));
3482 if (vp->v_vflag & VV_ETERNALDEV)
3483 strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
3484 if (vp->v_vflag & VV_CACHEDLABEL)
3485 strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
3486 if (vp->v_vflag & VV_TEXT)
3487 strlcat(buf, "|VV_TEXT", sizeof(buf));
3488 if (vp->v_vflag & VV_COPYONWRITE)
3489 strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
3490 if (vp->v_vflag & VV_SYSTEM)
3491 strlcat(buf, "|VV_SYSTEM", sizeof(buf));
3492 if (vp->v_vflag & VV_PROCDEP)
3493 strlcat(buf, "|VV_PROCDEP", sizeof(buf));
3494 if (vp->v_vflag & VV_NOKNOTE)
3495 strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
3496 if (vp->v_vflag & VV_DELETED)
3497 strlcat(buf, "|VV_DELETED", sizeof(buf));
3498 if (vp->v_vflag & VV_MD)
3499 strlcat(buf, "|VV_MD", sizeof(buf));
3500 if (vp->v_vflag & VV_FORCEINSMQ)
3501 strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
3502 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
3503 VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
3504 VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ);
3505 if (flags != 0) {
3506 snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
3507 strlcat(buf, buf2, sizeof(buf));
3508 }
3509 if (vp->v_iflag & VI_MOUNT)
3510 strlcat(buf, "|VI_MOUNT", sizeof(buf));
3511 if (vp->v_iflag & VI_DOOMED)
3512 strlcat(buf, "|VI_DOOMED", sizeof(buf));
3513 if (vp->v_iflag & VI_FREE)
3514 strlcat(buf, "|VI_FREE", sizeof(buf));
3515 if (vp->v_iflag & VI_ACTIVE)
3516 strlcat(buf, "|VI_ACTIVE", sizeof(buf));
3517 if (vp->v_iflag & VI_DOINGINACT)
3518 strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
3519 if (vp->v_iflag & VI_OWEINACT)
3520 strlcat(buf, "|VI_OWEINACT", sizeof(buf));
3521 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE |
3522 VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT);
3523 if (flags != 0) {
3524 snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
3525 strlcat(buf, buf2, sizeof(buf));
3526 }
3527 printf(" flags (%s)\n", buf + 1);
3528 if (mtx_owned(VI_MTX(vp)))
3529 printf(" VI_LOCKed");
3530 if (vp->v_object != NULL)
3531 printf(" v_object %p ref %d pages %d "
3532 "cleanbuf %d dirtybuf %d\n",
3533 vp->v_object, vp->v_object->ref_count,
3534 vp->v_object->resident_page_count,
3535 vp->v_bufobj.bo_clean.bv_cnt,
3536 vp->v_bufobj.bo_dirty.bv_cnt);
3537 printf(" ");
3538 lockmgr_printinfo(vp->v_vnlock);
3539 if (vp->v_data != NULL)
3540 VOP_PRINT(vp);
3541 }
3542
3543 #ifdef DDB
3544 /*
3545 * List all of the locked vnodes in the system.
3546 * Called when debugging the kernel.
3547 */
3548 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
3549 {
3550 struct mount *mp;
3551 struct vnode *vp;
3552
3553 /*
3554 * Note: because this is DDB, we can't obey the locking semantics
3555 * for these structures, which means we could catch an inconsistent
3556 * state and dereference a nasty pointer. Not much to be done
3557 * about that.
3558 */
3559 db_printf("Locked vnodes\n");
3560 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3561 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3562 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
3563 vn_printf(vp, "vnode ");
3564 }
3565 }
3566 }
3567
3568 /*
3569 * Show details about the given vnode.
3570 */
3571 DB_SHOW_COMMAND(vnode, db_show_vnode)
3572 {
3573 struct vnode *vp;
3574
3575 if (!have_addr)
3576 return;
3577 vp = (struct vnode *)addr;
3578 vn_printf(vp, "vnode ");
3579 }
3580
3581 /*
3582 * Show details about the given mount point.
3583 */
3584 DB_SHOW_COMMAND(mount, db_show_mount)
3585 {
3586 struct mount *mp;
3587 struct vfsopt *opt;
3588 struct statfs *sp;
3589 struct vnode *vp;
3590 char buf[512];
3591 uint64_t mflags;
3592 u_int flags;
3593
3594 if (!have_addr) {
3595 /* No address given, print short info about all mount points. */
3596 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3597 db_printf("%p %s on %s (%s)\n", mp,
3598 mp->mnt_stat.f_mntfromname,
3599 mp->mnt_stat.f_mntonname,
3600 mp->mnt_stat.f_fstypename);
3601 if (db_pager_quit)
3602 break;
3603 }
3604 db_printf("\nMore info: show mount <addr>\n");
3605 return;
3606 }
3607
3608 mp = (struct mount *)addr;
3609 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
3610 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
3611
3612 buf[0] = '\0';
3613 mflags = mp->mnt_flag;
3614 #define MNT_FLAG(flag) do { \
3615 if (mflags & (flag)) { \
3616 if (buf[0] != '\0') \
3617 strlcat(buf, ", ", sizeof(buf)); \
3618 strlcat(buf, (#flag) + 4, sizeof(buf)); \
3619 mflags &= ~(flag); \
3620 } \
3621 } while (0)
3622 MNT_FLAG(MNT_RDONLY);
3623 MNT_FLAG(MNT_SYNCHRONOUS);
3624 MNT_FLAG(MNT_NOEXEC);
3625 MNT_FLAG(MNT_NOSUID);
3626 MNT_FLAG(MNT_NFS4ACLS);
3627 MNT_FLAG(MNT_UNION);
3628 MNT_FLAG(MNT_ASYNC);
3629 MNT_FLAG(MNT_SUIDDIR);
3630 MNT_FLAG(MNT_SOFTDEP);
3631 MNT_FLAG(MNT_NOSYMFOLLOW);
3632 MNT_FLAG(MNT_GJOURNAL);
3633 MNT_FLAG(MNT_MULTILABEL);
3634 MNT_FLAG(MNT_ACLS);
3635 MNT_FLAG(MNT_NOATIME);
3636 MNT_FLAG(MNT_NOCLUSTERR);
3637 MNT_FLAG(MNT_NOCLUSTERW);
3638 MNT_FLAG(MNT_SUJ);
3639 MNT_FLAG(MNT_EXRDONLY);
3640 MNT_FLAG(MNT_EXPORTED);
3641 MNT_FLAG(MNT_DEFEXPORTED);
3642 MNT_FLAG(MNT_EXPORTANON);
3643 MNT_FLAG(MNT_EXKERB);
3644 MNT_FLAG(MNT_EXPUBLIC);
3645 MNT_FLAG(MNT_LOCAL);
3646 MNT_FLAG(MNT_QUOTA);
3647 MNT_FLAG(MNT_ROOTFS);
3648 MNT_FLAG(MNT_USER);
3649 MNT_FLAG(MNT_IGNORE);
3650 MNT_FLAG(MNT_UPDATE);
3651 MNT_FLAG(MNT_DELEXPORT);
3652 MNT_FLAG(MNT_RELOAD);
3653 MNT_FLAG(MNT_FORCE);
3654 MNT_FLAG(MNT_SNAPSHOT);
3655 MNT_FLAG(MNT_BYFSID);
3656 #undef MNT_FLAG
3657 if (mflags != 0) {
3658 if (buf[0] != '\0')
3659 strlcat(buf, ", ", sizeof(buf));
3660 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3661 "0x%016jx", mflags);
3662 }
3663 db_printf(" mnt_flag = %s\n", buf);
3664
3665 buf[0] = '\0';
3666 flags = mp->mnt_kern_flag;
3667 #define MNT_KERN_FLAG(flag) do { \
3668 if (flags & (flag)) { \
3669 if (buf[0] != '\0') \
3670 strlcat(buf, ", ", sizeof(buf)); \
3671 strlcat(buf, (#flag) + 5, sizeof(buf)); \
3672 flags &= ~(flag); \
3673 } \
3674 } while (0)
3675 MNT_KERN_FLAG(MNTK_UNMOUNTF);
3676 MNT_KERN_FLAG(MNTK_ASYNC);
3677 MNT_KERN_FLAG(MNTK_SOFTDEP);
3678 MNT_KERN_FLAG(MNTK_NOINSMNTQ);
3679 MNT_KERN_FLAG(MNTK_DRAINING);
3680 MNT_KERN_FLAG(MNTK_REFEXPIRE);
3681 MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
3682 MNT_KERN_FLAG(MNTK_SHARED_WRITES);
3683 MNT_KERN_FLAG(MNTK_NO_IOPF);
3684 MNT_KERN_FLAG(MNTK_VGONE_UPPER);
3685 MNT_KERN_FLAG(MNTK_VGONE_WAITER);
3686 MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT);
3687 MNT_KERN_FLAG(MNTK_MARKER);
3688 MNT_KERN_FLAG(MNTK_USES_BCACHE);
3689 MNT_KERN_FLAG(MNTK_NOASYNC);
3690 MNT_KERN_FLAG(MNTK_UNMOUNT);
3691 MNT_KERN_FLAG(MNTK_MWAIT);
3692 MNT_KERN_FLAG(MNTK_SUSPEND);
3693 MNT_KERN_FLAG(MNTK_SUSPEND2);
3694 MNT_KERN_FLAG(MNTK_SUSPENDED);
3695 MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
3696 MNT_KERN_FLAG(MNTK_NOKNOTE);
3697 #undef MNT_KERN_FLAG
3698 if (flags != 0) {
3699 if (buf[0] != '\0')
3700 strlcat(buf, ", ", sizeof(buf));
3701 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3702 "0x%08x", flags);
3703 }
3704 db_printf(" mnt_kern_flag = %s\n", buf);
3705
3706 db_printf(" mnt_opt = ");
3707 opt = TAILQ_FIRST(mp->mnt_opt);
3708 if (opt != NULL) {
3709 db_printf("%s", opt->name);
3710 opt = TAILQ_NEXT(opt, link);
3711 while (opt != NULL) {
3712 db_printf(", %s", opt->name);
3713 opt = TAILQ_NEXT(opt, link);
3714 }
3715 }
3716 db_printf("\n");
3717
3718 sp = &mp->mnt_stat;
3719 db_printf(" mnt_stat = { version=%u type=%u flags=0x%016jx "
3720 "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
3721 "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
3722 "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
3723 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
3724 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
3725 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
3726 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
3727 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
3728 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
3729 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
3730 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
3731
3732 db_printf(" mnt_cred = { uid=%u ruid=%u",
3733 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
3734 if (jailed(mp->mnt_cred))
3735 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
3736 db_printf(" }\n");
3737 db_printf(" mnt_ref = %d\n", mp->mnt_ref);
3738 db_printf(" mnt_gen = %d\n", mp->mnt_gen);
3739 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
3740 db_printf(" mnt_activevnodelistsize = %d\n",
3741 mp->mnt_activevnodelistsize);
3742 db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount);
3743 db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
3744 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max);
3745 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed);
3746 db_printf(" mnt_lockref = %d\n", mp->mnt_lockref);
3747 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
3748 db_printf(" mnt_secondary_accwrites = %d\n",
3749 mp->mnt_secondary_accwrites);
3750 db_printf(" mnt_gjprovider = %s\n",
3751 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
3752
3753 db_printf("\n\nList of active vnodes\n");
3754 TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
3755 if (vp->v_type != VMARKER) {
3756 vn_printf(vp, "vnode ");
3757 if (db_pager_quit)
3758 break;
3759 }
3760 }
3761 db_printf("\n\nList of inactive vnodes\n");
3762 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3763 if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) {
3764 vn_printf(vp, "vnode ");
3765 if (db_pager_quit)
3766 break;
3767 }
3768 }
3769 }
3770 #endif /* DDB */
3771
3772 /*
3773 * Fill in a struct xvfsconf based on a struct vfsconf.
3774 */
3775 static int
3776 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
3777 {
3778 struct xvfsconf xvfsp;
3779
3780 bzero(&xvfsp, sizeof(xvfsp));
3781 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3782 xvfsp.vfc_typenum = vfsp->vfc_typenum;
3783 xvfsp.vfc_refcount = vfsp->vfc_refcount;
3784 xvfsp.vfc_flags = vfsp->vfc_flags;
3785 /*
3786 * These are unused in userland, we keep them
3787 * to not break binary compatibility.
3788 */
3789 xvfsp.vfc_vfsops = NULL;
3790 xvfsp.vfc_next = NULL;
3791 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3792 }
3793
3794 #ifdef COMPAT_FREEBSD32
3795 struct xvfsconf32 {
3796 uint32_t vfc_vfsops;
3797 char vfc_name[MFSNAMELEN];
3798 int32_t vfc_typenum;
3799 int32_t vfc_refcount;
3800 int32_t vfc_flags;
3801 uint32_t vfc_next;
3802 };
3803
3804 static int
3805 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
3806 {
3807 struct xvfsconf32 xvfsp;
3808
3809 bzero(&xvfsp, sizeof(xvfsp));
3810 strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3811 xvfsp.vfc_typenum = vfsp->vfc_typenum;
3812 xvfsp.vfc_refcount = vfsp->vfc_refcount;
3813 xvfsp.vfc_flags = vfsp->vfc_flags;
3814 return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3815 }
3816 #endif
3817
3818 /*
3819 * Top level filesystem related information gathering.
3820 */
3821 static int
3822 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
3823 {
3824 struct vfsconf *vfsp;
3825 int error;
3826
3827 error = 0;
3828 vfsconf_slock();
3829 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3830 #ifdef COMPAT_FREEBSD32
3831 if (req->flags & SCTL_MASK32)
3832 error = vfsconf2x32(req, vfsp);
3833 else
3834 #endif
3835 error = vfsconf2x(req, vfsp);
3836 if (error)
3837 break;
3838 }
3839 vfsconf_sunlock();
3840 return (error);
3841 }
3842
3843 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
3844 CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
3845 "S,xvfsconf", "List of all configured filesystems");
3846
3847 #ifndef BURN_BRIDGES
3848 static int sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
3849
3850 static int
3851 vfs_sysctl(SYSCTL_HANDLER_ARGS)
3852 {
3853 int *name = (int *)arg1 - 1; /* XXX */
3854 u_int namelen = arg2 + 1; /* XXX */
3855 struct vfsconf *vfsp;
3856
3857 log(LOG_WARNING, "userland calling deprecated sysctl, "
3858 "please rebuild world\n");
3859
3860 #if 1 || defined(COMPAT_PRELITE2)
3861 /* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3862 if (namelen == 1)
3863 return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3864 #endif
3865
3866 switch (name[1]) {
3867 case VFS_MAXTYPENUM:
3868 if (namelen != 2)
3869 return (ENOTDIR);
3870 return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3871 case VFS_CONF:
3872 if (namelen != 3)
3873 return (ENOTDIR); /* overloaded */
3874 vfsconf_slock();
3875 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3876 if (vfsp->vfc_typenum == name[2])
3877 break;
3878 }
3879 vfsconf_sunlock();
3880 if (vfsp == NULL)
3881 return (EOPNOTSUPP);
3882 #ifdef COMPAT_FREEBSD32
3883 if (req->flags & SCTL_MASK32)
3884 return (vfsconf2x32(req, vfsp));
3885 else
3886 #endif
3887 return (vfsconf2x(req, vfsp));
3888 }
3889 return (EOPNOTSUPP);
3890 }
3891
3892 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
3893 CTLFLAG_MPSAFE, vfs_sysctl,
3894 "Generic filesystem");
3895
3896 #if 1 || defined(COMPAT_PRELITE2)
3897
3898 static int
3899 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3900 {
3901 int error;
3902 struct vfsconf *vfsp;
3903 struct ovfsconf ovfs;
3904
3905 vfsconf_slock();
3906 TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3907 bzero(&ovfs, sizeof(ovfs));
3908 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */
3909 strcpy(ovfs.vfc_name, vfsp->vfc_name);
3910 ovfs.vfc_index = vfsp->vfc_typenum;
3911 ovfs.vfc_refcount = vfsp->vfc_refcount;
3912 ovfs.vfc_flags = vfsp->vfc_flags;
3913 error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3914 if (error != 0) {
3915 vfsconf_sunlock();
3916 return (error);
3917 }
3918 }
3919 vfsconf_sunlock();
3920 return (0);
3921 }
3922
3923 #endif /* 1 || COMPAT_PRELITE2 */
3924 #endif /* !BURN_BRIDGES */
3925
3926 #define KINFO_VNODESLOP 10
3927 #ifdef notyet
3928 /*
3929 * Dump vnode list (via sysctl).
3930 */
3931 /* ARGSUSED */
3932 static int
3933 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3934 {
3935 struct xvnode *xvn;
3936 struct mount *mp;
3937 struct vnode *vp;
3938 int error, len, n;
3939
3940 /*
3941 * Stale numvnodes access is not fatal here.
3942 */
3943 req->lock = 0;
3944 len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3945 if (!req->oldptr)
3946 /* Make an estimate */
3947 return (SYSCTL_OUT(req, 0, len));
3948
3949 error = sysctl_wire_old_buffer(req, 0);
3950 if (error != 0)
3951 return (error);
3952 xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3953 n = 0;
3954 mtx_lock(&mountlist_mtx);
3955 TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3956 if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3957 continue;
3958 MNT_ILOCK(mp);
3959 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3960 if (n == len)
3961 break;
3962 vref(vp);
3963 xvn[n].xv_size = sizeof *xvn;
3964 xvn[n].xv_vnode = vp;
3965 xvn[n].xv_id = 0; /* XXX compat */
3966 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3967 XV_COPY(usecount);
3968 XV_COPY(writecount);
3969 XV_COPY(holdcnt);
3970 XV_COPY(mount);
3971 XV_COPY(numoutput);
3972 XV_COPY(type);
3973 #undef XV_COPY
3974 xvn[n].xv_flag = vp->v_vflag;
3975
3976 switch (vp->v_type) {
3977 case VREG:
3978 case VDIR:
3979 case VLNK:
3980 break;
3981 case VBLK:
3982 case VCHR:
3983 if (vp->v_rdev == NULL) {
3984 vrele(vp);
3985 continue;
3986 }
3987 xvn[n].xv_dev = dev2udev(vp->v_rdev);
3988 break;
3989 case VSOCK:
3990 xvn[n].xv_socket = vp->v_socket;
3991 break;
3992 case VFIFO:
3993 xvn[n].xv_fifo = vp->v_fifoinfo;
3994 break;
3995 case VNON:
3996 case VBAD:
3997 default:
3998 /* shouldn't happen? */
3999 vrele(vp);
4000 continue;
4001 }
4002 vrele(vp);
4003 ++n;
4004 }
4005 MNT_IUNLOCK(mp);
4006 mtx_lock(&mountlist_mtx);
4007 vfs_unbusy(mp);
4008 if (n == len)
4009 break;
4010 }
4011 mtx_unlock(&mountlist_mtx);
4012
4013 error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
4014 free(xvn, M_TEMP);
4015 return (error);
4016 }
4017
4018 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD |
4019 CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode",
4020 "");
4021 #endif
4022
4023 static void
4024 unmount_or_warn(struct mount *mp)
4025 {
4026 int error;
4027
4028 error = dounmount(mp, MNT_FORCE, curthread);
4029 if (error != 0) {
4030 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
4031 if (error == EBUSY)
4032 printf("BUSY)\n");
4033 else
4034 printf("%d)\n", error);
4035 }
4036 }
4037
4038 /*
4039 * Unmount all filesystems. The list is traversed in reverse order
4040 * of mounting to avoid dependencies.
4041 */
4042 void
4043 vfs_unmountall(void)
4044 {
4045 struct mount *mp, *tmp;
4046
4047 CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
4048
4049 /*
4050 * Since this only runs when rebooting, it is not interlocked.
4051 */
4052 TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
4053 vfs_ref(mp);
4054
4055 /*
4056 * Forcibly unmounting "/dev" before "/" would prevent clean
4057 * unmount of the latter.
4058 */
4059 if (mp == rootdevmp)
4060 continue;
4061
4062 unmount_or_warn(mp);
4063 }
4064
4065 if (rootdevmp != NULL)
4066 unmount_or_warn(rootdevmp);
4067 }
4068
4069 /*
4070  * Perform msync on all vnodes under a mount point.
4071  * The mount point must be locked.
4072 */
4073 void
4074 vfs_msync(struct mount *mp, int flags)
4075 {
4076 struct vnode *vp, *mvp;
4077 struct vm_object *obj;
4078
4079 CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
4080
4081 vnlru_return_batch(mp);
4082
4083 MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
4084 obj = vp->v_object;
4085 if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
4086 (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
4087 if (!vget(vp,
4088 LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
4089 curthread)) {
4090 if (vp->v_vflag & VV_NOSYNC) { /* unlinked */
4091 vput(vp);
4092 continue;
4093 }
4094
4095 obj = vp->v_object;
4096 if (obj != NULL) {
4097 VM_OBJECT_WLOCK(obj);
4098 vm_object_page_clean(obj, 0, 0,
4099 flags == MNT_WAIT ?
4100 OBJPC_SYNC : OBJPC_NOSYNC);
4101 VM_OBJECT_WUNLOCK(obj);
4102 }
4103 vput(vp);
4104 }
4105 } else
4106 VI_UNLOCK(vp);
4107 }
4108 }
4109
4110 static void
4111 destroy_vpollinfo_free(struct vpollinfo *vi)
4112 {
4113
4114 knlist_destroy(&vi->vpi_selinfo.si_note);
4115 mtx_destroy(&vi->vpi_lock);
4116 uma_zfree(vnodepoll_zone, vi);
4117 }
4118
4119 static void
4120 destroy_vpollinfo(struct vpollinfo *vi)
4121 {
4122
4123 knlist_clear(&vi->vpi_selinfo.si_note, 1);
4124 seldrain(&vi->vpi_selinfo);
4125 destroy_vpollinfo_free(vi);
4126 }
4127
4128 /*
4129 * Initialize per-vnode helper structure to hold poll-related state.
4130 */
4131 void
4132 v_addpollinfo(struct vnode *vp)
4133 {
4134 struct vpollinfo *vi;
4135
4136 if (vp->v_pollinfo != NULL)
4137 return;
4138 vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO);
4139 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4140 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4141 vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
4142 VI_LOCK(vp);
4143 if (vp->v_pollinfo != NULL) {
4144 VI_UNLOCK(vp);
4145 destroy_vpollinfo_free(vi);
4146 return;
4147 }
4148 vp->v_pollinfo = vi;
4149 VI_UNLOCK(vp);
4150 }
4151
4152 /*
4153 * Record a process's interest in events which might happen to
4154 * a vnode. Because poll uses the historic select-style interface
4155 * internally, this routine serves as both the ``check for any
4156 * pending events'' and the ``record my interest in future events''
4157 * functions. (These are done together, while the lock is held,
4158 * to avoid race conditions.)
4159 */
4160 int
4161 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4162 {
4163
4164 v_addpollinfo(vp);
4165 mtx_lock(&vp->v_pollinfo->vpi_lock);
4166 if (vp->v_pollinfo->vpi_revents & events) {
4167 /*
4168 * This leaves events we are not interested
4169 * in available for the other process which
4170 				 * presumably had requested them
4171 * (otherwise they would never have been
4172 * recorded).
4173 */
4174 events &= vp->v_pollinfo->vpi_revents;
4175 vp->v_pollinfo->vpi_revents &= ~events;
4176
4177 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4178 return (events);
4179 }
4180 vp->v_pollinfo->vpi_events |= events;
4181 selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4182 mtx_unlock(&vp->v_pollinfo->vpi_lock);
4183 return (0);
4184 }
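
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  It shows
 * how a filesystem's VOP_POLL method might use vn_pollrecord(); the
 * "myfs_poll" name is hypothetical and the logic mirrors the stock
 * vop_stdpoll() behaviour.
 */
#if 0	/* example only */
static int
myfs_poll(struct vop_poll_args *ap)
{

        /*
         * For events beyond the always-ready standard set, record the
         * caller's interest so a later vnode event can wake it up.
         */
        if (ap->a_events & ~POLLSTANDARD)
                return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
        /* Regular files are always readable and writable. */
        return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
#endif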
4185
4186 /*
4187 * Routine to create and manage a filesystem syncer vnode.
4188 */
4189 #define sync_close ((int (*)(struct vop_close_args *))nullop)
4190 static int sync_fsync(struct vop_fsync_args *);
4191 static int sync_inactive(struct vop_inactive_args *);
4192 static int sync_reclaim(struct vop_reclaim_args *);
4193
4194 static struct vop_vector sync_vnodeops = {
4195 .vop_bypass = VOP_EOPNOTSUPP,
4196 .vop_close = sync_close, /* close */
4197 .vop_fsync = sync_fsync, /* fsync */
4198 .vop_inactive = sync_inactive, /* inactive */
4199 .vop_reclaim = sync_reclaim, /* reclaim */
4200 .vop_lock1 = vop_stdlock, /* lock */
4201 .vop_unlock = vop_stdunlock, /* unlock */
4202 .vop_islocked = vop_stdislocked, /* islocked */
4203 };
4204
4205 /*
4206 * Create a new filesystem syncer vnode for the specified mount point.
4207 */
4208 void
4209 vfs_allocate_syncvnode(struct mount *mp)
4210 {
4211 struct vnode *vp;
4212 struct bufobj *bo;
4213 static long start, incr, next;
4214 int error;
4215
4216 /* Allocate a new vnode */
4217 error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
4218 if (error != 0)
4219 panic("vfs_allocate_syncvnode: getnewvnode() failed");
4220 vp->v_type = VNON;
4221 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4222 vp->v_vflag |= VV_FORCEINSMQ;
4223 error = insmntque(vp, mp);
4224 if (error != 0)
4225 panic("vfs_allocate_syncvnode: insmntque() failed");
4226 vp->v_vflag &= ~VV_FORCEINSMQ;
4227 VOP_UNLOCK(vp, 0);
4228 /*
4229 * Place the vnode onto the syncer worklist. We attempt to
4230 * scatter them about on the list so that they will go off
4231 * at evenly distributed times even if all the filesystems
4232 * are mounted at once.
4233 */
4234 next += incr;
4235 if (next == 0 || next > syncer_maxdelay) {
4236 start /= 2;
4237 incr /= 2;
4238 if (start == 0) {
4239 start = syncer_maxdelay / 2;
4240 incr = syncer_maxdelay;
4241 }
4242 next = start;
4243 }
4244 bo = &vp->v_bufobj;
4245 BO_LOCK(bo);
4246 vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
4247 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
4248 mtx_lock(&sync_mtx);
4249 sync_vnode_count++;
4250 if (mp->mnt_syncer == NULL) {
4251 mp->mnt_syncer = vp;
4252 vp = NULL;
4253 }
4254 mtx_unlock(&sync_mtx);
4255 BO_UNLOCK(bo);
4256 if (vp != NULL) {
4257 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4258 vgone(vp);
4259 vput(vp);
4260 }
4261 }
4262
4263 void
4264 vfs_deallocate_syncvnode(struct mount *mp)
4265 {
4266 struct vnode *vp;
4267
4268 mtx_lock(&sync_mtx);
4269 vp = mp->mnt_syncer;
4270 if (vp != NULL)
4271 mp->mnt_syncer = NULL;
4272 mtx_unlock(&sync_mtx);
4273 if (vp != NULL)
4274 vrele(vp);
4275 }
4276
4277 /*
4278 * Do a lazy sync of the filesystem.
4279 */
4280 static int
4281 sync_fsync(struct vop_fsync_args *ap)
4282 {
4283 struct vnode *syncvp = ap->a_vp;
4284 struct mount *mp = syncvp->v_mount;
4285 int error, save;
4286 struct bufobj *bo;
4287
4288 /*
4289 * We only need to do something if this is a lazy evaluation.
4290 */
4291 if (ap->a_waitfor != MNT_LAZY)
4292 return (0);
4293
4294 /*
4295 * Move ourselves to the back of the sync list.
4296 */
4297 bo = &syncvp->v_bufobj;
4298 BO_LOCK(bo);
4299 vn_syncer_add_to_worklist(bo, syncdelay);
4300 BO_UNLOCK(bo);
4301
4302 /*
4303 * Walk the list of vnodes pushing all that are dirty and
4304 * not already on the sync list.
4305 */
4306 if (vfs_busy(mp, MBF_NOWAIT) != 0)
4307 return (0);
4308 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
4309 vfs_unbusy(mp);
4310 return (0);
4311 }
4312 save = curthread_pflags_set(TDP_SYNCIO);
4313 vfs_msync(mp, MNT_NOWAIT);
4314 error = VFS_SYNC(mp, MNT_LAZY);
4315 curthread_pflags_restore(save);
4316 vn_finished_write(mp);
4317 vfs_unbusy(mp);
4318 return (error);
4319 }
4320
4321 /*
4322  * The syncer vnode is no longer referenced.
4323 */
4324 static int
4325 sync_inactive(struct vop_inactive_args *ap)
4326 {
4327
4328 vgone(ap->a_vp);
4329 return (0);
4330 }
4331
4332 /*
4333 * The syncer vnode is no longer needed and is being decommissioned.
4334 *
4335 * Modifications to the worklist must be protected by sync_mtx.
4336 */
4337 static int
4338 sync_reclaim(struct vop_reclaim_args *ap)
4339 {
4340 struct vnode *vp = ap->a_vp;
4341 struct bufobj *bo;
4342
4343 bo = &vp->v_bufobj;
4344 BO_LOCK(bo);
4345 mtx_lock(&sync_mtx);
4346 if (vp->v_mount->mnt_syncer == vp)
4347 vp->v_mount->mnt_syncer = NULL;
4348 if (bo->bo_flag & BO_ONWORKLST) {
4349 LIST_REMOVE(bo, bo_synclist);
4350 syncer_worklist_len--;
4351 sync_vnode_count--;
4352 bo->bo_flag &= ~BO_ONWORKLST;
4353 }
4354 mtx_unlock(&sync_mtx);
4355 BO_UNLOCK(bo);
4356
4357 return (0);
4358 }
4359
4360 /*
4361 * Check if vnode represents a disk device
4362 */
4363 int
4364 vn_isdisk(struct vnode *vp, int *errp)
4365 {
4366 int error;
4367
4368 if (vp->v_type != VCHR) {
4369 error = ENOTBLK;
4370 goto out;
4371 }
4372 error = 0;
4373 dev_lock();
4374 if (vp->v_rdev == NULL)
4375 error = ENXIO;
4376 else if (vp->v_rdev->si_devsw == NULL)
4377 error = ENXIO;
4378 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
4379 error = ENOTBLK;
4380 dev_unlock();
4381 out:
4382 if (errp != NULL)
4383 *errp = error;
4384 return (error == 0);
4385 }
4386
4387 /*
4388 * Common filesystem object access control check routine. Accepts a
4389 * vnode's type, "mode", uid and gid, requested access mode, credentials,
4390 * and optional call-by-reference privused argument allowing vaccess()
4391 * to indicate to the caller whether privilege was used to satisfy the
4392 * request (obsoleted). Returns 0 on success, or an errno on failure.
4393 */
4394 int
4395 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
4396 accmode_t accmode, struct ucred *cred, int *privused)
4397 {
4398 accmode_t dac_granted;
4399 accmode_t priv_granted;
4400
4401 KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
4402 ("invalid bit in accmode"));
4403 KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
4404 ("VAPPEND without VWRITE"));
4405
4406 /*
4407 * Look for a normal, non-privileged way to access the file/directory
4408 * as requested. If it exists, go with that.
4409 */
4410
4411 if (privused != NULL)
4412 *privused = 0;
4413
4414 dac_granted = 0;
4415
4416 /* Check the owner. */
4417 if (cred->cr_uid == file_uid) {
4418 dac_granted |= VADMIN;
4419 if (file_mode & S_IXUSR)
4420 dac_granted |= VEXEC;
4421 if (file_mode & S_IRUSR)
4422 dac_granted |= VREAD;
4423 if (file_mode & S_IWUSR)
4424 dac_granted |= (VWRITE | VAPPEND);
4425
4426 if ((accmode & dac_granted) == accmode)
4427 return (0);
4428
4429 goto privcheck;
4430 }
4431
4432 /* Otherwise, check the groups (first match) */
4433 if (groupmember(file_gid, cred)) {
4434 if (file_mode & S_IXGRP)
4435 dac_granted |= VEXEC;
4436 if (file_mode & S_IRGRP)
4437 dac_granted |= VREAD;
4438 if (file_mode & S_IWGRP)
4439 dac_granted |= (VWRITE | VAPPEND);
4440
4441 if ((accmode & dac_granted) == accmode)
4442 return (0);
4443
4444 goto privcheck;
4445 }
4446
4447 /* Otherwise, check everyone else. */
4448 if (file_mode & S_IXOTH)
4449 dac_granted |= VEXEC;
4450 if (file_mode & S_IROTH)
4451 dac_granted |= VREAD;
4452 if (file_mode & S_IWOTH)
4453 dac_granted |= (VWRITE | VAPPEND);
4454 if ((accmode & dac_granted) == accmode)
4455 return (0);
4456
4457 privcheck:
4458 /*
4459 * Build a privilege mask to determine if the set of privileges
4460 * satisfies the requirements when combined with the granted mask
4461 * from above. For each privilege, if the privilege is required,
4462 * bitwise or the request type onto the priv_granted mask.
4463 */
4464 priv_granted = 0;
4465
4466 if (type == VDIR) {
4467 /*
4468 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
4469 * requests, instead of PRIV_VFS_EXEC.
4470 */
4471 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4472 !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
4473 priv_granted |= VEXEC;
4474 } else {
4475 /*
4476 * Ensure that at least one execute bit is on. Otherwise,
4477 * a privileged user will always succeed, and we don't want
4478 * this to happen unless the file really is executable.
4479 */
4480 if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4481 (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
4482 !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
4483 priv_granted |= VEXEC;
4484 }
4485
4486 if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
4487 !priv_check_cred(cred, PRIV_VFS_READ, 0))
4488 priv_granted |= VREAD;
4489
4490 if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
4491 !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
4492 priv_granted |= (VWRITE | VAPPEND);
4493
4494 if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
4495 !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
4496 priv_granted |= VADMIN;
4497
4498 if ((accmode & (priv_granted | dac_granted)) == accmode) {
4499 /* XXX audit: privilege used */
4500 if (privused != NULL)
4501 *privused = 1;
4502 return (0);
4503 }
4504
4505 return ((accmode & VADMIN) ? EPERM : EACCES);
4506 }
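
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  It shows
 * the usual way a filesystem's VOP_ACCESS method delegates to vaccess();
 * "myfs_access", "struct mynode" and the mn_* fields are hypothetical
 * stand-ins for a filesystem's per-vnode metadata.
 */
#if 0	/* example only */
static int
myfs_access(struct vop_access_args *ap)
{
        struct vnode *vp = ap->a_vp;
        struct mynode *np = vp->v_data;	/* hypothetical private data */
        accmode_t accmode = ap->a_accmode;

        /* Refuse writes on a read-only mount, as real implementations do. */
        if ((accmode & VWRITE) != 0 &&
            (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
                return (EROFS);

        return (vaccess(vp->v_type, np->mn_mode, np->mn_uid, np->mn_gid,
            accmode, ap->a_cred, NULL));
}
#endif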
4507
4508 /*
4509 * Credential check based on process requesting service, and per-attribute
4510 * permissions.
4511 */
4512 int
4513 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
4514 struct thread *td, accmode_t accmode)
4515 {
4516
4517 /*
4518 * Kernel-invoked always succeeds.
4519 */
4520 if (cred == NOCRED)
4521 return (0);
4522
4523 /*
4524 * Do not allow privileged processes in jail to directly manipulate
4525 * system attributes.
4526 */
4527 switch (attrnamespace) {
4528 case EXTATTR_NAMESPACE_SYSTEM:
4529 /* Potentially should be: return (EPERM); */
4530 return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
4531 case EXTATTR_NAMESPACE_USER:
4532 return (VOP_ACCESS(vp, accmode, cred, td));
4533 default:
4534 return (EPERM);
4535 }
4536 }
4537
4538 #ifdef DEBUG_VFS_LOCKS
4539 /*
4540 * This only exists to suppress warnings from unlocked specfs accesses. It is
4541 * no longer ok to have an unlocked VFS.
4542 */
4543 #define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
4544 (vp)->v_type == VCHR || (vp)->v_type == VBAD)
4545
4546 int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
4547 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
4548 "Drop into debugger on lock violation");
4549
4550 int vfs_badlock_mutex = 1; /* Check for interlock across VOPs. */
4551 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
4552 0, "Check for interlock across VOPs");
4553
4554 int vfs_badlock_print = 1; /* Print lock violations. */
4555 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
4556 0, "Print lock violations");
4557
4558 int vfs_badlock_vnode = 1; /* Print vnode details on lock violations. */
4559 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode,
4560 0, "Print vnode details on lock violations");
4561
4562 #ifdef KDB
4563 int vfs_badlock_backtrace = 1; /* Print backtrace at lock violations. */
4564 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
4565 &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
4566 #endif
4567
4568 static void
4569 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
4570 {
4571
4572 #ifdef KDB
4573 if (vfs_badlock_backtrace)
4574 kdb_backtrace();
4575 #endif
4576 if (vfs_badlock_vnode)
4577 vn_printf(vp, "vnode ");
4578 if (vfs_badlock_print)
4579 printf("%s: %p %s\n", str, (void *)vp, msg);
4580 if (vfs_badlock_ddb)
4581 kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4582 }
4583
4584 void
4585 assert_vi_locked(struct vnode *vp, const char *str)
4586 {
4587
4588 if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
4589 vfs_badlock("interlock is not locked but should be", str, vp);
4590 }
4591
4592 void
4593 assert_vi_unlocked(struct vnode *vp, const char *str)
4594 {
4595
4596 if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
4597 vfs_badlock("interlock is locked but should not be", str, vp);
4598 }
4599
4600 void
4601 assert_vop_locked(struct vnode *vp, const char *str)
4602 {
4603 int locked;
4604
4605 if (!IGNORE_LOCK(vp)) {
4606 locked = VOP_ISLOCKED(vp);
4607 if (locked == 0 || locked == LK_EXCLOTHER)
4608 vfs_badlock("is not locked but should be", str, vp);
4609 }
4610 }
4611
4612 void
4613 assert_vop_unlocked(struct vnode *vp, const char *str)
4614 {
4615
4616 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
4617 vfs_badlock("is locked but should not be", str, vp);
4618 }
4619
4620 void
4621 assert_vop_elocked(struct vnode *vp, const char *str)
4622 {
4623
4624 if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
4625 vfs_badlock("is not exclusive locked but should be", str, vp);
4626 }
4627 #endif /* DEBUG_VFS_LOCKS */
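
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  The
 * assertion routines above back the ASSERT_VOP_*() macros that individual
 * filesystems place in their vnode operations to document and enforce their
 * locking contracts; "myfs_setattr" is a hypothetical example.
 */
#if 0	/* example only */
static int
myfs_setattr(struct vop_setattr_args *ap)
{

        /* VOP_SETATTR is entered with the vnode exclusively locked. */
        ASSERT_VOP_ELOCKED(ap->a_vp, "myfs_setattr");
        /* ... apply the attribute changes ... */
        return (0);
}
#endif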
4628
4629 void
4630 vop_rename_fail(struct vop_rename_args *ap)
4631 {
4632
4633 if (ap->a_tvp != NULL)
4634 vput(ap->a_tvp);
4635 if (ap->a_tdvp == ap->a_tvp)
4636 vrele(ap->a_tdvp);
4637 else
4638 vput(ap->a_tdvp);
4639 vrele(ap->a_fdvp);
4640 vrele(ap->a_fvp);
4641 }
4642
4643 void
4644 vop_rename_pre(void *ap)
4645 {
4646 struct vop_rename_args *a = ap;
4647
4648 #ifdef DEBUG_VFS_LOCKS
4649 if (a->a_tvp)
4650 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
4651 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
4652 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
4653 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
4654
4655 /* Check the source (from). */
4656 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
4657 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
4658 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
4659 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
4660 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
4661
4662 /* Check the target. */
4663 if (a->a_tvp)
4664 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
4665 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
4666 #endif
4667 if (a->a_tdvp != a->a_fdvp)
4668 vhold(a->a_fdvp);
4669 if (a->a_tvp != a->a_fvp)
4670 vhold(a->a_fvp);
4671 vhold(a->a_tdvp);
4672 if (a->a_tvp)
4673 vhold(a->a_tvp);
4674 }
4675
4676 #ifdef DEBUG_VFS_LOCKS
4677 void
4678 vop_strategy_pre(void *ap)
4679 {
4680 struct vop_strategy_args *a;
4681 struct buf *bp;
4682
4683 a = ap;
4684 bp = a->a_bp;
4685
4686 /*
4687 * Cluster ops lock their component buffers but not the IO container.
4688 */
4689 if ((bp->b_flags & B_CLUSTER) != 0)
4690 return;
4691
4692 if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
4693 if (vfs_badlock_print)
4694 printf(
4695 "VOP_STRATEGY: bp is not locked but should be\n");
4696 if (vfs_badlock_ddb)
4697 kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4698 }
4699 }
4700
4701 void
4702 vop_lock_pre(void *ap)
4703 {
4704 struct vop_lock1_args *a = ap;
4705
4706 if ((a->a_flags & LK_INTERLOCK) == 0)
4707 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4708 else
4709 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
4710 }
4711
4712 void
4713 vop_lock_post(void *ap, int rc)
4714 {
4715 struct vop_lock1_args *a = ap;
4716
4717 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4718 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
4719 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
4720 }
4721
4722 void
4723 vop_unlock_pre(void *ap)
4724 {
4725 struct vop_unlock_args *a = ap;
4726
4727 if (a->a_flags & LK_INTERLOCK)
4728 ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
4729 ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
4730 }
4731
4732 void
4733 vop_unlock_post(void *ap, int rc)
4734 {
4735 struct vop_unlock_args *a = ap;
4736
4737 if (a->a_flags & LK_INTERLOCK)
4738 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
4739 }
4740 #endif
4741
4742 void
4743 vop_create_post(void *ap, int rc)
4744 {
4745 struct vop_create_args *a = ap;
4746
4747 if (!rc)
4748 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4749 }
4750
4751 void
4752 vop_deleteextattr_post(void *ap, int rc)
4753 {
4754 struct vop_deleteextattr_args *a = ap;
4755
4756 if (!rc)
4757 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4758 }
4759
4760 void
4761 vop_link_post(void *ap, int rc)
4762 {
4763 struct vop_link_args *a = ap;
4764
4765 if (!rc) {
4766 VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
4767 VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
4768 }
4769 }
4770
4771 void
4772 vop_mkdir_post(void *ap, int rc)
4773 {
4774 struct vop_mkdir_args *a = ap;
4775
4776 if (!rc)
4777 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4778 }
4779
4780 void
4781 vop_mknod_post(void *ap, int rc)
4782 {
4783 struct vop_mknod_args *a = ap;
4784
4785 if (!rc)
4786 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4787 }
4788
4789 void
4790 vop_reclaim_post(void *ap, int rc)
4791 {
4792 struct vop_reclaim_args *a = ap;
4793
4794 if (!rc)
4795 VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE);
4796 }
4797
4798 void
4799 vop_remove_post(void *ap, int rc)
4800 {
4801 struct vop_remove_args *a = ap;
4802
4803 if (!rc) {
4804 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4805 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4806 }
4807 }
4808
4809 void
4810 vop_rename_post(void *ap, int rc)
4811 {
4812 struct vop_rename_args *a = ap;
4813 long hint;
4814
4815 if (!rc) {
4816 hint = NOTE_WRITE;
4817 if (a->a_fdvp == a->a_tdvp) {
4818 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
4819 hint |= NOTE_LINK;
4820 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4821 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4822 } else {
4823 hint |= NOTE_EXTEND;
4824 if (a->a_fvp->v_type == VDIR)
4825 hint |= NOTE_LINK;
4826 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4827
4828 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
4829 a->a_tvp->v_type == VDIR)
4830 hint &= ~NOTE_LINK;
4831 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4832 }
4833
4834 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
4835 if (a->a_tvp)
4836 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
4837 }
4838 if (a->a_tdvp != a->a_fdvp)
4839 vdrop(a->a_fdvp);
4840 if (a->a_tvp != a->a_fvp)
4841 vdrop(a->a_fvp);
4842 vdrop(a->a_tdvp);
4843 if (a->a_tvp)
4844 vdrop(a->a_tvp);
4845 }
4846
4847 void
4848 vop_rmdir_post(void *ap, int rc)
4849 {
4850 struct vop_rmdir_args *a = ap;
4851
4852 if (!rc) {
4853 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4854 VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4855 }
4856 }
4857
4858 void
4859 vop_setattr_post(void *ap, int rc)
4860 {
4861 struct vop_setattr_args *a = ap;
4862
4863 if (!rc)
4864 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4865 }
4866
4867 void
4868 vop_setextattr_post(void *ap, int rc)
4869 {
4870 struct vop_setextattr_args *a = ap;
4871
4872 if (!rc)
4873 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4874 }
4875
4876 void
4877 vop_symlink_post(void *ap, int rc)
4878 {
4879 struct vop_symlink_args *a = ap;
4880
4881 if (!rc)
4882 VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4883 }
4884
4885 void
4886 vop_open_post(void *ap, int rc)
4887 {
4888 struct vop_open_args *a = ap;
4889
4890 if (!rc)
4891 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
4892 }
4893
4894 void
4895 vop_close_post(void *ap, int rc)
4896 {
4897 struct vop_close_args *a = ap;
4898
4899 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
4900 (a->a_vp->v_iflag & VI_DOOMED) == 0)) {
4901 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
4902 NOTE_CLOSE_WRITE : NOTE_CLOSE);
4903 }
4904 }
4905
4906 void
4907 vop_read_post(void *ap, int rc)
4908 {
4909 struct vop_read_args *a = ap;
4910
4911 if (!rc)
4912 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
4913 }
4914
4915 void
4916 vop_readdir_post(void *ap, int rc)
4917 {
4918 struct vop_readdir_args *a = ap;
4919
4920 if (!rc)
4921 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
4922 }
4923
4924 static struct knlist fs_knlist;
4925
4926 static void
4927 vfs_event_init(void *arg)
4928 {
4929 knlist_init_mtx(&fs_knlist, NULL);
4930 }
4931 /* XXX - correct order? */
4932 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
4933
4934 void
4935 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
4936 {
4937
4938 KNOTE_UNLOCKED(&fs_knlist, event);
4939 }
4940
4941 static int filt_fsattach(struct knote *kn);
4942 static void filt_fsdetach(struct knote *kn);
4943 static int filt_fsevent(struct knote *kn, long hint);
4944
4945 struct filterops fs_filtops = {
4946 .f_isfd = 0,
4947 .f_attach = filt_fsattach,
4948 .f_detach = filt_fsdetach,
4949 .f_event = filt_fsevent
4950 };
4951
4952 static int
4953 filt_fsattach(struct knote *kn)
4954 {
4955
4956 kn->kn_flags |= EV_CLEAR;
4957 knlist_add(&fs_knlist, kn, 0);
4958 return (0);
4959 }
4960
4961 static void
4962 filt_fsdetach(struct knote *kn)
4963 {
4964
4965 knlist_remove(&fs_knlist, kn, 0);
4966 }
4967
4968 static int
4969 filt_fsevent(struct knote *kn, long hint)
4970 {
4971
4972 kn->kn_fflags |= hint;
4973 return (kn->kn_fflags != 0);
4974 }
4975
4976 static int
4977 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4978 {
4979 struct vfsidctl vc;
4980 int error;
4981 struct mount *mp;
4982
4983 error = SYSCTL_IN(req, &vc, sizeof(vc));
4984 if (error)
4985 return (error);
4986 if (vc.vc_vers != VFS_CTL_VERS1)
4987 return (EINVAL);
4988 mp = vfs_getvfs(&vc.vc_fsid);
4989 if (mp == NULL)
4990 return (ENOENT);
4991 /* ensure that a specific sysctl goes to the right filesystem. */
4992 if (strcmp(vc.vc_fstypename, "*") != 0 &&
4993 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4994 vfs_rel(mp);
4995 return (EINVAL);
4996 }
4997 VCTLTOREQ(&vc, req);
4998 error = VFS_SYSCTL(mp, vc.vc_op, req);
4999 vfs_rel(mp);
5000 return (error);
5001 }
5002
5003 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR,
5004 NULL, 0, sysctl_vfs_ctl, "",
5005 "Sysctl by fsid");
5006
5007 /*
5008 * Function to initialize a va_filerev field sensibly.
5009 * XXX: Wouldn't a random number make a lot more sense ??
5010 */
5011 u_quad_t
5012 init_va_filerev(void)
5013 {
5014 struct bintime bt;
5015
5016 getbinuptime(&bt);
5017 return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
5018 }
5019
5020 static int filt_vfsread(struct knote *kn, long hint);
5021 static int filt_vfswrite(struct knote *kn, long hint);
5022 static int filt_vfsvnode(struct knote *kn, long hint);
5023 static void filt_vfsdetach(struct knote *kn);
5024 static struct filterops vfsread_filtops = {
5025 .f_isfd = 1,
5026 .f_detach = filt_vfsdetach,
5027 .f_event = filt_vfsread
5028 };
5029 static struct filterops vfswrite_filtops = {
5030 .f_isfd = 1,
5031 .f_detach = filt_vfsdetach,
5032 .f_event = filt_vfswrite
5033 };
5034 static struct filterops vfsvnode_filtops = {
5035 .f_isfd = 1,
5036 .f_detach = filt_vfsdetach,
5037 .f_event = filt_vfsvnode
5038 };
5039
5040 static void
5041 vfs_knllock(void *arg)
5042 {
5043 struct vnode *vp = arg;
5044
5045 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5046 }
5047
5048 static void
5049 vfs_knlunlock(void *arg)
5050 {
5051 struct vnode *vp = arg;
5052
5053 VOP_UNLOCK(vp, 0);
5054 }
5055
5056 static void
5057 vfs_knl_assert_locked(void *arg)
5058 {
5059 #ifdef DEBUG_VFS_LOCKS
5060 struct vnode *vp = arg;
5061
5062 ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
5063 #endif
5064 }
5065
5066 static void
5067 vfs_knl_assert_unlocked(void *arg)
5068 {
5069 #ifdef DEBUG_VFS_LOCKS
5070 struct vnode *vp = arg;
5071
5072 ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
5073 #endif
5074 }
5075
5076 int
5077 vfs_kqfilter(struct vop_kqfilter_args *ap)
5078 {
5079 struct vnode *vp = ap->a_vp;
5080 struct knote *kn = ap->a_kn;
5081 struct knlist *knl;
5082
5083 switch (kn->kn_filter) {
5084 case EVFILT_READ:
5085 kn->kn_fop = &vfsread_filtops;
5086 break;
5087 case EVFILT_WRITE:
5088 kn->kn_fop = &vfswrite_filtops;
5089 break;
5090 case EVFILT_VNODE:
5091 kn->kn_fop = &vfsvnode_filtops;
5092 break;
5093 default:
5094 return (EINVAL);
5095 }
5096
5097 kn->kn_hook = (caddr_t)vp;
5098
5099 v_addpollinfo(vp);
5100 if (vp->v_pollinfo == NULL)
5101 return (ENOMEM);
5102 knl = &vp->v_pollinfo->vpi_selinfo.si_note;
5103 vhold(vp);
5104 knlist_add(knl, kn, 0);
5105
5106 return (0);
5107 }
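
/*
 * Editor's note: illustrative userland sketch, not part of vfs_subr.c.  It
 * shows the consumer side of the EVFILT_VNODE machinery wired up above; the
 * watched path is an arbitrary example.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
        struct kevent change, ev;
        int fd, kq;

        kq = kqueue();
        fd = open("/tmp/watched-file", O_RDONLY);
        if (kq == -1 || fd == -1)
                return (1);
        EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
            NOTE_WRITE | NOTE_DELETE | NOTE_RENAME, 0, NULL);
        /* Blocks until filt_vfsvnode() reports a matching event. */
        if (kevent(kq, &change, 1, &ev, 1, NULL) == 1)
                printf("vnode event, fflags=0x%x\n", (unsigned)ev.fflags);
        return (0);
}
#endif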
5108
5109 /*
5110 * Detach knote from vnode
5111 */
5112 static void
5113 filt_vfsdetach(struct knote *kn)
5114 {
5115 struct vnode *vp = (struct vnode *)kn->kn_hook;
5116
5117 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
5118 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
5119 vdrop(vp);
5120 }
5121
5122 /*ARGSUSED*/
5123 static int
5124 filt_vfsread(struct knote *kn, long hint)
5125 {
5126 struct vnode *vp = (struct vnode *)kn->kn_hook;
5127 struct vattr va;
5128 int res;
5129
5130 /*
5131 * filesystem is gone, so set the EOF flag and schedule
5132 * the knote for deletion.
5133 */
5134 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5135 VI_LOCK(vp);
5136 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5137 VI_UNLOCK(vp);
5138 return (1);
5139 }
5140
5141 if (VOP_GETATTR(vp, &va, curthread->td_ucred))
5142 return (0);
5143
5144 VI_LOCK(vp);
5145 kn->kn_data = va.va_size - kn->kn_fp->f_offset;
5146 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
5147 VI_UNLOCK(vp);
5148 return (res);
5149 }
5150
5151 /*ARGSUSED*/
5152 static int
5153 filt_vfswrite(struct knote *kn, long hint)
5154 {
5155 struct vnode *vp = (struct vnode *)kn->kn_hook;
5156
5157 VI_LOCK(vp);
5158
5159 /*
5160 * filesystem is gone, so set the EOF flag and schedule
5161 * the knote for deletion.
5162 */
5163 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
5164 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5165
5166 kn->kn_data = 0;
5167 VI_UNLOCK(vp);
5168 return (1);
5169 }
5170
5171 static int
5172 filt_vfsvnode(struct knote *kn, long hint)
5173 {
5174 struct vnode *vp = (struct vnode *)kn->kn_hook;
5175 int res;
5176
5177 VI_LOCK(vp);
5178 if (kn->kn_sfflags & hint)
5179 kn->kn_fflags |= hint;
5180 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5181 kn->kn_flags |= EV_EOF;
5182 VI_UNLOCK(vp);
5183 return (1);
5184 }
5185 res = (kn->kn_fflags != 0);
5186 VI_UNLOCK(vp);
5187 return (res);
5188 }
5189
5190 int
5191 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
5192 {
5193 int error;
5194
5195 if (dp->d_reclen > ap->a_uio->uio_resid)
5196 return (ENAMETOOLONG);
5197 error = uiomove(dp, dp->d_reclen, ap->a_uio);
5198 if (error) {
5199 if (ap->a_ncookies != NULL) {
5200 if (ap->a_cookies != NULL)
5201 free(ap->a_cookies, M_TEMP);
5202 ap->a_cookies = NULL;
5203 *ap->a_ncookies = 0;
5204 }
5205 return (error);
5206 }
5207 if (ap->a_ncookies == NULL)
5208 return (0);
5209
5210 KASSERT(ap->a_cookies,
5211 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
5212
5213 *ap->a_cookies = realloc(*ap->a_cookies,
5214 (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
5215 (*ap->a_cookies)[*ap->a_ncookies] = off;
5216 *ap->a_ncookies += 1;
5217 return (0);
5218 }
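
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  It shows
 * how a synthetic filesystem's VOP_READDIR method might feed entries through
 * vfs_read_dirent(); "myfs_readdir" and the single fixed "." entry are
 * hypothetical.
 */
#if 0	/* example only */
static int
myfs_readdir(struct vop_readdir_args *ap)
{
        struct dirent entry;
        int error;

        /* Emit a single "." entry; a real filesystem would loop here. */
        bzero(&entry, sizeof(entry));
        entry.d_fileno = 2;		/* hypothetical inode number */
        entry.d_type = DT_DIR;
        entry.d_namlen = 1;
        entry.d_name[0] = '.';
        entry.d_reclen = GENERIC_DIRSIZ(&entry);
        error = vfs_read_dirent(ap, &entry, ap->a_uio->uio_offset);
        if (error == ENAMETOOLONG)
                error = 0;		/* user buffer full, not a real error */
        if (ap->a_eofflag != NULL)
                *ap->a_eofflag = 1;
        return (error);
}
#endif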
5219
5220 /*
5221 * Mark for update the access time of the file if the filesystem
5222 * supports VOP_MARKATIME. This functionality is used by execve and
5223 * mmap, so we want to avoid the I/O implied by directly setting
5224 * va_atime for the sake of efficiency.
5225 */
5226 void
5227 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
5228 {
5229 struct mount *mp;
5230
5231 mp = vp->v_mount;
5232 ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
5233 if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
5234 (void)VOP_MARKATIME(vp);
5235 }
5236
5237 /*
5238 * The purpose of this routine is to remove granularity from accmode_t,
5239 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
5240 * VADMIN and VAPPEND.
5241 *
5242 * If it returns 0, the caller is supposed to continue with the usual
5243 * access checks using 'accmode' as modified by this routine. If it
5244  * returns a nonzero value, the caller is supposed to return that value
5245 * as errno.
5246 *
5247 * Note that after this routine runs, accmode may be zero.
5248 */
5249 int
5250 vfs_unixify_accmode(accmode_t *accmode)
5251 {
5252 /*
5253 	 * There is no way to specify an explicit "deny" rule using
5254 * file mode or POSIX.1e ACLs.
5255 */
5256 if (*accmode & VEXPLICIT_DENY) {
5257 *accmode = 0;
5258 return (0);
5259 }
5260
5261 /*
5262 * None of these can be translated into usual access bits.
5263 * Also, the common case for NFSv4 ACLs is to not contain
5264 * either of these bits. Caller should check for VWRITE
5265 * on the containing directory instead.
5266 */
5267 if (*accmode & (VDELETE_CHILD | VDELETE))
5268 return (EPERM);
5269
5270 if (*accmode & VADMIN_PERMS) {
5271 *accmode &= ~VADMIN_PERMS;
5272 *accmode |= VADMIN;
5273 }
5274
5275 /*
5276 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
5277 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
5278 */
5279 *accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
5280
5281 return (0);
5282 }
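
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  It shows
 * the calling convention described above, as a filesystem's VOP_ACCESSX
 * method might use it; "myfs_accessx" is hypothetical.
 */
#if 0	/* example only */
static int
myfs_accessx(struct vop_accessx_args *ap)
{
        accmode_t accmode = ap->a_accmode;
        int error;

        /* Collapse NFSv4-style bits into VEXEC/VREAD/VWRITE/VADMIN/VAPPEND. */
        error = vfs_unixify_accmode(&accmode);
        if (error != 0)
                return (error);
        if (accmode == 0)
                return (0);
        /* ... continue with vaccess() or an ACL-aware check ... */
        return (0);
}
#endif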
5283
5284 /*
5285 * These are helper functions for filesystems to traverse all
5286 * their vnodes. See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
5287 *
5288 * This interface replaces MNT_VNODE_FOREACH.
5289 */
5290
5291 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
5292
5293 struct vnode *
5294 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
5295 {
5296 struct vnode *vp;
5297
5298 if (should_yield())
5299 kern_yield(PRI_USER);
5300 MNT_ILOCK(mp);
5301 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5302 for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
5303 vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
5304 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
5305 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
5306 continue;
5307 VI_LOCK(vp);
5308 if ((vp->v_iflag & VI_DOOMED) != 0) {
5309 VI_UNLOCK(vp);
5310 continue;
5311 }
5312 break;
5313 }
5314 if (vp == NULL) {
5315 __mnt_vnode_markerfree_all(mvp, mp);
5316 /* MNT_IUNLOCK(mp); -- done in above function */
5317 mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
5318 return (NULL);
5319 }
5320 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5321 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5322 MNT_IUNLOCK(mp);
5323 return (vp);
5324 }
5325
5326 struct vnode *
5327 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
5328 {
5329 struct vnode *vp;
5330
5331 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5332 MNT_ILOCK(mp);
5333 MNT_REF(mp);
5334 (*mvp)->v_mount = mp;
5335 (*mvp)->v_type = VMARKER;
5336
5337 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
5338 /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
5339 if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
5340 continue;
5341 VI_LOCK(vp);
5342 if ((vp->v_iflag & VI_DOOMED) != 0) {
5343 VI_UNLOCK(vp);
5344 continue;
5345 }
5346 break;
5347 }
5348 if (vp == NULL) {
5349 MNT_REL(mp);
5350 MNT_IUNLOCK(mp);
5351 free(*mvp, M_VNODE_MARKER);
5352 *mvp = NULL;
5353 return (NULL);
5354 }
5355 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5356 MNT_IUNLOCK(mp);
5357 return (vp);
5358 }
5359
5360 void
5361 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
5362 {
5363
5364 if (*mvp == NULL) {
5365 MNT_IUNLOCK(mp);
5366 return;
5367 }
5368
5369 mtx_assert(MNT_MTX(mp), MA_OWNED);
5370
5371 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5372 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5373 MNT_REL(mp);
5374 MNT_IUNLOCK(mp);
5375 free(*mvp, M_VNODE_MARKER);
5376 *mvp = NULL;
5377 }
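
/*
 * Editor's note: illustrative sketch only, not part of vfs_subr.c.  It shows
 * the intended use of the iterator above via the MNT_VNODE_FOREACH_ALL()
 * macro from sys/mount.h; "myfs_visit_vnodes" is hypothetical.  Each vnode
 * is returned with its interlock held, so the loop body must unlock it (or
 * pass it to vget() with LK_INTERLOCK).
 */
#if 0	/* example only */
static void
myfs_visit_vnodes(struct mount *mp)
{
        struct vnode *vp, *mvp;

        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                if (vp->v_type == VNON) {
                        VI_UNLOCK(vp);
                        continue;
                }
                /* ... inspect or vget() the vnode here ... */
                VI_UNLOCK(vp);
        }
}
#endif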
5378
5379 /*
5380 * These are helper functions for filesystems to traverse their
5381 * active vnodes. See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h
5382 */
5383 static void
5384 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5385 {
5386
5387 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5388
5389 MNT_ILOCK(mp);
5390 MNT_REL(mp);
5391 MNT_IUNLOCK(mp);
5392 free(*mvp, M_VNODE_MARKER);
5393 *mvp = NULL;
5394 }
5395
5396 /*
5397 * Relock the mp mount vnode list lock with the vp vnode interlock in the
5398 * conventional lock order during mnt_vnode_next_active iteration.
5399 *
5400 * On entry, the mount vnode list lock is held and the vnode interlock is not.
5401 * The list lock is dropped and reacquired. On success, both locks are held.
5402 * On failure, the mount vnode list lock is held but the vnode interlock is
5403 * not, and the procedure may have yielded.
5404 */
5405 static bool
5406 mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp,
5407 struct vnode *vp)
5408 {
5409 const struct vnode *tmp;
5410 bool held, ret;
5411
5412 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
5413 TAILQ_NEXT(mvp, v_actfreelist) != NULL, mvp,
5414 ("%s: bad marker", __func__));
5415 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
5416 ("%s: inappropriate vnode", __func__));
5417 ASSERT_VI_UNLOCKED(vp, __func__);
5418 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
5419
5420 ret = false;
5421
5422 TAILQ_REMOVE(&mp->mnt_activevnodelist, mvp, v_actfreelist);
5423 TAILQ_INSERT_BEFORE(vp, mvp, v_actfreelist);
5424
5425 /*
5426 * Use a hold to prevent vp from disappearing while the mount vnode
5427 * list lock is dropped and reacquired. Normally a hold would be
5428 * acquired with vhold(), but that might try to acquire the vnode
5429 * interlock, which would be a LOR with the mount vnode list lock.
5430 */
5431 held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
5432 mtx_unlock(&mp->mnt_listmtx);
5433 if (!held)
5434 goto abort;
5435 VI_LOCK(vp);
5436 if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
5437 vdropl(vp);
5438 goto abort;
5439 }
5440 mtx_lock(&mp->mnt_listmtx);
5441
5442 /*
5443 * Determine whether the vnode is still the next one after the marker,
5444 * excepting any other markers. If the vnode has not been doomed by
5445 * vgone() then the hold should have ensured that it remained on the
5446 * active list. If it has been doomed but is still on the active list,
5447 * don't abort, but rather skip over it (avoid spinning on doomed
5448 * vnodes).
5449 */
5450 tmp = mvp;
5451 do {
5452 tmp = TAILQ_NEXT(tmp, v_actfreelist);
5453 } while (tmp != NULL && tmp->v_type == VMARKER);
5454 if (tmp != vp) {
5455 mtx_unlock(&mp->mnt_listmtx);
5456 VI_UNLOCK(vp);
5457 goto abort;
5458 }
5459
5460 ret = true;
5461 goto out;
5462 abort:
5463 maybe_yield();
5464 mtx_lock(&mp->mnt_listmtx);
5465 out:
5466 if (ret)
5467 ASSERT_VI_LOCKED(vp, __func__);
5468 else
5469 ASSERT_VI_UNLOCKED(vp, __func__);
5470 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
5471 return (ret);
5472 }
5473
5474 static struct vnode *
5475 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5476 {
5477 struct vnode *vp, *nvp;
5478
5479 mtx_assert(&mp->mnt_listmtx, MA_OWNED);
5480 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5481 restart:
5482 vp = TAILQ_NEXT(*mvp, v_actfreelist);
5483 while (vp != NULL) {
5484 if (vp->v_type == VMARKER) {
5485 vp = TAILQ_NEXT(vp, v_actfreelist);
5486 continue;
5487 }
5488 /*
5489 * Try-lock because this is the wrong lock order. If that does
5490 * not succeed, drop the mount vnode list lock and try to
5491 * reacquire it and the vnode interlock in the right order.
5492 */
5493 if (!VI_TRYLOCK(vp) &&
5494 !mnt_vnode_next_active_relock(*mvp, mp, vp))
5495 goto restart;
5496 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
5497 KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
5498 ("alien vnode on the active list %p %p", vp, mp));
5499 if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
5500 break;
5501 nvp = TAILQ_NEXT(vp, v_actfreelist);
5502 VI_UNLOCK(vp);
5503 vp = nvp;
5504 }
5505 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5506
5507 /* Check if we are done */
5508 if (vp == NULL) {
5509 mtx_unlock(&mp->mnt_listmtx);
5510 mnt_vnode_markerfree_active(mvp, mp);
5511 return (NULL);
5512 }
5513 TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
5514 mtx_unlock(&mp->mnt_listmtx);
5515 ASSERT_VI_LOCKED(vp, "active iter");
5516 KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
5517 return (vp);
5518 }
5519
5520 struct vnode *
5521 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5522 {
5523
5524 if (should_yield())
5525 kern_yield(PRI_USER);
5526 mtx_lock(&mp->mnt_listmtx);
5527 return (mnt_vnode_next_active(mvp, mp));
5528 }
5529
5530 struct vnode *
5531 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
5532 {
5533 struct vnode *vp;
5534
5535 *mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5536 MNT_ILOCK(mp);
5537 MNT_REF(mp);
5538 MNT_IUNLOCK(mp);
5539 (*mvp)->v_type = VMARKER;
5540 (*mvp)->v_mount = mp;
5541
5542 mtx_lock(&mp->mnt_listmtx);
5543 vp = TAILQ_FIRST(&mp->mnt_activevnodelist);
5544 if (vp == NULL) {
5545 mtx_unlock(&mp->mnt_listmtx);
5546 mnt_vnode_markerfree_active(mvp, mp);
5547 return (NULL);
5548 }
5549 TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
5550 return (mnt_vnode_next_active(mvp, mp));
5551 }
5552
5553 void
5554 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5555 {
5556
5557 if (*mvp == NULL)
5558 return;
5559
5560 mtx_lock(&mp->mnt_listmtx);
5561 TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5562 mtx_unlock(&mp->mnt_listmtx);
5563 mnt_vnode_markerfree_active(mvp, mp);
5564 }