sys/kern/vfs_cache.c
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.2/sys/kern/vfs_cache.c 215629 2010-11-21 12:47:54Z brucec $");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", "struct vnode *",
    "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");

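/*
 * These statically defined tracing (SDT) probes can be observed with
 * dtrace(1).  A minimal sketch, assuming the sdt and dtraceall modules
 * are loaded; the probe name maps directly onto the definitions above,
 * and arg1 is the "char *" name argument:
 *
 *	# dtrace -n 'vfs:namecache:lookup:hit { printf("%s", stringof(arg1)); }'
 */
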
/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
        LIST_ENTRY(namecache) nc_hash;  /* hash chain */
        LIST_ENTRY(namecache) nc_src;   /* source vnode list */
        TAILQ_ENTRY(namecache) nc_dst;  /* destination vnode list */
        struct vnode *nc_dvp;           /* vnode of parent of name */
        struct vnode *nc_vp;            /* vnode the name refers to */
        u_char nc_flag;                 /* flag bits */
        u_char nc_nlen;                 /* length of name */
        char nc_name[0];                /* segment name + nul */
};
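
/*
 * nc_name[0] is a zero-length array, the pre-C99 idiom for a flexible
 * array member: each entry is allocated with enough trailing space for
 * the name and its nul terminator, so an entry and its name live in a
 * single allocation.  A 3-character name, for example, occupies
 * sizeof(struct namecache) + 4 bytes; see cache_alloc() below.
 */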

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
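
/*
 * Both cache_lookup() and cache_enter() derive the cache key the same
 * way, hashing the name first and then the address of the directory
 * vnode with the 32-bit FNV-1 hash from sys/fnv_hash.h:
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *
 * The bucket is then selected with NCHHASH(hash), defined below.
 */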

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
        (&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;     /* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;   /* LRU list of negative entries */
static u_long nchash;                   /* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long ncnegfactor = 16;         /* ratio of negative entries */
/* _debug sysctl left for backward compatibility */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long numneg;                   /* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long numcache;                 /* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long numcachehv;               /* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
static u_int ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");

struct nchstats nchstats;               /* cache effectiveness statistics */

static struct rwlock cache_lock;
RW_SYSINIT(vfscache, &cache_lock, "Name Cache");

#define CACHE_UPGRADE_LOCK()    rw_try_upgrade(&cache_lock)
#define CACHE_RLOCK()           rw_rlock(&cache_lock)
#define CACHE_RUNLOCK()         rw_runlock(&cache_lock)
#define CACHE_WLOCK()           rw_wlock(&cache_lock)
#define CACHE_WUNLOCK()         rw_wunlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define CACHE_PATH_CUTOFF       35
#define CACHE_ZONE_SMALL        (sizeof(struct namecache) + CACHE_PATH_CUTOFF \
                                    + 1)
#define CACHE_ZONE_LARGE        (sizeof(struct namecache) + NAME_MAX + 1)

#define cache_alloc(len)        uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
        cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp) do { \
        if (ncp != NULL) \
                uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
                    cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
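
/*
 * With CACHE_PATH_CUTOFF at 35, a name of up to 35 characters plus the
 * terminating nul fits in the small zone; longer names, up to NAME_MAX,
 * come from the large zone.  cache_free() recovers the owning zone from
 * nc_nlen, so callers never need to track which zone an entry came from.
 */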

static int doingcache = 1;              /* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE(mode, name, var, descr) \
        SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr);
STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries");
STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries");
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls,
    "Number of cache lookups");
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits,
    "Number of '.' hits");
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits,
    "Number of '..' hits");
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks,
    "Number of checks in lookup");
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss,
    "Number of cache misses");
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap,
    "Number of cache misses we do not want to cache");
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps,
    "Number of cache hits (positive) we do not want to cache");
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits,
    "Number of cache hits (positive)");
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits,
    "Number of cache hits (negative)");
static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades,
    "Number of updates of the cache after lookup (write lock + retry)");

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE,
    &nchstats, sizeof(nchstats), "LU",
    "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE       0x01
#define NCF_ISDOTDOT    0x02

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct nchashhead *ncpp;
        struct namecache *ncp;
        int n_nchash;
        int count;

        n_nchash = nchash + 1;  /* nchash is max index, not count */
        if (!req->oldptr)
                return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

        /* Scan hash tables for applicable entries */
        for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
                CACHE_RLOCK();
                count = 0;
                LIST_FOREACH(ncp, ncpp, nc_hash) {
                        count++;
                }
                CACHE_RUNLOCK();
                error = SYSCTL_OUT(req, &count, sizeof(count));
                if (error)
                        return (error);
        }
        return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct nchashhead *ncpp;
        struct namecache *ncp;
        int n_nchash;
        int count, maxlength, used, pct;

        if (!req->oldptr)
                return SYSCTL_OUT(req, 0, 4 * sizeof(int));

        n_nchash = nchash + 1;  /* nchash is max index, not count */
        used = 0;
        maxlength = 0;

        /* Scan hash tables for applicable entries */
        for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
                count = 0;
                CACHE_RLOCK();
                LIST_FOREACH(ncp, ncpp, nc_hash) {
                        count++;
                }
                CACHE_RUNLOCK();
                if (count)
                        used++;
                if (maxlength < count)
                        maxlength = count;
        }
        n_nchash = nchash + 1;
        /* Fraction of buckets in use, in hundredths of a percent. */
        pct = (used * 100 * 100) / n_nchash;
        error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &used, sizeof(used));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &pct, sizeof(pct));
        if (error)
                return (error);
        return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash chain lengths");
#endif

/*
 * cache_zap():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or if it is just a negative cache entry.
 */
static void
cache_zap(struct namecache *ncp)
{
        struct vnode *vp;

        rw_assert(&cache_lock, RA_WLOCKED);
        CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
#ifdef KDTRACE_HOOKS
        if (ncp->nc_vp != NULL) {
                SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
                    ncp->nc_name, ncp->nc_vp, 0, 0);
        } else {
                SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
                    ncp->nc_name, 0, 0, 0);
        }
#endif
        vp = NULL;
        LIST_REMOVE(ncp, nc_hash);
        if (ncp->nc_flag & NCF_ISDOTDOT) {
                if (ncp == ncp->nc_dvp->v_cache_dd)
                        ncp->nc_dvp->v_cache_dd = NULL;
        } else {
                LIST_REMOVE(ncp, nc_src);
                if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
                        vp = ncp->nc_dvp;
                        numcachehv--;
                }
        }
        if (ncp->nc_vp) {
                TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
                if (ncp == ncp->nc_vp->v_cache_dd)
                        ncp->nc_vp->v_cache_dd = NULL;
        } else {
                TAILQ_REMOVE(&ncneg, ncp, nc_dst);
                numneg--;
        }
        numcache--;
        cache_free(ncp);
        if (vp)
                vdrop(vp);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.  If the directory vnode is
 * recycled out from under us due to a forced unmount, a status of
 * ENOENT is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up ".", an extra ref is taken, but the lock
 * is not recursively acquired.
 */
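
/*
 * A sketch of the caller protocol; vfs_cache_lookup() below is the
 * canonical consumer:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)		(miss: ask the filesystem, VOP_CACHEDLOOKUP)
 *	else if (error == -1)	(positive hit: *vpp is locked and ref'd)
 *	else			(ENOENT: cached negative entry)
 */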

int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
        struct namecache *ncp;
        u_int32_t hash;
        int error, ltype, wlocked;

        if (!doingcache) {
                cnp->cn_flags &= ~MAKEENTRY;
                return (0);
        }
retry:
        CACHE_RLOCK();
        wlocked = 0;
        numcalls++;
        error = 0;

retry_wlocked:
        if (cnp->cn_nameptr[0] == '.') {
                if (cnp->cn_namelen == 1) {
                        *vpp = dvp;
                        CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
                            dvp, cnp->cn_nameptr);
                        dothits++;
                        SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".",
                            *vpp, 0, 0);
                        goto success;
                }
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        dotdothits++;
                        if (dvp->v_cache_dd == NULL) {
                                SDT_PROBE(vfs, namecache, lookup, miss, dvp,
                                    "..", NULL, 0, 0);
                                goto unlock;
                        }
                        if ((cnp->cn_flags & MAKEENTRY) == 0) {
                                if (!wlocked && !CACHE_UPGRADE_LOCK())
                                        goto wlock;
                                if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
                                        cache_zap(dvp->v_cache_dd);
                                dvp->v_cache_dd = NULL;
                                CACHE_WUNLOCK();
                                return (0);
                        }
                        if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT)
                                *vpp = dvp->v_cache_dd->nc_vp;
                        else
                                *vpp = dvp->v_cache_dd->nc_dvp;
                        /* Return failure if negative entry was found. */
                        if (*vpp == NULL) {
                                ncp = dvp->v_cache_dd;
                                goto negative_success;
                        }
                        CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
                            dvp, cnp->cn_nameptr, *vpp);
                        SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
                            *vpp, 0, 0);
                        goto success;
                }
        }

        hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
        LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                numchecks++;
                if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
                    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
                        break;
        }

        /* We failed to find an entry */
        if (ncp == NULL) {
                SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
                    NULL, 0, 0);
                if ((cnp->cn_flags & MAKEENTRY) == 0) {
                        nummisszap++;
                } else {
                        nummiss++;
                }
                nchstats.ncs_miss++;
                goto unlock;
        }

        /* We don't want to have an entry, so dump it */
        if ((cnp->cn_flags & MAKEENTRY) == 0) {
                numposzaps++;
                nchstats.ncs_badhits++;
                if (!wlocked && !CACHE_UPGRADE_LOCK())
                        goto wlock;
                cache_zap(ncp);
                CACHE_WUNLOCK();
                return (0);
        }

        /* We found a "positive" match, return the vnode */
        if (ncp->nc_vp) {
                numposhits++;
                nchstats.ncs_goodhits++;
                *vpp = ncp->nc_vp;
                CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
                    dvp, cnp->cn_nameptr, *vpp, ncp);
                SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
                    *vpp, 0, 0);
                goto success;
        }

negative_success:
        /* We found a negative match, and want to create it, so purge */
        if (cnp->cn_nameiop == CREATE) {
                numnegzaps++;
                nchstats.ncs_badhits++;
                if (!wlocked && !CACHE_UPGRADE_LOCK())
                        goto wlock;
                cache_zap(ncp);
                CACHE_WUNLOCK();
                return (0);
        }

        if (!wlocked && !CACHE_UPGRADE_LOCK())
                goto wlock;
        numneghits++;
        /*
         * We found a "negative" match, so we shift it to the end of
         * the "negative" cache entries queue to satisfy LRU.  Also,
         * check to see if the entry is a whiteout; indicate this to
         * the componentname, if so.
         */
        TAILQ_REMOVE(&ncneg, ncp, nc_dst);
        TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
        nchstats.ncs_neghits++;
        if (ncp->nc_flag & NCF_WHITE)
                cnp->cn_flags |= ISWHITEOUT;
        SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
            0, 0, 0);
        CACHE_WUNLOCK();
        return (ENOENT);

wlock:
        /*
         * We need to update the cache after our lookup, so upgrade to
         * a write lock and retry the operation.
         */
        CACHE_RUNLOCK();
        CACHE_WLOCK();
        numupgrades++;
        wlocked = 1;
        goto retry_wlocked;

success:
        /*
         * On success we return a locked and ref'd vnode as per the lookup
         * protocol.
         */
        if (dvp == *vpp) {   /* lookup on "." */
                VREF(*vpp);
                if (wlocked)
                        CACHE_WUNLOCK();
                else
                        CACHE_RUNLOCK();
                /*
                 * When we lookup "." we still can be asked to lock it
                 * differently...
                 */
                ltype = cnp->cn_lkflags & LK_TYPE_MASK;
                if (ltype != VOP_ISLOCKED(*vpp)) {
                        if (ltype == LK_EXCLUSIVE) {
                                vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
                                if ((*vpp)->v_iflag & VI_DOOMED) {
                                        /* forced unmount */
                                        vrele(*vpp);
                                        *vpp = NULL;
                                        return (ENOENT);
                                }
                        } else
                                vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
                }
                return (-1);
        }
        ltype = 0;      /* silence gcc warning */
        if (cnp->cn_flags & ISDOTDOT) {
                ltype = VOP_ISLOCKED(dvp);
                VOP_UNLOCK(dvp, 0);
        }
        VI_LOCK(*vpp);
        if (wlocked)
                CACHE_WUNLOCK();
        else
                CACHE_RUNLOCK();
        error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
        if (cnp->cn_flags & ISDOTDOT) {
                vn_lock(dvp, ltype | LK_RETRY);
                if (dvp->v_iflag & VI_DOOMED) {
                        if (error == 0)
                                vput(*vpp);
                        *vpp = NULL;
                        return (ENOENT);
                }
        }
        if (error) {
                *vpp = NULL;
                goto retry;
        }
        if ((cnp->cn_flags & ISLASTCN) &&
            (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
                ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
        }
        return (-1);

unlock:
        if (wlocked)
                CACHE_WUNLOCK();
        else
                CACHE_RUNLOCK();
        return (0);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
        struct namecache *ncp, *n2;
        struct nchashhead *ncpp;
        u_int32_t hash;
        int flag;
        int hold;
        int zap;
        int len;

        CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
        VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
            ("cache_enter: Adding a doomed vnode"));
        VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
            ("cache_enter: Doomed vnode used as src"));

        if (!doingcache)
                return;

        /*
         * Avoid blowout in namecache entries.
         */
        if (numcache >= desiredvnodes * ncsizefactor)
                return;

        flag = 0;
        if (cnp->cn_nameptr[0] == '.') {
                if (cnp->cn_namelen == 1)
                        return;
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        CACHE_WLOCK();
                        /*
                         * If dotdot entry already exists, just retarget it
                         * to new parent vnode, otherwise continue with new
                         * namecache entry allocation.
                         */
                        if ((ncp = dvp->v_cache_dd) != NULL &&
                            ncp->nc_flag & NCF_ISDOTDOT) {
                                KASSERT(ncp->nc_dvp == dvp,
                                    ("wrong isdotdot parent"));
                                if (ncp->nc_vp != NULL)
                                        TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst,
                                            ncp, nc_dst);
                                else
                                        TAILQ_REMOVE(&ncneg, ncp, nc_dst);
                                if (vp != NULL)
                                        TAILQ_INSERT_HEAD(&vp->v_cache_dst,
                                            ncp, nc_dst);
                                else
                                        TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
                                ncp->nc_vp = vp;
                                CACHE_WUNLOCK();
                                return;
                        }
                        dvp->v_cache_dd = NULL;
                        SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp,
                            0, 0);
                        CACHE_WUNLOCK();
                        flag = NCF_ISDOTDOT;
                }
        }

        hold = 0;
        zap = 0;

        /*
         * Calculate the hash key and setup as much of the new
         * namecache entry as possible before acquiring the lock.
         */
        ncp = cache_alloc(cnp->cn_namelen);
        ncp->nc_vp = vp;
        ncp->nc_dvp = dvp;
        ncp->nc_flag = flag;
        len = ncp->nc_nlen = cnp->cn_namelen;
        hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
        strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
        hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
        CACHE_WLOCK();

        /*
         * See if this vnode or negative entry is already in the cache
         * with this name.  This can happen with concurrent lookups of
         * the same path name.
         */
        ncpp = NCHHASH(hash);
        LIST_FOREACH(n2, ncpp, nc_hash) {
                if (n2->nc_dvp == dvp &&
                    n2->nc_nlen == cnp->cn_namelen &&
                    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
                        CACHE_WUNLOCK();
                        cache_free(ncp);
                        return;
                }
        }

        if (flag == NCF_ISDOTDOT) {
                /*
                 * See if we are trying to add .. entry, but some other lookup
                 * has populated v_cache_dd pointer already.
                 */
                if (dvp->v_cache_dd != NULL) {
                        CACHE_WUNLOCK();
                        cache_free(ncp);
                        return;
                }
                KASSERT(vp == NULL || vp->v_type == VDIR,
                    ("wrong vnode type %p", vp));
                dvp->v_cache_dd = ncp;
        }

        numcache++;
        if (!vp) {
                numneg++;
                if (cnp->cn_flags & ISWHITEOUT)
                        ncp->nc_flag |= NCF_WHITE;
        } else if (vp->v_type == VDIR) {
                if (flag != NCF_ISDOTDOT) {
                        if ((n2 = vp->v_cache_dd) != NULL &&
                            (n2->nc_flag & NCF_ISDOTDOT) != 0)
                                cache_zap(n2);
                        vp->v_cache_dd = ncp;
                }
        } else {
                vp->v_cache_dd = NULL;
        }

        /*
         * Insert the new namecache entry into the appropriate chain
         * within the cache entries table.
         */
        LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
        if (flag != NCF_ISDOTDOT) {
                if (LIST_EMPTY(&dvp->v_cache_src)) {
                        hold = 1;
                        numcachehv++;
                }
                LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
        }

        /*
         * If the entry is "negative", we place it into the
         * "negative" cache queue, otherwise, we place it into the
         * destination vnode's cache entries queue.
         */
        if (vp) {
                TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
                SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
                    0, 0);
        } else {
                TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
                SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
                    ncp->nc_name, 0, 0, 0);
        }
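        /*
         * Cap negative entries at roughly 1/ncnegfactor of the cache:
         * with the default factor of 16, one negative entry beyond
         * numcache / 16 causes the oldest negative entry (the head of
         * the ncneg LRU) to be reclaimed below.
         */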
        if (numneg * ncnegfactor > numcache) {
                ncp = TAILQ_FIRST(&ncneg);
                zap = 1;
        }
        if (hold)
                vhold(dvp);
        if (zap)
                cache_zap(ncp);
        CACHE_WUNLOCK();
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

        TAILQ_INIT(&ncneg);

        cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
            NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
        cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
            NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

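        /*
         * hashinit(9) rounds the requested element count (twice the
         * system-wide vnode target) down to a power of two and stores
         * size - 1 in nchash, which is why NCHHASH() above can pick a
         * bucket with a simple mask rather than a modulo.
         */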
        nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(struct vnode *vp)
{

        CTR1(KTR_VFS, "cache_purge(%p)", vp);
        SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0);
        CACHE_WLOCK();
        while (!LIST_EMPTY(&vp->v_cache_src))
                cache_zap(LIST_FIRST(&vp->v_cache_src));
        while (!TAILQ_EMPTY(&vp->v_cache_dst))
                cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
        if (vp->v_cache_dd != NULL) {
                KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT,
                    ("lost dotdot link"));
                cache_zap(vp->v_cache_dd);
        }
        KASSERT(vp->v_cache_dd == NULL, ("incomplete purge"));
        CACHE_WUNLOCK();
}

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(struct vnode *vp)
{
        struct namecache *cp, *ncp;

        CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
        SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0);
        CACHE_WLOCK();
        LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) {
                if (cp->nc_vp == NULL)
                        cache_zap(cp);
        }
        CACHE_WUNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(struct mount *mp)
{
        struct nchashhead *ncpp;
        struct namecache *ncp, *nnp;

        /* Scan hash tables for applicable entries */
        SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0);
        CACHE_WLOCK();
        for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
                LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
                        if (ncp->nc_dvp->v_mount == mp)
                                cache_zap(ncp);
                }
        }
        CACHE_WUNLOCK();
}

/*
 * Perform canonical checks and cache lookup, and pass on to the
 * filesystem (through VOP_CACHEDLOOKUP) only if needed.
 */
int
vfs_cache_lookup(struct vop_lookup_args /* {
        struct vnode *a_dvp;
        struct vnode **a_vpp;
        struct componentname *a_cnp;
} */ *ap)
{
        struct vnode *dvp;
        int error;
        struct vnode **vpp = ap->a_vpp;
        struct componentname *cnp = ap->a_cnp;
        struct ucred *cred = cnp->cn_cred;
        int flags = cnp->cn_flags;
        struct thread *td = cnp->cn_thread;

        *vpp = NULL;
        dvp = ap->a_dvp;

        if (dvp->v_type != VDIR)
                return (ENOTDIR);

        if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
            (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
                return (EROFS);

        error = VOP_ACCESS(dvp, VEXEC, cred, td);
        if (error)
                return (error);

        error = cache_lookup(dvp, vpp, cnp);
        if (error == 0)
                return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
        if (error == -1)
                return (0);
        return (error);
}
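
/*
 * Filesystems opt in by pointing their vop_lookup entry at this function
 * and supplying the real lookup routine as vop_cachedlookup.  A sketch of
 * the relevant vop vector entries (UFS, for instance, is wired this way):
 *
 *	.vop_lookup =		vfs_cache_lookup,
 *	.vop_cachedlookup =	ufs_lookup,
 */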

#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
        u_char *buf;
        u_int buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
__getcwd(struct thread *td, struct __getcwd_args *uap)
{

        return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
        char *bp, *tmpbuf;
        struct filedesc *fdp;
        struct vnode *cdir, *rdir;
        int error, vfslocked;

        if (disablecwd)
                return (ENODEV);
        if (buflen < 2)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
        fdp = td->td_proc->p_fd;
        FILEDESC_SLOCK(fdp);
        cdir = fdp->fd_cdir;
        VREF(cdir);
        rdir = fdp->fd_rdir;
        VREF(rdir);
        FILEDESC_SUNLOCK(fdp);
        error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
        vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
        vrele(rdir);
        VFS_UNLOCK_GIANT(vfslocked);
        vfslocked = VFS_LOCK_GIANT(cdir->v_mount);
        vrele(cdir);
        VFS_UNLOCK_GIANT(vfslocked);

        if (!error) {
                if (bufseg == UIO_SYSSPACE)
                        bcopy(bp, buf, strlen(bp) + 1);
                else
                        error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
                if (KTRPOINT(curthread, KTR_NAMEI))
                        ktrnamei(bp);
#endif
        }
        free(tmpbuf, M_TEMP);
        return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name, descr)                                           \
        static u_int name;                                              \
        SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr)

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls, "Number of fullpath search calls");
STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE(numfullpathfound, "Number of successful fullpath calls");

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the
 * name cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
        char *buf;
        struct filedesc *fdp;
        struct vnode *rdir;
        int error, vfslocked;

        if (disablefullpath)
                return (ENODEV);
        if (vn == NULL)
                return (EINVAL);

        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        fdp = td->td_proc->p_fd;
        FILEDESC_SLOCK(fdp);
        rdir = fdp->fd_rdir;
        VREF(rdir);
        FILEDESC_SUNLOCK(fdp);
        error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
        vfslocked = VFS_LOCK_GIANT(rdir->v_mount);
        vrele(rdir);
        VFS_UNLOCK_GIANT(vfslocked);

        if (!error)
                *freebuf = buf;
        else
                free(buf, M_TEMP);
        return (error);
}

/*
 * This function is similar to vn_fullpath, but it attempts to look up the
 * pathname relative to the global root mount point.  This is required for
 * the auditing sub-system, as audited pathnames must be absolute, relative
 * to the global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
        char *buf;
        int error;

        if (disablefullpath)
                return (ENODEV);
        if (vn == NULL)
                return (EINVAL);
        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
        if (!error)
                *freebuf = buf;
        else
                free(buf, M_TEMP);
        return (error);
}

int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
        int error;

        CACHE_RLOCK();
        error = vn_vptocnp_locked(vp, cred, buf, buflen);
        if (error == 0) {
                /*
                 * vn_vptocnp_locked() dropped the hold acquired by
                 * VOP_VPTOCNP immediately after locking the
                 * cache.  Since we are going to drop the cache rlock,
                 * re-hold the result.
                 */
                vhold(*vp);
                CACHE_RUNLOCK();
        }
        return (error);
}

static int
vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
    u_int *buflen)
{
        struct vnode *dvp;
        struct namecache *ncp;
        int error, vfslocked;

        TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
                if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
                        break;
        }
        if (ncp != NULL) {
                if (*buflen < ncp->nc_nlen) {
                        CACHE_RUNLOCK();
                        numfullpathfail4++;
                        error = ENOMEM;
                        SDT_PROBE(vfs, namecache, fullpath, return, error,
                            vp, NULL, 0, 0);
                        return (error);
                }
                *buflen -= ncp->nc_nlen;
                memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
                SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
                    ncp->nc_name, vp, 0, 0);
                *vp = ncp->nc_dvp;
                return (0);
        }
        SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0);

        vhold(*vp);
        CACHE_RUNLOCK();
        vfslocked = VFS_LOCK_GIANT((*vp)->v_mount);
        vn_lock(*vp, LK_SHARED | LK_RETRY);
        error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
        VOP_UNLOCK(*vp, 0);
        vdrop(*vp);
        VFS_UNLOCK_GIANT(vfslocked);
        if (error) {
                numfullpathfail2++;
                SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
                    NULL, 0, 0);
                return (error);
        }

        *vp = dvp;
        CACHE_RLOCK();
        if ((*vp)->v_iflag & VI_DOOMED) {
                /* forced unmount */
                CACHE_RUNLOCK();
                vdrop(*vp);
                error = ENOENT;
                SDT_PROBE(vfs, namecache, fullpath, return, error, vp,
                    NULL, 0, 0);
                return (error);
        }
        vdrop(*vp);

        return (0);
}

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
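
/*
 * vn_fullpath1() builds the path backwards: the buffer fills from its
 * end toward the front, one component (plus a '/' separator) per
 * iteration, and *retbuf ends up pointing at the first byte written.
 * For "/usr/local", for instance, the buffer evolves as
 *
 *	[ ............ local0 ]
 *	[ ........... /local0 ]
 *	[ ........ usr/local0 ]
 *	[ ....... /usr/local0 ]
 *
 * (0 denoting the nul terminator), so the partial result never needs
 * to be shifted or reversed.
 */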
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
        int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
        struct vnode *startvp = vp;
#endif

        buflen--;
        buf[buflen] = '\0';
        error = 0;
        slash_prefixed = 0;

        SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0);
        numfullpathcalls++;
        CACHE_RLOCK();
        if (vp->v_type != VDIR) {
                error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
                if (error)
                        return (error);
                if (buflen == 0) {
                        CACHE_RUNLOCK();
                        return (ENOMEM);
                }
                buf[--buflen] = '/';
                slash_prefixed = 1;
        }
        while (vp != rdir && vp != rootvnode) {
                if (vp->v_vflag & VV_ROOT) {
                        if (vp->v_iflag & VI_DOOMED) {  /* forced unmount */
                                CACHE_RUNLOCK();
                                error = ENOENT;
                                SDT_PROBE(vfs, namecache, fullpath, return,
                                    error, vp, NULL, 0, 0);
                                break;
                        }
                        vp = vp->v_mount->mnt_vnodecovered;
                        continue;
                }
                if (vp->v_type != VDIR) {
                        CACHE_RUNLOCK();
                        numfullpathfail1++;
                        error = ENOTDIR;
                        SDT_PROBE(vfs, namecache, fullpath, return,
                            error, vp, NULL, 0, 0);
                        break;
                }
                error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen);
                if (error)
                        break;
                if (buflen == 0) {
                        CACHE_RUNLOCK();
                        error = ENOMEM;
                        SDT_PROBE(vfs, namecache, fullpath, return, error,
                            startvp, NULL, 0, 0);
                        break;
                }
                buf[--buflen] = '/';
                slash_prefixed = 1;
        }
        if (error)
                return (error);
        if (!slash_prefixed) {
                if (buflen == 0) {
                        CACHE_RUNLOCK();
                        numfullpathfail4++;
                        SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM,
                            startvp, NULL, 0, 0);
                        return (ENOMEM);
                }
                buf[--buflen] = '/';
        }
        numfullpathfound++;
        CACHE_RUNLOCK();

        SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen,
            0, 0);
        *retbuf = buf + buflen;
        return (0);
}

int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
        struct namecache *ncp;
        int l;

        CACHE_RLOCK();
        TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
                if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
                        break;
        if (ncp == NULL) {
                CACHE_RUNLOCK();
                return (ENOENT);
        }
        l = min(ncp->nc_nlen, buflen - 1);
        memcpy(buf, ncp->nc_name, l);
        CACHE_RUNLOCK();
        buf[l] = '\0';
        return (0);
}