FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_cache.c
/*-
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

#include <vm/uma.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
        LIST_ENTRY(namecache) nc_hash;  /* hash chain */
        LIST_ENTRY(namecache) nc_src;   /* source vnode list */
        TAILQ_ENTRY(namecache) nc_dst;  /* destination vnode list */
        struct vnode *nc_dvp;           /* vnode of parent of name */
        struct vnode *nc_vp;            /* vnode the name refers to */
        u_char nc_flag;                 /* flag bits */
        u_char nc_nlen;                 /* length of name */
        char nc_name[0];                /* segment name */
};

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
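/*
 * Illustrative sketch, not part of the original file: how the (vp, name)
 * pair described above is turned into a hash key.  It mirrors the two
 * fnv_32_buf() passes that cache_lookup() and cache_enter() perform below;
 * the NCHHASH() macro then masks the result down to a hash chain.  The
 * helper name is hypothetical.
 */
static __inline u_int32_t
example_cache_hash(struct vnode *dvp, const char *name, int namelen)
{
        u_int32_t hash;

        /* Hash the name segment first, then fold in the directory vnode. */
        hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
        return (hash);
}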
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
        (&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;     /* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;   /* LRU list of negative entries */
static u_long nchash;                   /* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long ncnegfactor = 16;         /* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long numneg;                   /* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long numcache;                 /* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long numcachehv;               /* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long numcachepl;               /* number of cache purges for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct nchstats nchstats;               /* cache effectiveness statistics */

static struct mtx cache_lock;
MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);

#define CACHE_LOCK()    mtx_lock(&cache_lock)
#define CACHE_UNLOCK()  mtx_unlock(&cache_lock)

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
 */
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_large;

#define CACHE_PATH_CUTOFF       32
#define CACHE_ZONE_SMALL        (sizeof(struct namecache) + CACHE_PATH_CUTOFF)
#define CACHE_ZONE_LARGE        (sizeof(struct namecache) + NAME_MAX)

#define cache_alloc(len)        uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
        cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp)         do { \
        if (ncp != NULL) \
                uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
                    cache_zone_small : cache_zone_large, (ncp)); \
        } while (0)
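/*
 * Illustrative sketch, not part of the original file: cache_alloc() and
 * cache_free() above must agree on which UMA zone an entry belongs to, and
 * both decide by name length, so nc_nlen has to be set before the entry
 * can ever be freed.  The helper name is hypothetical.
 */
static __inline struct namecache *
example_alloc_entry(const char *name, u_char namelen)
{
        struct namecache *ncp;

        ncp = cache_alloc(namelen);     /* names <= 32 chars use the small zone */
        ncp->nc_nlen = namelen;         /* cache_free() selects the zone by this */
        bcopy(name, ncp->nc_name, namelen);
        return (ncp);
}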
static int doingcache = 1;              /* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0,
    sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
        SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
    sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE       1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct nchashhead *ncpp;
        struct namecache *ncp;
        int n_nchash;
        int count;

        n_nchash = nchash + 1;  /* nchash is max index, not count */
        if (!req->oldptr)
                return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

        /* Scan hash tables for applicable entries */
        for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
                count = 0;
                LIST_FOREACH(ncp, ncpp, nc_hash) {
                        count++;
                }
                error = SYSCTL_OUT(req, &count, sizeof(count));
                if (error)
                        return (error);
        }
        return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
    0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
        int error;
        struct nchashhead *ncpp;
        struct namecache *ncp;
        int n_nchash;
        int count, maxlength, used, pct;

        if (!req->oldptr)
                return SYSCTL_OUT(req, 0, 4 * sizeof(int));

        n_nchash = nchash + 1;  /* nchash is max index, not count */
        used = 0;
        maxlength = 0;

        /* Scan hash tables for applicable entries */
        for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
                count = 0;
                LIST_FOREACH(ncp, ncpp, nc_hash) {
                        count++;
                }
                if (count)
                        used++;
                if (maxlength < count)
                        maxlength = count;
        }
        n_nchash = nchash + 1;
        pct = (used * 100 * 100) / n_nchash;
        error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &used, sizeof(used));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
        if (error)
                return (error);
        error = SYSCTL_OUT(req, &pct, sizeof(pct));
        if (error)
                return (error);
        return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
    0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");
/*
 * cache_zap():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or is just a negative cache entry.
 */
static void
cache_zap(ncp)
        struct namecache *ncp;
{
        struct vnode *vp;

        mtx_assert(&cache_lock, MA_OWNED);
        CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
        vp = NULL;
        LIST_REMOVE(ncp, nc_hash);
        LIST_REMOVE(ncp, nc_src);
        if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
                vp = ncp->nc_dvp;
                numcachehv--;
        }
        if (ncp->nc_vp) {
                TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
                ncp->nc_vp->v_dd = NULL;
        } else {
                TAILQ_REMOVE(&ncneg, ncp, nc_dst);
                numneg--;
        }
        numcache--;
        cache_free(ncp);
        if (vp)
                vdrop(vp);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 *
 * vpp is locked and ref'd on return.  If we're looking up DOTDOT, dvp is
 * unlocked.  If we're looking up . an extra ref is taken, but the lock is
 * not recursively acquired.
 */
int
cache_lookup(dvp, vpp, cnp)
        struct vnode *dvp;
        struct vnode **vpp;
        struct componentname *cnp;
{
        struct namecache *ncp;
        struct thread *td;
        u_int32_t hash;
        int error, ltype;

        if (!doingcache) {
                cnp->cn_flags &= ~MAKEENTRY;
                return (0);
        }
        td = cnp->cn_thread;
retry:
        CACHE_LOCK();
        numcalls++;

        if (cnp->cn_nameptr[0] == '.') {
                if (cnp->cn_namelen == 1) {
                        *vpp = dvp;
                        CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
                            dvp, cnp->cn_nameptr);
                        dothits++;
                        goto success;
                }
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        dotdothits++;
                        if (dvp->v_dd == NULL ||
                            (cnp->cn_flags & MAKEENTRY) == 0) {
                                CACHE_UNLOCK();
                                return (0);
                        }
                        *vpp = dvp->v_dd;
                        CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
                            dvp, cnp->cn_nameptr, *vpp);
                        goto success;
                }
        }

        hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
        LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                numchecks++;
                if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
                    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
                        break;
        }

        /* We failed to find an entry */
        if (ncp == NULL) {
                if ((cnp->cn_flags & MAKEENTRY) == 0) {
                        nummisszap++;
                } else {
                        nummiss++;
                }
                nchstats.ncs_miss++;
                CACHE_UNLOCK();
                return (0);
        }

        /* We don't want to have an entry, so dump it */
        if ((cnp->cn_flags & MAKEENTRY) == 0) {
                numposzaps++;
                nchstats.ncs_badhits++;
                cache_zap(ncp);
                CACHE_UNLOCK();
                return (0);
        }

        /* We found a "positive" match, return the vnode */
        if (ncp->nc_vp) {
                numposhits++;
                nchstats.ncs_goodhits++;
                *vpp = ncp->nc_vp;
                CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
                    dvp, cnp->cn_nameptr, *vpp, ncp);
                goto success;
        }

        /* We found a negative match, and want to create it, so purge */
        if (cnp->cn_nameiop == CREATE) {
                numnegzaps++;
                nchstats.ncs_badhits++;
                cache_zap(ncp);
                CACHE_UNLOCK();
                return (0);
        }

        numneghits++;
        /*
         * We found a "negative" match, so we shift it to the end of
         * the "negative" cache entries queue to satisfy LRU.  Also,
         * check to see if the entry is a whiteout; indicate this to
         * the componentname, if so.
         */
        TAILQ_REMOVE(&ncneg, ncp, nc_dst);
        TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
        nchstats.ncs_neghits++;
        if (ncp->nc_flag & NCF_WHITE)
                cnp->cn_flags |= ISWHITEOUT;
        CACHE_UNLOCK();
        return (ENOENT);

success:
        /*
         * On success we return a locked and ref'd vnode as per the lookup
         * protocol.
         */
        if (dvp == *vpp) {      /* lookup on "." */
                VREF(*vpp);
                CACHE_UNLOCK();
                /*
                 * When we lookup "." we still can be asked to lock it
                 * differently...
                 */
                ltype = cnp->cn_lkflags & (LK_SHARED | LK_EXCLUSIVE);
                if (ltype == VOP_ISLOCKED(*vpp, td))
                        return (-1);
                else if (ltype == LK_EXCLUSIVE)
                        vn_lock(*vpp, LK_UPGRADE | LK_RETRY, td);
                return (-1);
        }
        ltype = 0;      /* silence gcc warning */
        if (cnp->cn_flags & ISDOTDOT) {
                ltype = VOP_ISLOCKED(dvp, td);
                VOP_UNLOCK(dvp, 0, td);
        }
        VI_LOCK(*vpp);
        CACHE_UNLOCK();
        error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, td);
        if (cnp->cn_flags & ISDOTDOT)
                vn_lock(dvp, ltype | LK_RETRY, td);
        if ((cnp->cn_flags & ISLASTCN) && (cnp->cn_lkflags & LK_EXCLUSIVE))
                ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
        if (error) {
                *vpp = NULL;
                goto retry;
        }
        return (-1);
}
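/*
 * Hedged usage sketch, not part of the original file: decoding the
 * three-way return protocol documented above cache_lookup().  Compare
 * vfs_cache_lookup() later in this file, which is the canonical consumer;
 * the helper name is hypothetical.
 */
static __inline int
example_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
        int error;

        error = cache_lookup(dvp, vpp, cnp);
        if (error == -1)        /* hit: *vpp is returned locked and ref'd */
                return (0);
        if (error == ENOENT)    /* negative hit: name known not to exist */
                return (ENOENT);
        /* error == 0: cache miss; fall through to the filesystem's lookup. */
        return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
}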
/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
        struct vnode *dvp;
        struct vnode *vp;
        struct componentname *cnp;
{
        struct namecache *ncp;
        struct nchashhead *ncpp;
        u_int32_t hash;
        int hold;
        int zap;
        int len;

        CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
        VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
            ("cache_enter: Adding a doomed vnode"));

        if (!doingcache)
                return;

        if (cnp->cn_nameptr[0] == '.') {
                if (cnp->cn_namelen == 1) {
                        return;
                }
                /*
                 * For dotdot lookups only cache the v_dd pointer if the
                 * directory has a link back to its parent via v_cache_dst.
                 * Without this an unlinked directory would keep a soft
                 * reference to its parent which could not be NULLd at
                 * cache_purge() time.
                 */
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        CACHE_LOCK();
                        if (!TAILQ_EMPTY(&dvp->v_cache_dst))
                                dvp->v_dd = vp;
                        CACHE_UNLOCK();
                        return;
                }
        }

        hold = 0;
        zap = 0;
        ncp = cache_alloc(cnp->cn_namelen);
        CACHE_LOCK();
        numcache++;
        if (!vp) {
                numneg++;
                ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
        } else if (vp->v_type == VDIR) {
                vp->v_dd = dvp;
        } else {
                vp->v_dd = NULL;
        }

        /*
         * Set the rest of the namecache entry elements, calculate its
         * hash key and insert it into the appropriate chain within
         * the cache entries table.
         */
        ncp->nc_vp = vp;
        ncp->nc_dvp = dvp;
        len = ncp->nc_nlen = cnp->cn_namelen;
        hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
        bcopy(cnp->cn_nameptr, ncp->nc_name, len);
        hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
        ncpp = NCHHASH(hash);
        LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
        if (LIST_EMPTY(&dvp->v_cache_src)) {
                hold = 1;
                numcachehv++;
        }
        LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
        /*
         * If the entry is "negative", we place it into the
         * "negative" cache queue, otherwise, we place it into the
         * destination vnode's cache entries queue.
         */
        if (vp) {
                TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
        } else {
                TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
        }
        if (numneg * ncnegfactor > numcache) {
                ncp = TAILQ_FIRST(&ncneg);
                zap = 1;
        }
        if (hold)
                vhold(dvp);
        if (zap)
                cache_zap(ncp);
        CACHE_UNLOCK();
}
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

        TAILQ_INIT(&ncneg);

        cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
            NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
        cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
            NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

        nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)

/*
 * Invalidate all entries to a particular vnode.
 */
void
cache_purge(vp)
        struct vnode *vp;
{

        CTR1(KTR_VFS, "cache_purge(%p)", vp);
        CACHE_LOCK();
        while (!LIST_EMPTY(&vp->v_cache_src))
                cache_zap(LIST_FIRST(&vp->v_cache_src));
        while (!TAILQ_EMPTY(&vp->v_cache_dst))
                cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
        vp->v_dd = NULL;
        CACHE_UNLOCK();
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(mp)
        struct mount *mp;
{
        struct nchashhead *ncpp;
        struct namecache *ncp, *nnp;

        /* Scan hash tables for applicable entries */
        CACHE_LOCK();
        for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
                LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) {
                        if (ncp->nc_dvp->v_mount == mp)
                                cache_zap(ncp);
                }
        }
        CACHE_UNLOCK();
}
/*
 * Perform canonical checks and cache lookup and pass on to the filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
        struct vop_lookup_args /* {
                struct vnode *a_dvp;
                struct vnode **a_vpp;
                struct componentname *a_cnp;
        } */ *ap;
{
        struct vnode *dvp;
        int error;
        struct vnode **vpp = ap->a_vpp;
        struct componentname *cnp = ap->a_cnp;
        struct ucred *cred = cnp->cn_cred;
        int flags = cnp->cn_flags;
        struct thread *td = cnp->cn_thread;

        *vpp = NULL;
        dvp = ap->a_dvp;

        if (dvp->v_type != VDIR)
                return (ENOTDIR);

        if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
            (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
                return (EROFS);

        error = VOP_ACCESS(dvp, VEXEC, cred, td);
        if (error)
                return (error);

        error = cache_lookup(dvp, vpp, cnp);
        if (error == 0)
                return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
        if (error == ENOENT)
                return (error);
        return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
        u_char  *buf;
        u_int   buflen;
};
#endif
/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
__getcwd(td, uap)
        struct thread *td;
        struct __getcwd_args *uap;
{

        return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
}

int
kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
{
        char *bp, *tmpbuf;
        struct filedesc *fdp;
        int error;

        if (disablecwd)
                return (ENODEV);
        if (buflen < 2)
                return (EINVAL);
        if (buflen > MAXPATHLEN)
                buflen = MAXPATHLEN;

        tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
        fdp = td->td_proc->p_fd;
        mtx_lock(&Giant);
        FILEDESC_SLOCK(fdp);
        error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
            &bp, buflen);
        FILEDESC_SUNLOCK(fdp);
        mtx_unlock(&Giant);

        if (!error) {
                if (bufseg == UIO_SYSSPACE)
                        bcopy(bp, buf, strlen(bp) + 1);
                else
                        error = copyout(bp, buf, strlen(bp) + 1);
        }
        free(tmpbuf, M_TEMP);
        return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)                                                  \
        static u_int name;                                              \
        SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/* These count for kern___getcwd(), too. */
STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
        char *buf;
        struct filedesc *fdp;
        int error;

        if (disablefullpath)
                return (ENODEV);
        if (vn == NULL)
                return (EINVAL);

        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        fdp = td->td_proc->p_fd;
        FILEDESC_SLOCK(fdp);
        error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
        FILEDESC_SUNLOCK(fdp);

        if (!error)
                *freebuf = buf;
        else
                free(buf, M_TEMP);
        return (error);
}
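/*
 * Hedged usage sketch, not part of the original file: vn_fullpath() returns
 * two pointers on success.  *retbuf points at the start of the path inside
 * the allocated buffer, while *freebuf is the address that must be handed
 * back to free().  The helper name is hypothetical.
 */
static __inline void
example_print_path(struct thread *td, struct vnode *vp)
{
        char *fullpath, *freepath;

        if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
                printf("vnode %p resolves to %s\n", vp, fullpath);
                free(freepath, M_TEMP);
        }
}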
/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
        char *bp;
        int error, i, slash_prefixed;
        struct namecache *ncp;

        bp = buf + buflen - 1;
        *bp = '\0';
        error = 0;
        slash_prefixed = 0;

        CACHE_LOCK();
        numfullpathcalls++;
        if (vp->v_type != VDIR) {
                ncp = TAILQ_FIRST(&vp->v_cache_dst);
                if (!ncp) {
                        numfullpathfail2++;
                        CACHE_UNLOCK();
                        return (ENOENT);
                }
                for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
                        *--bp = ncp->nc_name[i];
                if (bp == buf) {
                        numfullpathfail4++;
                        CACHE_UNLOCK();
                        return (ENOMEM);
                }
                *--bp = '/';
                slash_prefixed = 1;
                vp = ncp->nc_dvp;
        }
        while (vp != rdir && vp != rootvnode) {
                if (vp->v_vflag & VV_ROOT) {
                        if (vp->v_iflag & VI_DOOMED) {  /* forced unmount */
                                error = EBADF;
                                break;
                        }
                        vp = vp->v_mount->mnt_vnodecovered;
                        continue;
                }
                if (vp->v_dd == NULL) {
                        numfullpathfail1++;
                        error = ENOTDIR;
                        break;
                }
                ncp = TAILQ_FIRST(&vp->v_cache_dst);
                if (!ncp) {
                        numfullpathfail2++;
                        error = ENOENT;
                        break;
                }
                MPASS(ncp->nc_dvp == vp->v_dd);
                for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
                        *--bp = ncp->nc_name[i];
                if (bp == buf) {
                        numfullpathfail4++;
                        error = ENOMEM;
                        break;
                }
                *--bp = '/';
                slash_prefixed = 1;
                vp = ncp->nc_dvp;
        }
        if (error) {
                CACHE_UNLOCK();
                return (error);
        }
        if (!slash_prefixed) {
                if (bp == buf) {
                        numfullpathfail4++;
                        CACHE_UNLOCK();
                        return (ENOMEM);
                } else {
                        *--bp = '/';
                }
        }
        numfullpathfound++;
        CACHE_UNLOCK();

        *retbuf = bp;
        return (0);
}