FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_cache.c
1 /*-
2 * Copyright (c) 1989, 1993, 1995
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Poul-Henning Kamp of the FreeBSD Project.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD: releng/6.0/sys/kern/vfs_cache.c 147450 2005-06-17 01:05:13Z jeff $");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/sysctl.h>
44 #include <sys/mount.h>
45 #include <sys/vnode.h>
46 #include <sys/namei.h>
47 #include <sys/malloc.h>
48 #include <sys/syscallsubr.h>
49 #include <sys/sysproto.h>
50 #include <sys/proc.h>
51 #include <sys/filedesc.h>
52 #include <sys/fnv_hash.h>
53
54 #include <vm/uma.h>
55
56 /*
57 * This structure describes the elements in the cache of recent
58 * names looked up by namei.
59 */
60
61 struct namecache {
62 LIST_ENTRY(namecache) nc_hash; /* hash chain */
63 LIST_ENTRY(namecache) nc_src; /* source vnode list */
64 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
65 struct vnode *nc_dvp; /* vnode of parent of name */
66 struct vnode *nc_vp; /* vnode the name refers to */
67 u_char nc_flag; /* flag bits */
68 u_char nc_nlen; /* length of name */
69 char nc_name[0]; /* segment name */
70 };
71
72 /*
73 * Name caching works as follows:
74 *
75 * Names found by directory scans are retained in a cache
76 * for future reference. It is managed LRU, so frequently
77 * used names will hang around. Cache is indexed by hash value
78 * obtained from (vp, name) where vp refers to the directory
79 * containing name.
80 *
81 * If it is a "negative" entry (i.e. for a name that is known NOT to
82 * exist), the vnode pointer will be NULL.
83 *
84 * Upon reaching the last segment of a path, if the reference
85 * is for DELETE, or NOCACHE is set (rewrite), and the
86 * name is located in the cache, it will be dropped.
87 */
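/*
 * Illustrative sketch (not part of the original file): the hash value that
 * indexes the cache is built from the component name and the address of the
 * directory vnode, exactly as cache_lookup() and cache_enter() do later in
 * this file:
 *
 *	u_int32_t hash;
 *
 *	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
 *	ncpp = NCHHASH(hash);
 *
 * NCHHASH() masks the value down to a chain head in nchashtbl[], and a
 * "negative" entry is simply an entry on that chain whose nc_vp is NULL.
 */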
88
89 /*
90 * Structures associated with name caching.
91 */
92 #define NCHHASH(hash) \
93 (&nchashtbl[(hash) & nchash])
94 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */
95 static TAILQ_HEAD(, namecache) ncneg; /* Negative entry LRU list */
96 static u_long nchash; /* size of hash table */
97 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
98 static u_long ncnegfactor = 16; /* ratio of negative entries */
99 SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
100 static u_long numneg; /* number of negative cache entries */
101 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
102 static u_long numcache; /* number of cache entries allocated */
103 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
104 static u_long numcachehv; /* number of cache entries with vnodes held */
105 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
106 #if 0
107 static u_long numcachepl; /* number of cache purge for leaf entries */
108 SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
109 #endif
110 struct nchstats nchstats; /* cache effectiveness statistics */
111
112 static struct mtx cache_lock;
113 MTX_SYSINIT(vfscache, &cache_lock, "Name Cache", MTX_DEF);
114
115 #define CACHE_LOCK() mtx_lock(&cache_lock)
116 #define CACHE_UNLOCK() mtx_unlock(&cache_lock)
117
118 /*
119 * UMA zones for the VFS cache.
120 *
121 * The small cache is used for entries with short names, which are the
122 * most common. The large cache is used for entries which are too big to
123 * fit in the small cache.
124 */
125 static uma_zone_t cache_zone_small;
126 static uma_zone_t cache_zone_large;
127
128 #define CACHE_PATH_CUTOFF 32
129 #define CACHE_ZONE_SMALL (sizeof(struct namecache) + CACHE_PATH_CUTOFF)
130 #define CACHE_ZONE_LARGE (sizeof(struct namecache) + NAME_MAX)
131
132 #define cache_alloc(len) uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
133 cache_zone_small : cache_zone_large, M_WAITOK)
134 #define cache_free(ncp) do { \
135 if (ncp != NULL) \
136 uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
137 cache_zone_small : cache_zone_large, (ncp)); \
138 } while (0)
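/*
 * Illustrative sketch (not part of the original file): cache_alloc() picks a
 * zone purely from the name length, and cache_free() routes the entry back
 * to the same zone via nc_nlen, e.g.:
 *
 *	ncp = cache_alloc(6);	(len <= CACHE_PATH_CUTOFF: cache_zone_small)
 *	ncp = cache_alloc(40);	(len > CACHE_PATH_CUTOFF: cache_zone_large)
 *	cache_free(ncp);	(nc_nlen routes the free to the same zone)
 *
 * Both zones hand out fixed-size items (sizeof(struct namecache) plus either
 * CACHE_PATH_CUTOFF or NAME_MAX bytes of nc_name[] storage), so no separate
 * allocation is needed for the variable-length name.
 */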
139
140 static int doingcache = 1; /* 1 => enable the cache */
141 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
142
143 /* Export size information to userland */
144 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
145 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
146
147 /*
148 * The new name cache statistics
149 */
150 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
151 #define STATNODE(mode, name, var) \
152 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
153 STATNODE(CTLFLAG_RD, numneg, &numneg);
154 STATNODE(CTLFLAG_RD, numcache, &numcache);
155 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
156 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
157 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
158 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
159 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
160 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
161 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
162 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
163 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
164 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
165
166 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
167 sizeof(nchstats), "LU", "VFS cache effectiveness statistics");
168
169
170
171 static void cache_zap(struct namecache *ncp);
172 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
173 char *buf, char **retbuf, u_int buflen);
174
175 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
176
177 /*
178 * Flags in namecache.nc_flag
179 */
180 #define NCF_WHITE 1
181
182 /*
183 * Grab an atomic snapshot of the name cache hash chain lengths
184 */
185 SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");
186
187 static int
188 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
189 {
190 int error;
191 struct nchashhead *ncpp;
192 struct namecache *ncp;
193 int n_nchash;
194 int count;
195
196 n_nchash = nchash + 1; /* nchash is max index, not count */
197 if (!req->oldptr)
198 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
199
200 /* Scan hash tables for applicable entries */
201 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
202 count = 0;
203 LIST_FOREACH(ncp, ncpp, nc_hash) {
204 count++;
205 }
206 error = SYSCTL_OUT(req, &count, sizeof(count));
207 if (error)
208 return (error);
209 }
210 return (0);
211 }
212 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
213 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");
214
215 static int
216 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
217 {
218 int error;
219 struct nchashhead *ncpp;
220 struct namecache *ncp;
221 int n_nchash;
222 int count, maxlength, used, pct;
223
224 if (!req->oldptr)
225 return SYSCTL_OUT(req, 0, 4 * sizeof(int));
226
227 n_nchash = nchash + 1; /* nchash is max index, not count */
228 used = 0;
229 maxlength = 0;
230
231 /* Scan hash tables for applicable entries */
232 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
233 count = 0;
234 LIST_FOREACH(ncp, ncpp, nc_hash) {
235 count++;
236 }
237 if (count)
238 used++;
239 if (maxlength < count)
240 maxlength = count;
241 }
242 n_nchash = nchash + 1;
243 pct = (used * 100 * 100) / n_nchash;
244 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
245 if (error)
246 return (error);
247 error = SYSCTL_OUT(req, &used, sizeof(used));
248 if (error)
249 return (error);
250 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
251 if (error)
252 return (error);
253 error = SYSCTL_OUT(req, &pct, sizeof(pct));
254 if (error)
255 return (error);
256 return (0);
257 }
258 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
259 0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");
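/*
 * Illustrative sketch (not part of the original file): the four ints written
 * above (table size, buckets in use, longest chain, utilization in
 * hundredths of a percent) can be read from userland with sysctlbyname(3):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int st[4];
 *	size_t len = sizeof(st);
 *
 *	if (sysctlbyname("debug.hashstat.nchash", st, &len, NULL, 0) == 0)
 *		printf("size %d used %d maxchain %d util %d.%02d%%\n",
 *		    st[0], st[1], st[2], st[3] / 100, st[3] % 100);
 */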
260
261 /*
262 * cache_zap():
263 *
264 * Removes a namecache entry from cache, whether it contains an actual
265 * pointer to a vnode or if it is just a negative cache entry.
266 */
267 static void
268 cache_zap(ncp)
269 struct namecache *ncp;
270 {
271 struct vnode *vp;
272
273 mtx_assert(&cache_lock, MA_OWNED);
274 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp);
275 vp = NULL;
276 LIST_REMOVE(ncp, nc_hash);
277 LIST_REMOVE(ncp, nc_src);
278 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
279 vp = ncp->nc_dvp;
280 numcachehv--;
281 }
282 if (ncp->nc_vp) {
283 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
284 ncp->nc_vp->v_dd = NULL;
285 } else {
286 TAILQ_REMOVE(&ncneg, ncp, nc_dst);
287 numneg--;
288 }
289 numcache--;
290 cache_free(ncp);
291 if (vp)
292 vdrop(vp);
293 }
294
295 /*
296 * cache_leaf_test()
297 *
298 * Test whether this (directory) vnode's namei cache entry contains
299 * subdirectories or not. Used to determine whether the directory is
300 * a leaf in the namei cache or not. Note: the directory may still
301 * contain files in the namei cache.
302 *
303 * Returns 0 if the directory is a leaf, -1 if it isn't.
304 */
305 int
306 cache_leaf_test(struct vnode *vp)
307 {
308 struct namecache *ncpc;
309 int leaf;
310
311 leaf = 0;
312 CACHE_LOCK();
313 for (ncpc = LIST_FIRST(&vp->v_cache_src);
314 ncpc != NULL;
315 ncpc = LIST_NEXT(ncpc, nc_src)
316 ) {
317 if (ncpc->nc_vp != NULL && ncpc->nc_vp->v_type == VDIR) {
318 leaf = -1;
319 break;
320 }
321 }
322 CACHE_UNLOCK();
323 return (leaf);
324 }
325
326 /*
327 * Lookup an entry in the cache
328 *
329 * Lookup is called with dvp pointing to the directory to search,
330 * cnp pointing to the name of the entry being sought. If the lookup
331 * succeeds, the vnode is returned in *vpp, and a status of -1 is
332 * returned. If the lookup determines that the name does not exist
333 * (negative caching), a status of ENOENT is returned. If the lookup
334 * fails, a status of zero is returned.
335 *
336 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is
337 * unlocked. If we're looking up ".", an extra ref is taken, but the lock is
338 * not recursively acquired.
339 */
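/*
 * Illustrative sketch (not part of the original file): a caller such as
 * vfs_cache_lookup() below maps the three-way return value back onto the
 * normal lookup protocol:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error == 0)			(miss: ask the filesystem itself)
 *		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
 *	if (error == ENOENT)		(negative hit: name known to be absent)
 *		return (ENOENT);
 *	return (0);			(error == -1: hit, *vpp locked and ref'd)
 */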
340
341 int
342 cache_lookup(dvp, vpp, cnp)
343 struct vnode *dvp;
344 struct vnode **vpp;
345 struct componentname *cnp;
346 {
347 struct namecache *ncp;
348 u_int32_t hash;
349 int error;
350
351 if (!doingcache) {
352 cnp->cn_flags &= ~MAKEENTRY;
353 return (0);
354 }
355 retry:
356 CACHE_LOCK();
357 numcalls++;
358
359 if (cnp->cn_nameptr[0] == '.') {
360 if (cnp->cn_namelen == 1) {
361 *vpp = dvp;
362 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
363 dvp, cnp->cn_nameptr);
364 dothits++;
365 goto success;
366 }
367 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
368 dotdothits++;
369 if (dvp->v_dd == NULL ||
370 (cnp->cn_flags & MAKEENTRY) == 0) {
371 CACHE_UNLOCK();
372 return (0);
373 }
374 *vpp = dvp->v_dd;
375 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
376 dvp, cnp->cn_nameptr, *vpp);
377 goto success;
378 }
379 }
380
381 hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
382 hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
383 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
384 numchecks++;
385 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
386 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
387 break;
388 }
389
390 /* We failed to find an entry */
391 if (ncp == 0) {
392 if ((cnp->cn_flags & MAKEENTRY) == 0) {
393 nummisszap++;
394 } else {
395 nummiss++;
396 }
397 nchstats.ncs_miss++;
398 CACHE_UNLOCK();
399 return (0);
400 }
401
402 /* We don't want to have an entry, so dump it */
403 if ((cnp->cn_flags & MAKEENTRY) == 0) {
404 numposzaps++;
405 nchstats.ncs_badhits++;
406 cache_zap(ncp);
407 CACHE_UNLOCK();
408 return (0);
409 }
410
411 /* We found a "positive" match, return the vnode */
412 if (ncp->nc_vp) {
413 numposhits++;
414 nchstats.ncs_goodhits++;
415 *vpp = ncp->nc_vp;
416 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
417 dvp, cnp->cn_nameptr, *vpp, ncp);
418 goto success;
419 }
420
421 /* We found a negative match, and want to create it, so purge */
422 if (cnp->cn_nameiop == CREATE) {
423 numnegzaps++;
424 nchstats.ncs_badhits++;
425 cache_zap(ncp);
426 CACHE_UNLOCK();
427 return (0);
428 }
429
430 numneghits++;
431 /*
432 * We found a "negative" match, so we shift it to the end of
433 * the "negative" cache entries queue to satisfy LRU. Also,
434 * check to see if the entry is a whiteout; indicate this to
435 * the componentname, if so.
436 */
437 TAILQ_REMOVE(&ncneg, ncp, nc_dst);
438 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
439 nchstats.ncs_neghits++;
440 if (ncp->nc_flag & NCF_WHITE)
441 cnp->cn_flags |= ISWHITEOUT;
442 CACHE_UNLOCK();
443 return (ENOENT);
444
445 success:
446 /*
447 * On success we return a locked and ref'd vnode as per the lookup
448 * protocol.
449 */
450 if (dvp == *vpp) { /* lookup on "." */
451 VREF(*vpp);
452 CACHE_UNLOCK();
453 return (-1);
454 }
455 if (cnp->cn_flags & ISDOTDOT)
456 VOP_UNLOCK(dvp, 0, cnp->cn_thread);
457 VI_LOCK(*vpp);
458 CACHE_UNLOCK();
459 error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread);
460 if (cnp->cn_flags & ISDOTDOT)
461 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
462 if (error) {
463 *vpp = NULL;
464 goto retry;
465 }
466 return (-1);
467 }
468
469 /*
470 * Add an entry to the cache.
471 */
472 void
473 cache_enter(dvp, vp, cnp)
474 struct vnode *dvp;
475 struct vnode *vp;
476 struct componentname *cnp;
477 {
478 struct namecache *ncp;
479 struct nchashhead *ncpp;
480 u_int32_t hash;
481 int hold;
482 int zap;
483 int len;
484
485 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
486 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp,
487 ("cahe_enter: Adding a doomed vnode"));
488
489 if (!doingcache)
490 return;
491
492 if (cnp->cn_nameptr[0] == '.') {
493 if (cnp->cn_namelen == 1) {
494 return;
495 }
496 /*
497 * For dotdot lookups only cache the v_dd pointer if the
498 * directory has a link back to its parent via v_cache_dst.
499 * Without this an unlinked directory would keep a soft
500 * reference to its parent which could not be NULLd at
501 * cache_purge() time.
502 */
503 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
504 CACHE_LOCK();
505 if (!TAILQ_EMPTY(&dvp->v_cache_dst))
506 dvp->v_dd = vp;
507 CACHE_UNLOCK();
508 return;
509 }
510 }
511
512 hold = 0;
513 zap = 0;
514 ncp = cache_alloc(cnp->cn_namelen);
515 CACHE_LOCK();
516 numcache++;
517 if (!vp) {
518 numneg++;
519 ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
520 } else if (vp->v_type == VDIR) {
521 vp->v_dd = dvp;
522 } else {
523 vp->v_dd = NULL;
524 }
525
526 /*
527 * Set the rest of the namecache entry elements, calculate its
528 * hash key and insert it into the appropriate chain within
529 * the cache entries table.
530 */
531 ncp->nc_vp = vp;
532 ncp->nc_dvp = dvp;
533 len = ncp->nc_nlen = cnp->cn_namelen;
534 hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
535 bcopy(cnp->cn_nameptr, ncp->nc_name, len);
536 hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
537 ncpp = NCHHASH(hash);
538 LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
539 if (LIST_EMPTY(&dvp->v_cache_src)) {
540 hold = 1;
541 numcachehv++;
542 }
543 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
544 /*
545 * If the entry is "negative", we place it into the
546 * "negative" cache queue, otherwise, we place it into the
547 * destination vnode's cache entries queue.
548 */
549 if (vp) {
550 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
551 } else {
552 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
553 }
554 if (numneg * ncnegfactor > numcache) {
555 ncp = TAILQ_FIRST(&ncneg);
556 zap = 1;
557 }
558 if (hold)
559 vhold(dvp);
560 if (zap)
561 cache_zap(ncp);
562 CACHE_UNLOCK();
563 }
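/*
 * Illustrative sketch (not part of the original file): a filesystem's own
 * lookup routine typically feeds cache_enter() once it has resolved, or
 * failed to resolve, a component and the caller asked for caching via
 * MAKEENTRY (here the hypothetical "found" flag stands for the outcome of
 * the on-disk lookup):
 *
 *	if (cnp->cn_flags & MAKEENTRY) {
 *		if (found)
 *			cache_enter(dvp, *vpp, cnp);	(positive entry)
 *		else
 *			cache_enter(dvp, NULL, cnp);	(negative entry)
 *	}
 *
 * Passing a NULL vnode is what creates the "negative" entries counted by
 * numneg; filesystems usually skip the negative entry when the failed
 * lookup was for CREATE, since the name is about to exist.
 */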
564
565 /*
566 * Name cache initialization, from vfs_init() when we are booting
567 */
568 static void
569 nchinit(void *dummy __unused)
570 {
571
572 TAILQ_INIT(&ncneg);
573
574 cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
575 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
576 cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
577 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
578
579 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
580 }
581 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)
582
583
584 /*
585 * Invalidate all entries to a particular vnode.
586 */
587 void
588 cache_purge(vp)
589 struct vnode *vp;
590 {
591
592 CTR1(KTR_VFS, "cache_purge(%p)", vp);
593 CACHE_LOCK();
594 while (!LIST_EMPTY(&vp->v_cache_src))
595 cache_zap(LIST_FIRST(&vp->v_cache_src));
596 while (!TAILQ_EMPTY(&vp->v_cache_dst))
597 cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
598 vp->v_dd = NULL;
599 CACHE_UNLOCK();
600 }
601
602 /*
603 * Flush all entries referencing a particular filesystem.
604 *
605 * Since we need to check it anyway, we will flush all the invalid
606 * entries at the same time.
607 */
608 void
609 cache_purgevfs(mp)
610 struct mount *mp;
611 {
612 struct nchashhead *ncpp;
613 struct namecache *ncp, *nnp;
614 struct nchashhead mplist;
615
616 LIST_INIT(&mplist);
617 ncp = NULL;
618
619 /* Scan hash tables for applicable entries */
620 CACHE_LOCK();
621 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
622 for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
623 nnp = LIST_NEXT(ncp, nc_hash);
624 if (ncp->nc_dvp->v_mount == mp) {
625 LIST_REMOVE(ncp, nc_hash);
626 LIST_INSERT_HEAD(&mplist, ncp, nc_hash);
627 }
628 }
629 }
630 while (!LIST_EMPTY(&mplist))
631 cache_zap(LIST_FIRST(&mplist));
632 CACHE_UNLOCK();
633 }
634
635 /*
636 * Perform canonical checks and cache lookup and pass on to filesystem
637 * through the vop_cachedlookup only if needed.
638 */
639
640 int
641 vfs_cache_lookup(ap)
642 struct vop_lookup_args /* {
643 struct vnode *a_dvp;
644 struct vnode **a_vpp;
645 struct componentname *a_cnp;
646 } */ *ap;
647 {
648 struct vnode *dvp;
649 int error;
650 struct vnode **vpp = ap->a_vpp;
651 struct componentname *cnp = ap->a_cnp;
652 struct ucred *cred = cnp->cn_cred;
653 int flags = cnp->cn_flags;
654 struct thread *td = cnp->cn_thread;
655
656 *vpp = NULL;
657 dvp = ap->a_dvp;
658
659 if (dvp->v_type != VDIR)
660 return (ENOTDIR);
661
662 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
663 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
664 return (EROFS);
665
666 error = VOP_ACCESS(dvp, VEXEC, cred, td);
667 if (error)
668 return (error);
669
670 error = cache_lookup(dvp, vpp, cnp);
671 if (error == 0)
672 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
673 if (error == ENOENT)
674 return (error);
675 return (0);
676 }
677
678
679 #ifndef _SYS_SYSPROTO_H_
680 struct __getcwd_args {
681 u_char *buf;
682 u_int buflen;
683 };
684 #endif
685
686 /*
687 * XXX All of these sysctls would probably be more productive dead.
688 */
689 static int disablecwd;
690 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
691 "Disable the getcwd syscall");
692
693 /* Implementation of the getcwd syscall */
694 int
695 __getcwd(td, uap)
696 struct thread *td;
697 struct __getcwd_args *uap;
698 {
699
700 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen));
701 }
702
703 int
704 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen)
705 {
706 char *bp, *tmpbuf;
707 struct filedesc *fdp;
708 int error;
709
710 if (disablecwd)
711 return (ENODEV);
712 if (buflen < 2)
713 return (EINVAL);
714 if (buflen > MAXPATHLEN)
715 buflen = MAXPATHLEN;
716
717 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
718 fdp = td->td_proc->p_fd;
719 mtx_lock(&Giant);
720 FILEDESC_LOCK(fdp);
721 error = vn_fullpath1(td, fdp->fd_cdir, fdp->fd_rdir, tmpbuf,
722 &bp, buflen);
723 FILEDESC_UNLOCK(fdp);
724 mtx_unlock(&Giant);
725
726 if (!error) {
727 if (bufseg == UIO_SYSSPACE)
728 bcopy(bp, buf, strlen(bp) + 1);
729 else
730 error = copyout(bp, buf, strlen(bp) + 1);
731 }
732 free(tmpbuf, M_TEMP);
733 return (error);
734 }
735
736 /*
737 * Thus begins the fullpath magic.
738 */
739
740 #undef STATNODE
741 #define STATNODE(name) \
742 static u_int name; \
743 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
744
745 static int disablefullpath;
746 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
747 "Disable the vn_fullpath function");
748
749 /* These count for kern___getcwd(), too. */
750 STATNODE(numfullpathcalls);
751 STATNODE(numfullpathfail1);
752 STATNODE(numfullpathfail2);
753 STATNODE(numfullpathfail4);
754 STATNODE(numfullpathfound);
755
756 /*
757 * Retrieve the full filesystem path that corresponds to a vnode from the name
758 * cache (if available)
759 */
760 int
761 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
762 {
763 char *buf;
764 struct filedesc *fdp;
765 int error;
766
767 if (disablefullpath)
768 return (ENODEV);
769 if (vn == NULL)
770 return (EINVAL);
771
772 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
773 fdp = td->td_proc->p_fd;
774 mtx_lock(&Giant);
775 FILEDESC_LOCK(fdp);
776 error = vn_fullpath1(td, vn, fdp->fd_rdir, buf, retbuf, MAXPATHLEN);
777 FILEDESC_UNLOCK(fdp);
778 mtx_unlock(&Giant);
779
780 if (!error)
781 *freebuf = buf;
782 else
783 free(buf, M_TEMP);
784 return (error);
785 }
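/*
 * Illustrative sketch (not part of the original file): callers of
 * vn_fullpath() keep the two output pointers separate because *retbuf points
 * into the middle of the allocation; only *freebuf may be passed to free():
 *
 *	char *fullpath, *freepath;
 *
 *	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
 *		printf("path: %s\n", fullpath);
 *		free(freepath, M_TEMP);
 *	}
 */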
786
787 /*
788 * The magic behind kern___getcwd() and vn_fullpath().
789 */
790 static int
791 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
792 char *buf, char **retbuf, u_int buflen)
793 {
794 char *bp;
795 int error, i, slash_prefixed;
796 struct namecache *ncp;
797
798 mtx_assert(&Giant, MA_OWNED);
799
800 bp = buf + buflen - 1;
801 *bp = '\0';
802 error = 0;
803 slash_prefixed = 0;
804
805 CACHE_LOCK();
806 numfullpathcalls++;
807 if (vp->v_type != VDIR) {
808 ncp = TAILQ_FIRST(&vp->v_cache_dst);
809 if (!ncp) {
810 numfullpathfail2++;
811 CACHE_UNLOCK();
812 return (ENOENT);
813 }
814 for (i = ncp->nc_nlen - 1; i >= 0 && bp > buf; i--)
815 *--bp = ncp->nc_name[i];
816 if (bp == buf) {
817 numfullpathfail4++;
818 CACHE_UNLOCK();
819 return (ENOMEM);
820 }
821 *--bp = '/';
822 slash_prefixed = 1;
823 vp = ncp->nc_dvp;
824 }
825 while (vp != rdir && vp != rootvnode) {
826 if (vp->v_vflag & VV_ROOT) {
827 if (vp->v_mount == NULL) { /* forced unmount */
828 error = EBADF;
829 break;
830 }
831 vp = vp->v_mount->mnt_vnodecovered;
832 continue;
833 }
834 if (vp->v_dd == NULL) {
835 numfullpathfail1++;
836 error = ENOTDIR;
837 break;
838 }
839 ncp = TAILQ_FIRST(&vp->v_cache_dst);
840 if (!ncp) {
841 numfullpathfail2++;
842 error = ENOENT;
843 break;
844 }
845 MPASS(ncp->nc_dvp == vp->v_dd);
846 for (i = ncp->nc_nlen - 1; i >= 0 && bp != buf; i--)
847 *--bp = ncp->nc_name[i];
848 if (bp == buf) {
849 numfullpathfail4++;
850 error = ENOMEM;
851 break;
852 }
853 *--bp = '/';
854 slash_prefixed = 1;
855 vp = ncp->nc_dvp;
856 }
857 if (error) {
858 CACHE_UNLOCK();
859 return (error);
860 }
861 if (!slash_prefixed) {
862 if (bp == buf) {
863 numfullpathfail4++;
864 CACHE_UNLOCK();
865 return (ENOMEM);
866 } else {
867 *--bp = '/';
868 }
869 }
870 numfullpathfound++;
871 CACHE_UNLOCK();
872
873 *retbuf = bp;
874 return (0);
875 }