FreeBSD/Linux Kernel Cross Reference
sys/nfs/nfs_node.c
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_node.c	8.6 (Berkeley) 5/22/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <vm/vm_zone.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfsmount.h>

static vm_zone_t nfsnode_zone;
static LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl;
static u_long nfsnodehash;

#define TRUE	1
#define FALSE	0

/*
 * Initialize hash links for nfsnodes
 * and build nfsnode free list.
 */
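/*
 * Note: nfsnodes come from a private vm_zone and the hash table is
 * sized from desiredvnodes.  hashinit() is expected to return a
 * power-of-two table and leave the corresponding mask in nfsnodehash
 * for use by NFSNOHASH() below.
 */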
void
nfs_nhinit()
{
        nfsnode_zone = zinit("NFSNODE", sizeof(struct nfsnode), 0, 0, 1);
        nfsnodehashtbl = hashinit(desiredvnodes, M_NFSHASH, &nfsnodehash);
}

/*
 * Compute an entry in the NFS hash table structure
 */
u_long
nfs_hash(fhp, fhsize)
        register nfsfh_t *fhp;
        int fhsize;
{
        register u_char *fhpp;
        register u_long fhsum;
        register int i;

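        /*
         * Simple additive hash: sum the bytes of the file handle.
         * The caller maps the sum to a hash chain with NFSNOHASH(),
         * which presumably masks it with nfsnodehash to index
         * nfsnodehashtbl.
         */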
        fhpp = &fhp->fh_bytes[0];
        fhsum = 0;
        for (i = 0; i < fhsize; i++)
                fhsum += *fhpp++;
        return (fhsum);
}

/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 */
static int nfs_node_hash_lock;

int
nfs_nget(mntp, fhp, fhsize, npp)
        struct mount *mntp;
        register nfsfh_t *fhp;
        int fhsize;
        struct nfsnode **npp;
{
        struct proc *p = curproc;	/* XXX */
        struct nfsnode *np, *np2;
        struct nfsnodehashhead *nhpp;
        register struct vnode *vp;
        struct vnode *nvp;
        int error;

retry:
        nhpp = NFSNOHASH(nfs_hash(fhp, fhsize));
loop:
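        /*
         * Scan the hash chain for an nfsnode with the same mount point
         * and file handle.  If vget() fails the vnode is presumably
         * being recycled and the chain may have changed under us, so
         * start the scan over.
         */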
        for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
                if (mntp != NFSTOV(np)->v_mount || np->n_fhsize != fhsize ||
                    bcmp((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize))
                        continue;
                vp = NFSTOV(np);
                if (vget(vp, LK_EXCLUSIVE, p))
                        goto loop;
                *npp = np;
                return (0);
        }
        /*
         * Obtain a lock to prevent a race condition if the getnewvnode()
         * or MALLOC() below happens to block.
         */
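        /*
         * The lock is a simple flag: 1 means a node creation is in
         * progress, -1 means someone is also sleeping on it and must
         * be woken up when the holder clears it back to 0.
         */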
        if (nfs_node_hash_lock) {
                while (nfs_node_hash_lock) {
                        nfs_node_hash_lock = -1;
                        tsleep(&nfs_node_hash_lock, PVM, "nfsngt", 0);
                }
                goto loop;
        }
        nfs_node_hash_lock = 1;

        /*
         * Allocate before getnewvnode since doing so afterward
         * might cause a bogus v_data pointer to get dereferenced
         * elsewhere if zalloc should block.
         */
        np = zalloc(nfsnode_zone);

        error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &nvp);
        if (error) {
                if (nfs_node_hash_lock < 0)
                        wakeup(&nfs_node_hash_lock);
                nfs_node_hash_lock = 0;
                *npp = 0;
                zfree(nfsnode_zone, np);
                return (error);
        }
        vp = nvp;
        bzero((caddr_t)np, sizeof *np);
        vp->v_data = np;
        np->n_vnode = vp;
        /*
         * Insert the nfsnode in the hash queue for its new file handle
         */
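        /*
         * Before inserting, rescan the chain: another process may have
         * created an nfsnode for the same handle while we slept in
         * zalloc() or getnewvnode().  If so, discard ours and retry so
         * the existing vnode is found and returned instead.
         */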
        for (np2 = nhpp->lh_first; np2 != 0; np2 = np2->n_hash.le_next) {
                if (mntp != NFSTOV(np2)->v_mount || np2->n_fhsize != fhsize ||
                    bcmp((caddr_t)fhp, (caddr_t)np2->n_fhp, fhsize))
                        continue;
                vrele(vp);
                if (nfs_node_hash_lock < 0)
                        wakeup(&nfs_node_hash_lock);
                nfs_node_hash_lock = 0;
                zfree(nfsnode_zone, np);
                goto retry;
        }
        LIST_INSERT_HEAD(nhpp, np, n_hash);
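        /*
         * File handles that fit in NFS_SMALLFH bytes are stored in the
         * nfsnode's embedded n_fh; larger handles get a separately
         * malloc'd buffer (M_NFSBIGFH), freed again in nfs_reclaim().
         */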
        if (fhsize > NFS_SMALLFH) {
                MALLOC(np->n_fhp, nfsfh_t *, fhsize, M_NFSBIGFH, M_WAITOK);
        } else
                np->n_fhp = &np->n_fh;
        bcopy((caddr_t)fhp, (caddr_t)np->n_fhp, fhsize);
        np->n_fhsize = fhsize;
        *npp = np;

        if (nfs_node_hash_lock < 0)
                wakeup(&nfs_node_hash_lock);
        nfs_node_hash_lock = 0;

        /*
         * Lock the new nfsnode.
         */
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);

        return (0);
}

int
nfs_inactive(ap)
        struct vop_inactive_args /* {
                struct vnode *a_vp;
                struct proc *a_p;
        } */ *ap;
{
        register struct nfsnode *np;
        register struct sillyrename *sp;
        struct proc *p = curproc;	/* XXX */

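        /*
         * A sillyrename records a file that was removed while still
         * open: rather than removing it, the remove code renames it to
         * a temporary name and defers the real removal.  Now that the
         * vnode is going inactive, the renamed file can finally be
         * removed from the server.
         */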
        np = VTONFS(ap->a_vp);
        if (prtactive && ap->a_vp->v_usecount != 0)
                vprint("nfs_inactive: pushing active", ap->a_vp);
        if (ap->a_vp->v_type != VDIR) {
                sp = np->n_sillyrename;
                np->n_sillyrename = (struct sillyrename *)0;
        } else
                sp = (struct sillyrename *)0;
        if (sp) {
                /*
                 * We need a reference to keep the vnode from being
                 * recycled by getnewvnode while we do the I/O
                 * associated with discarding the buffers, unless we
                 * are being forcibly unmounted, in which case we
                 * already have our own reference.
                 */
                if (ap->a_vp->v_usecount > 0)
                        (void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
                else if (vget(ap->a_vp, 0, p))
                        panic("nfs_inactive: lost vnode");
                else {
                        (void) nfs_vinvalbuf(ap->a_vp, 0, sp->s_cred, p, 1);
                        vrele(ap->a_vp);
                }
                /*
                 * Remove the silly file that was rename'd earlier
                 */
                nfs_removeit(sp);
                crfree(sp->s_cred);
                vrele(sp->s_dvp);
                FREE((caddr_t)sp, M_NFSREQ);
        }
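        /*
         * Keep only the flags that must survive inactivation (the
         * modified/flush-in-progress state and the NQNFS lease bits);
         * everything else is cleared.
         */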
        np->n_flag &= (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NQNFSEVICTED |
            NQNFSNONCACHE | NQNFSWRITE);
        VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
        return (0);
}

/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(ap)
        struct vop_reclaim_args /* {
                struct vnode *a_vp;
        } */ *ap;
{
        register struct vnode *vp = ap->a_vp;
        register struct nfsnode *np = VTONFS(vp);
        register struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        register struct nfsdmap *dp, *dp2;

        if (prtactive && vp->v_usecount != 0)
                vprint("nfs_reclaim: pushing active", vp);

        if (np->n_hash.le_prev != NULL)
                LIST_REMOVE(np, n_hash);

        /*
         * For nqnfs, take it off the timer queue as required.
         */
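        /*
         * NQNFS mounts appear to keep nodes with outstanding leases on
         * a per-mount timer queue (nm_timerhead); drop this node from
         * it so the lease timer does not touch a reclaimed vnode.
         */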
        if ((nmp->nm_flag & NFSMNT_NQNFS) && np->n_timer.cqe_next != 0) {
                CIRCLEQ_REMOVE(&nmp->nm_timerhead, np, n_timer);
        }

        /*
         * Free up any directory cookie structures and
         * large file handle structures that might be associated with
         * this nfs node.
         */
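        /*
         * For directories, n_cookies is a list of nfsdmap blocks that
         * cache readdir cookies; walk the list and free each block.
         */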
        if (vp->v_type == VDIR) {
                dp = np->n_cookies.lh_first;
                while (dp) {
                        dp2 = dp;
                        dp = dp->ndm_list.le_next;
                        FREE((caddr_t)dp2, M_NFSDIROFF);
                }
        }
        if (np->n_fhsize > NFS_SMALLFH) {
                FREE((caddr_t)np->n_fhp, M_NFSBIGFH);
        }

        cache_purge(vp);
        zfree(nfsnode_zone, vp->v_data);
        vp->v_data = (void *)0;
        return (0);
}

#if 0
/*
 * Lock an nfsnode
 */
int
nfs_lock(ap)
        struct vop_lock_args /* {
                struct vnode *a_vp;
        } */ *ap;
{
        register struct vnode *vp = ap->a_vp;

        /*
         * Ugh, another place where interruptible mounts will get hung.
         * If you make this sleep interruptible, then you have to fix all
         * the VOP_LOCK() calls to expect interruptibility.
         */
        while (vp->v_flag & VXLOCK) {
                vp->v_flag |= VXWANT;
                (void) tsleep((caddr_t)vp, PINOD, "nfslck", 0);
        }
        if (vp->v_tag == VT_NON)
                return (ENOENT);

#if 0
        /*
         * Only lock regular files.  If a server crashed while we were
         * holding a directory lock, we could easily end up sleeping
         * until the server rebooted while holding a lock on the root.
         * Locks are only needed for protecting critical sections in
         * VMIO at the moment.
         * New vnodes will have type VNON but they should be locked
         * since they may become VREG.  This is checked in loadattrcache
         * and unwanted locks are released there.
         */
        if (vp->v_type == VREG || vp->v_type == VNON) {
                while (np->n_flag & NLOCKED) {
                        np->n_flag |= NWANTED;
                        (void) tsleep((caddr_t) np, PINOD, "nfslck2", 0);
                        /*
                         * If the vnode has transmuted into a VDIR while we
                         * were asleep, then skip the lock.
                         */
                        if (vp->v_type != VREG && vp->v_type != VNON)
                                return (0);
                }
                np->n_flag |= NLOCKED;
        }
#endif

        return (0);
}

/*
 * Unlock an nfsnode
 */
int
nfs_unlock(ap)
        struct vop_unlock_args /* {
                struct vnode *a_vp;
        } */ *ap;
{
#if 0
        struct vnode *vp = ap->a_vp;
        struct nfsnode *np = VTONFS(vp);

        if (vp->v_type == VREG || vp->v_type == VNON) {
                if (!(np->n_flag & NLOCKED))
                        panic("nfs_unlock: nfsnode not locked");
                np->n_flag &= ~NLOCKED;
                if (np->n_flag & NWANTED) {
                        np->n_flag &= ~NWANTED;
                        wakeup((caddr_t) np);
                }
        }
#endif

        return (0);
}

/*
 * Check for a locked nfsnode
 */
int
nfs_islocked(ap)
        struct vop_islocked_args /* {
                struct vnode *a_vp;
        } */ *ap;
{
        return VTONFS(ap->a_vp)->n_flag & NLOCKED ? 1 : 0;
}
#endif

/*
 * Nfs abort op, called after namei() when a CREATE/DELETE isn't actually
 * done.  All that is left to do is release the pathname buffer, if any.
 */
/* ARGSUSED */
int
nfs_abortop(ap)
        struct vop_abortop_args /* {
                struct vnode *a_dvp;
                struct componentname *a_cnp;
        } */ *ap;
{

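        /*
         * namei() sets HASBUF when it allocated a pathname buffer for
         * this lookup and SAVESTART when the caller wants to keep the
         * buffer for a follow-up operation, so free it only when
         * HASBUF is set without SAVESTART.
         */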
        if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
                zfree(namei_zone, ap->a_cnp->cn_pnbuf);
        return (0);
}