FreeBSD/Linux Kernel Cross Reference
sys/nfs/nfs_node.c
1 /* $NetBSD: nfs_node.c,v 1.106.4.1 2009/02/02 03:11:02 snj Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)nfs_node.c 8.6 (Berkeley) 5/22/95
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.106.4.1 2009/02/02 03:11:02 snj Exp $");
39
40 #include "opt_nfs.h"
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/proc.h>
45 #include <sys/mount.h>
46 #include <sys/namei.h>
47 #include <sys/vnode.h>
48 #include <sys/kernel.h>
49 #include <sys/pool.h>
50 #include <sys/lock.h>
51 #include <sys/hash.h>
52 #include <sys/kauth.h>
53
54 #include <nfs/rpcv2.h>
55 #include <nfs/nfsproto.h>
56 #include <nfs/nfs.h>
57 #include <nfs/nfsnode.h>
58 #include <nfs/nfsmount.h>
59 #include <nfs/nfs_var.h>
60
struct pool nfs_node_pool;		/* memory pool for struct nfsnode */
struct pool nfs_vattr_pool;		/* memory pool for cached struct vattr */
static struct workqueue *nfs_sillyworkq; /* deferred silly-rename removals */

extern int prtactive;			/* if set, report reclaim of active vnodes */

static void nfs_gop_size(struct vnode *, off_t, off_t *, int);
static int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
static int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
static void nfs_sillyworker(struct work *, void *);

/* genfs hooks for NFS vnodes; installed by genfs_node_init() in nfs_nget1(). */
static const struct genfs_ops nfs_genfsops = {
	.gop_size = nfs_gop_size,
	.gop_alloc = nfs_gop_alloc,
	.gop_write = nfs_gop_write,
};
77
/*
 * One-time initialization: create the pools used for nfsnodes and their
 * cached attributes, and the workqueue that removes silly-renamed files
 * (see nfs_sillyworker).
 */
void
nfs_node_init()
{

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
	/* Failure to create the workqueue is unrecoverable at init time. */
	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
		panic("nfs_node_init");
	}
}
94
/*
 * Free resources previously allocated in nfs_node_init().
 */
void
nfs_node_done()
{

	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}
106
/*
 * Convert a pointer to the embedded rb_node back to its containing
 * nfsnode (container_of-style pointer arithmetic).
 */
#define RBTONFSNODE(node) \
	(void *)((uintptr_t)(node) - offsetof(struct nfsnode, n_rbnode))

/* Search key for looking up an nfsnode by raw file handle. */
struct fh_match {
	nfsfh_t *fhm_fhp;	/* file handle bytes */
	size_t fhm_fhsize;	/* length of the handle in bytes */
	size_t fhm_fhoffset;	/* NOTE(review): unused in this file -- verify other users */
};
115
116 static int
117 nfs_compare_nodes(const struct rb_node *parent, const struct rb_node *node)
118 {
119 const struct nfsnode * const pnp = RBTONFSNODE(parent);
120 const struct nfsnode * const np = RBTONFSNODE(node);
121
122 if (pnp->n_fhsize != np->n_fhsize)
123 return np->n_fhsize - pnp->n_fhsize;
124
125 return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize);
126 }
127
128 static int
129 nfs_compare_node_fh(const struct rb_node *b, const void *key)
130 {
131 const struct nfsnode * const pnp = RBTONFSNODE(b);
132 const struct fh_match * const fhm = key;
133
134 if (pnp->n_fhsize != fhm->fhm_fhsize)
135 return fhm->fhm_fhsize - pnp->n_fhsize;
136
137 return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
138 }
139
/* Comparison callbacks for the per-mount tree of nfsnodes keyed by file handle. */
static const struct rb_tree_ops nfs_node_rbtree_ops = {
	.rbto_compare_nodes = nfs_compare_nodes,
	.rbto_compare_key = nfs_compare_node_fh,
};
144
/*
 * Set up the (empty) per-mount tree of nfsnodes for a new NFS mount.
 */
void
nfs_rbtinit(struct nfsmount *nmp)
{
	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
}
150
151
/*
 * Look up a vnode/nfsnode by file handle; create a fresh pair if none
 * exists yet.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * mntp:    the NFS mount the node belongs to
 * fhp:     NFS file handle, fhsize bytes long
 * npp:     out: the resulting nfsnode (vnode locked via vget/vlockmgr)
 * lkflags: extra flags passed through to vget() for the existing-node case
 *
 * Returns 0 on success, or an error from vget()/getnewvnode().
 */
int
nfs_nget1(mntp, fhp, fhsize, npp, lkflags)
	struct mount *mntp;
	nfsfh_t *fhp;
	int fhsize;
	struct nfsnode **npp;
	int lkflags;
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;
	struct rb_node *node;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	/* First, look for an existing node with this file handle. */
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	node = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (node != NULL) {
		np = RBTONFSNODE(node);
		vp = NFSTOV(np);
		/*
		 * Take the vnode interlock before dropping the tree lock
		 * so the vnode cannot go away in between.
		 */
		mutex_enter(&vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK | lkflags);
		if (error == EBUSY)
			return error;
		if (error)
			goto loop;	/* vnode was reclaimed; retry lookup */
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	/* Not found: allocate a new vnode and nfsnode. */
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle.
	 * Large handles need a separate allocation; small ones fit in
	 * the node's embedded n_fh.
	 */

	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * The tree lock was dropped while allocating, so another thread
	 * may have inserted the same handle concurrently.  If so, undo
	 * all of the work above and retry from the top.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	vlockmgr(&vp->v_lock, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	rb_tree_insert_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
246
/*
 * nfs_inactive: the vnode has lost its last use reference.  Detach any
 * pending silly-rename, flush its buffers, and hand the actual file
 * removal off to the silly-rename workqueue.  Unlocks the vnode.
 */
int
nfs_inactive(v)
	void *v;
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	if (vp->v_type != VDIR) {
		/* Detach any pending silly rename from the node. */
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	if (sp != NULL)
		/* Flush dirty buffers before the file is removed. */
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	/* Recycle the vnode right away if the node is flagged NREMOVED. */
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	/* Clear all node flags except those that stay meaningful. */
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp, 0);

	/*
	 * Queue the removal for nfs_sillyworker; it is done after the
	 * unlock above, from workqueue context.
	 */
	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
283
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Removes the node from the per-mount lookup tree and frees everything
 * hanging off it.
 */
int
nfs_reclaim(v)
	void *v;
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	/* Remove from the file-handle lookup tree (see nfs_nget1). */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, &np->n_rbnode);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	/* A large handle was allocated separately in nfs_nget1. */
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	cache_purge(vp);
	/*
	 * NOTE(review): n_commitlock is presumably initialized only for
	 * VREG nodes, matching this conditional destroy -- confirm at
	 * the init site (not visible in this file).
	 */
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}
335
336 void
337 nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
338 {
339
340 *eobp = MAX(size, vp->v_size);
341 }
342
343 int
344 nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
345 kauth_cred_t cred)
346 {
347
348 return 0;
349 }
350
351 int
352 nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
353 {
354 int i;
355
356 for (i = 0; i < npages; i++) {
357 pmap_page_protect(pgs[i], VM_PROT_READ);
358 }
359 return genfs_gop_write(vp, pgs, npages, flags);
360 }
361
/*
 * Remove a silly file that was rename'd earlier.  Runs from workqueue
 * context; queued by nfs_inactive.
 */
static void
nfs_sillyworker(struct work *work, void *arg)
{
	struct sillyrename *sp;
	int error;

	/* The cast assumes s_work is the first member of struct sillyrename. */
	sp = (struct sillyrename *)work;
	error = vn_lock(sp->s_dvp, LK_EXCLUSIVE);
	if (error || sp->s_dvp->v_data == NULL) {
		/* XXX should recover */
		printf("%s: vp=%p error=%d\n", __func__, sp->s_dvp, error);
		if (error == 0) {
			/* Locked, but the directory was reclaimed: unlock and release. */
			vput(sp->s_dvp);
		} else {
			/* Lock failed: just drop our reference. */
			vrele(sp->s_dvp);
		}
	} else {
		/* Remove the silly-renamed file, then release the directory. */
		nfs_removeit(sp);
		vput(sp->s_dvp);
	}
	kauth_cred_free(sp->s_cred);
	kmem_free(sp, sizeof(*sp));
}
Cache object: 5ff6bd40ca9e3bf7509133601d4dee59
|