/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software donated to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)null_subr.c	8.7 (Berkeley) 5/14/95
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

/*
 * Null layer cache:
 * Each cache entry holds a reference to the lower vnode
 * along with a pointer to the alias vnode.  When an
 * entry is added the lower vnode is VREF'd.  When the
 * alias is removed the lower vnode is vrele'd.
 */

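/*
 * Map a lower vnode to its bucket in the alias hash table.  The bucket
 * index is derived from vfs_hash_index() on the lower vnode and masked
 * to the table size; the table itself is guarded by null_hash_lock.
 */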
#define	NULL_NHASH(vp) (&null_node_hashtbl[vfs_hash_index(vp) & null_hash_mask])

static LIST_HEAD(null_node_hashhead, null_node) *null_node_hashtbl;
static struct rwlock null_hash_lock;
static u_long null_hash_mask;

static MALLOC_DEFINE(M_NULLFSHASH, "nullfs_hash", "NULLFS hash table");
MALLOC_DEFINE(M_NULLFSNODE, "nullfs_node", "NULLFS vnode private part");

static struct vnode * null_hashins(struct mount *, struct null_node *);

/*
 * Initialise cache headers
 */
int
nullfs_init(struct vfsconf *vfsp)
{

	null_node_hashtbl = hashinit(desiredvnodes, M_NULLFSHASH,
	    &null_hash_mask);
	rw_init(&null_hash_lock, "nullhs");
	return (0);
}

int
nullfs_uninit(struct vfsconf *vfsp)
{

	rw_destroy(&null_hash_lock);
	hashdestroy(null_node_hashtbl, M_NULLFSHASH, null_hash_mask);
	return (0);
}

/*
 * Return a VREF'ed alias for the lower vnode if one already exists,
 * else NULLVP.
 * The lower vnode should be locked on entry and will be left locked on exit.
 */
struct vnode *
null_hashget(struct mount *mp, struct vnode *lowervp)
{
	struct null_node_hashhead *hd;
	struct null_node *a;
	struct vnode *vp;

	ASSERT_VOP_LOCKED(lowervp, "null_hashget");

	/*
	 * Find the hash bucket, then search the (two-way) linked
	 * list looking for a null_node structure which references
	 * the lower vnode.  If found, increment the nullfs vnode's
	 * reference count (but NOT the lower vnode's VREF counter).
	 */
	hd = NULL_NHASH(lowervp);
	if (LIST_EMPTY(hd))
		return (NULLVP);
	rw_rlock(&null_hash_lock);
	LIST_FOREACH(a, hd, null_hash) {
		if (a->null_lowervp == lowervp && NULLTOV(a)->v_mount == mp) {
			/*
			 * Since we have the lower node locked the nullfs
			 * node can not be in the process of recycling.  If
			 * it had been recycled before we grabbed the lower
			 * lock it would not have been found on the hash.
			 */
			vp = NULLTOV(a);
			vref(vp);
			rw_runlock(&null_hash_lock);
			return (vp);
		}
	}
	rw_runlock(&null_hash_lock);
	return (NULLVP);
}

/*
 * Act like null_hashget, but add the passed null_node to the hash if no
 * existing node is found.
 */
static struct vnode *
null_hashins(struct mount *mp, struct null_node *xp)
{
	struct null_node_hashhead *hd;
	struct null_node *oxp;
	struct vnode *ovp;

	hd = NULL_NHASH(xp->null_lowervp);
	rw_wlock(&null_hash_lock);
	LIST_FOREACH(oxp, hd, null_hash) {
		if (oxp->null_lowervp == xp->null_lowervp &&
		    NULLTOV(oxp)->v_mount == mp) {
			/*
			 * See null_hashget for a description of this
			 * operation.
			 */
			ovp = NULLTOV(oxp);
			vref(ovp);
			rw_wunlock(&null_hash_lock);
			return (ovp);
		}
	}
	LIST_INSERT_HEAD(hd, xp, null_hash);
	rw_wunlock(&null_hash_lock);
	return (NULLVP);
}

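/*
 * Destroy a nullfs vnode that was only partially constructed: detach it
 * from the shared lower vnode lock, drop and free its private data,
 * switch it to the dead vnode ops and dispose of the vnode.
 */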
static void
null_destroy_proto(struct vnode *vp, void *xp)
{

	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	VI_LOCK(vp);
	vp->v_data = NULL;
	vp->v_vnlock = &vp->v_lock;
	vp->v_op = &dead_vnodeops;
	VI_UNLOCK(vp);
	vgone(vp);
	vput(vp);
	free(xp, M_NULLFSNODE);
}

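/*
 * Destructor passed to insmntque1(): if the nullfs vnode cannot be
 * inserted into the mount's vnode list, drop the reference and lock on
 * the lower vnode and destroy the half-built nullfs vnode.
 */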
static void
null_insmntque_dtr(struct vnode *vp, void *xp)
{

	vput(((struct null_node *)xp)->null_lowervp);
	null_destroy_proto(vp, xp);
}

/*
 * Make a new nullfs node, or get an existing one.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * The lowervp is assumed to be locked and to carry a "spare" reference.
 * This routine vrele()s lowervp if the nullfs node was taken from the hash.
 * Otherwise it "transfers" the caller's "spare" reference to the newly
 * created nullfs vnode.
 */
int
null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode **vpp)
{
	struct null_node *xp;
	struct vnode *vp;
	int error;

	ASSERT_VOP_LOCKED(lowervp, "lowervp");
	VNPASS(lowervp->v_usecount > 0, lowervp);

	/* Look in the hash first. */
	*vpp = null_hashget(mp, lowervp);
	if (*vpp != NULL) {
		vrele(lowervp);
		return (0);
	}

	/*
	 * The insmntque1() call below requires the exclusive lock on
	 * the nullfs vnode.  Upgrade the lock now if the hash failed to
	 * provide a ready to use vnode.
	 */
	if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) {
		vn_lock(lowervp, LK_UPGRADE | LK_RETRY);
		if (VN_IS_DOOMED(lowervp)) {
			vput(lowervp);
			return (ENOENT);
		}
	}

	/*
	 * We do not serialize vnode creation; instead we check for
	 * duplicates later, when adding the new vnode to the hash.
	 * Note that a duplicate can only appear in the hash if the
	 * lowervp is locked LK_SHARED.
	 */
	xp = malloc(sizeof(struct null_node), M_NULLFSNODE, M_WAITOK);

	error = getnewvnode("nullfs", mp, &null_vnodeops, &vp);
	if (error) {
		vput(lowervp);
		free(xp, M_NULLFSNODE);
		return (error);
	}

	xp->null_vnode = vp;
	xp->null_lowervp = lowervp;
	xp->null_flags = 0;
	vp->v_type = lowervp->v_type;
	vp->v_data = xp;
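	/*
	 * Share the lower vnode's lock, so that locking the nullfs vnode
	 * locks the lower vnode as well and the two stay in sync.
	 */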
	vp->v_vnlock = lowervp->v_vnlock;
	error = insmntque1(vp, mp, null_insmntque_dtr, xp);
	if (error != 0)
		return (error);
	if (lowervp == MOUNTTONULLMOUNT(mp)->nullm_lowerrootvp)
		vp->v_vflag |= VV_ROOT;

	/*
	 * We might miss the case where the lower vnode sets VIRF_PGREAD
	 * some time after construction, which is the typical case.
	 * null_open() rechecks.
	 */
	if ((vn_irflag_read(lowervp) & VIRF_PGREAD) != 0) {
		MPASS(lowervp->v_object != NULL);
		if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
			if (vp->v_object == NULL)
				vp->v_object = lowervp->v_object;
			else
				MPASS(vp->v_object == lowervp->v_object);
			vn_irflag_set_cond(vp, VIRF_PGREAD);
		} else {
			MPASS(vp->v_object != NULL);
		}
	}

	/*
	 * Atomically insert our new node into the hash, or vref the
	 * existing one if someone else has beaten us to it.
	 */
	*vpp = null_hashins(mp, xp);
	if (*vpp != NULL) {
		vrele(lowervp);
		vp->v_object = NULL;	/* in case VIRF_PGREAD set it */
		null_destroy_proto(vp, xp);
		return (0);
	}
	*vpp = vp;

	return (0);
}

/*
 * Remove node from hash.
 */
void
null_hashrem(struct null_node *xp)
{

	rw_wlock(&null_hash_lock);
	LIST_REMOVE(xp, null_hash);
	rw_wunlock(&null_hash_lock);
}

#ifdef DIAGNOSTIC

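/*
 * Consistency check for a nullfs vnode: verify that it still points at a
 * lower vnode and that the lower vnode holds at least one use reference.
 * Returns the lower vnode.
 */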
struct vnode *
null_checkvp(struct vnode *vp, char *fil, int lno)
{
	struct null_node *a = VTONULL(vp);

#ifdef notyet
	/*
	 * Can't do this check because vop_reclaim runs
	 * with a funny vop vector.
	 */
	if (vp->v_op != null_vnodeop_p) {
		printf ("null_checkvp: on non-null-node\n");
		panic("null_checkvp");
	}
#endif
	if (a->null_lowervp == NULLVP) {
		/* Should never happen */
		panic("null_checkvp %p", vp);
	}
	VI_LOCK_FLAGS(a->null_lowervp, MTX_DUPOK);
	if (a->null_lowervp->v_usecount < 1)
		panic ("null with unref'ed lowervp, vp %p lvp %p",
		    vp, a->null_lowervp);
	VI_UNLOCK(a->null_lowervp);
#ifdef notyet
	printf("null %x/%d -> %x/%d [%s, %d]\n",
	    NULLTOV(a), vrefcnt(NULLTOV(a)),
	    a->null_lowervp, vrefcnt(a->null_lowervp),
	    fil, lno);
#endif
	return (a->null_lowervp);
}
#endif