FreeBSD/Linux Kernel Cross Reference
sys/kern/vfs_init.c
1 /*
2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * This code is derived from software contributed
6 * to Berkeley by John Heidemann of the UCLA Ficus project.
7 *
8 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)vfs_init.c 8.3 (Berkeley) 1/4/94
39 * $FreeBSD: releng/5.0/sys/kern/vfs_init.c 98641 2002-06-22 21:44:11Z mux $
40 */
41
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kernel.h>
46 #include <sys/mount.h>
47 #include <sys/sysctl.h>
48 #include <sys/vnode.h>
49 #include <sys/malloc.h>
50
51
/* Kernel malloc type used for all dynamically allocated VFS tables below. */
MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");

/*
 * The highest defined VFS number.
 * One past the largest vfc_typenum in use; maintained by
 * vfs_register()/vfs_unregister().
 */
int maxvfsconf = VFS_GENERIC + 1;

/*
 * Single-linked list of configured VFSes.
 * New entries are added/deleted by vfs_register()/vfs_unregister()
 */
struct vfsconf *vfsconf;
64
/*
 * vfs_init.c
 *
 * Allocate and fill in operations vectors.
 *
 * An undocumented feature of this approach to defining operations is that
 * there can be multiple entries in vfs_opv_descs for the same operations
 * vector. This allows third parties to extend the set of operations
 * supported by another layer in a binary compatible way. For example,
 * assume that NFS needed to be modified to support Ficus. NFS has an entry
 * (probably nfs_vnodeop_decls) declaring all the operations NFS supports by
 * default. Ficus could add another entry (ficus_nfs_vnodeop_decl_extensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code. (Of course, the OTW NFS protocol still needs to be munged, but
 * that is a(whole)nother story.) This is a feature.
 */
81
/* Table of known vnodeop vectors (list of VFS vnode vectors) */
static const struct vnodeopv_desc **vnodeopv_descs;
/* Number of entries in vnodeopv_descs */
static int vnodeopv_num;

/* Table of known descs (list of vnode op handlers "vop_access_desc") */
static struct vnodeop_desc **vfs_op_descs;
/* Reference counts for vfs_op_descs (parallel array, for module unload) */
static int *vfs_op_desc_refs;
/* Number of descriptions */
static int num_op_descs;
/* Number of entries in each description (i.e. size of each vop_t vector) */
static int vfs_opv_numops = 64;

/* Allow this number to be tuned at boot */
TUNABLE_INT("vfs.opv_numops", &vfs_opv_numops);
SYSCTL_INT(_vfs, OID_AUTO, opv_numops, CTLFLAG_RD, &vfs_opv_numops,
	0, "Maximum number of operations in vop_t vector");
99
static int int_cmp(const void *a, const void *b);

/*
 * qsort(9) comparator for ints.
 *
 * Returns the sign of the comparison rather than the raw difference:
 * "*a - *b" is the classic comparator bug and overflows (undefined
 * behavior) when the operands have opposite signs and large magnitude.
 * Callers only rely on the sign, so this is drop-in compatible.
 */
static int
int_cmp(const void *a, const void *b)
{
	int x = *(const int *)a;
	int y = *(const int *)b;

	return ((x > y) - (x < y));
}
107
/*
 * Recalculate the operations vector/description (those parts of it that can
 * be recalculated, that is.)
 *
 * Always allocate operations vector large enough to hold vfs_opv_numops
 * entries. The vector is never freed or deallocated once it is initialized,
 * so that vnodes might safely reference it through their v_op pointer without
 * vector changing suddenly from under them.
 */
static void
vfs_opv_recalc(void)
{
	int i, j, k;
	int *vfs_op_offsets;	/* temporary sorted copy of assigned offsets */
	vop_t ***opv_desc_vector_p;
	vop_t **opv_desc_vector;
	struct vnodeopv_entry_desc *opve_descp;
	const struct vnodeopv_desc *opv;

	if (vfs_op_descs == NULL)
		panic("vfs_opv_recalc called with null vfs_op_descs");

	/*
	 * Allocate and initialize temporary array to store
	 * offsets. Sort it to put all uninitialized entries
	 * first and to make holes in existing offset sequence
	 * detectable.
	 */
	MALLOC(vfs_op_offsets, int *,
		num_op_descs * sizeof(int), M_TEMP, M_WAITOK);
	/* NOTE(review): M_WAITOK allocations do not return NULL; check kept
	 * as a belt-and-suspenders sanity test. */
	if (vfs_op_offsets == NULL)
		panic("vfs_opv_recalc: no memory");
	for (i = 0; i < num_op_descs; i++)
		vfs_op_offsets[i] = vfs_op_descs[i]->vdesc_offset;
	qsort(vfs_op_offsets, num_op_descs, sizeof(int), int_cmp);

	/*
	 * Run through and make sure all known descs have an offset.
	 * Use vfs_op_offsets to locate holes in offset sequence and
	 * reuse them.
	 * vop_default_desc is hardwired at offset 1, and offset 0
	 * is a panic sanity check.
	 */
	j = 1; k = 1;
	for (i = 0; i < num_op_descs; i++) {
		if (vfs_op_descs[i]->vdesc_offset != 0)
			continue;
		/*
		 * Look at two adjacent entries vfs_op_offsets[j - 1] and
		 * vfs_op_offsets[j] and see if we can fit a new offset
		 * number in between. If not, look at the next pair until
		 * hole is found or the end of the vfs_op_offsets vector is
		 * reached. j has been initialized to 1 above so that
		 * referencing (j-1)-th element is safe and the loop will
		 * never execute if num_op_descs is 1. For each new value
		 * of i the j loop picks up from where the previous iteration
		 * has left off. When the last hole has been consumed or if no
		 * hole has been found, we will start allocating new numbers
		 * starting from the biggest already available offset + 1.
		 */
		for (; j < num_op_descs; j++) {
			if (vfs_op_offsets[j - 1] < k && vfs_op_offsets[j] > k)
				break;
			k = vfs_op_offsets[j] + 1;
		}
		vfs_op_descs[i]->vdesc_offset = k++;
	}
	FREE(vfs_op_offsets, M_TEMP);

	/* Panic if new vops will cause vector overflow */
	if (k > vfs_opv_numops)
		panic("VFS: Ran out of vop_t vector entries. %d entries required, only %d available.\n", k, vfs_opv_numops);

	/*
	 * Allocate and fill in the vectors
	 */
	for (i = 0; i < vnodeopv_num; i++) {
		opv = vnodeopv_descs[i];
		opv_desc_vector_p = opv->opv_desc_vector_p;
		/* Allocate once, never free (see block comment above). */
		if (*opv_desc_vector_p == NULL)
			MALLOC(*opv_desc_vector_p, vop_t **,
				vfs_opv_numops * sizeof(vop_t *), M_VNODE,
				M_WAITOK | M_ZERO);

		/* Fill in, with slot 0 being to return EOPNOTSUPP */
		opv_desc_vector = *opv_desc_vector_p;
		opv_desc_vector[0] = (vop_t *)vop_eopnotsupp;
		for (j = 0; opv->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(opv->opv_desc_ops[j]);
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
				opve_descp->opve_impl;
		}

		/* Replace unfilled routines with their default (slot 1). */
		opv_desc_vector = *(opv->opv_desc_vector_p);
		if (opv_desc_vector[1] == NULL)
			panic("vfs_opv_recalc: vector without a default.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] = opv_desc_vector[1];
	}
}
209
/*
 * Add a set of vnode operations (a description) to the table above.
 *
 * 'data' is really a 'const struct vnodeopv_desc *'; the void * signature
 * is dictated by the module glue that calls this. The description is
 * appended to vnodeopv_descs, every vnode op descriptor it references is
 * either ref-counted (already known) or newly entered in vfs_op_descs,
 * and vfs_opv_recalc() rebuilds the operation vectors.
 */
void
vfs_add_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	vop_t **opv_desc_vector;
	struct vnodeop_desc *desc;
	int i, j;

	opv = (const struct vnodeopv_desc *)data;
	/* Grow vnodeopv_descs by one and append the new description. */
	MALLOC(newopv, const struct vnodeopv_desc **,
		(vnodeopv_num + 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	if (vnodeopv_descs) {
		bcopy(vnodeopv_descs, newopv, vnodeopv_num * sizeof(*newopv));
		FREE(vnodeopv_descs, M_VNODE);
	}
	newopv[vnodeopv_num] = opv;
	vnodeopv_descs = newopv;
	vnodeopv_num++;

	/* See if we have turned up a new vnode op desc */
	/* NOTE(review): opv_desc_vector is set here but never used below. */
	opv_desc_vector = *(opv->opv_desc_vector_p);
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, increase reference count */
				vfs_op_desc_refs[j]++;
				break;
			}
		}
		if (j == num_op_descs) {
			/* not found, new entry */
			/* Grow vfs_op_descs and the parallel refcount array
			 * by one; bcopy+FREE is the kernel's realloc here. */
			MALLOC(newop, struct vnodeop_desc **,
				(num_op_descs + 1) * sizeof(*newop),
				M_VNODE, M_WAITOK);
			/* new reference count (for unload) */
			MALLOC(newref, int *,
				(num_op_descs + 1) * sizeof(*newref),
				M_VNODE, M_WAITOK);
			if (vfs_op_descs) {
				bcopy(vfs_op_descs, newop,
					num_op_descs * sizeof(*newop));
				FREE(vfs_op_descs, M_VNODE);
			}
			if (vfs_op_desc_refs) {
				bcopy(vfs_op_desc_refs, newref,
					num_op_descs * sizeof(*newref));
				FREE(vfs_op_desc_refs, M_VNODE);
			}
			newop[num_op_descs] = desc;
			newref[num_op_descs] = 1;
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs++;
		}
	}
	/* Rebuild all operation vectors to include the new descriptors. */
	vfs_opv_recalc();
}
271
/*
 * Remove a vnode type from the vnode description table above.
 *
 * Mirror image of vfs_add_vnodeops(): drops the references taken on each
 * vnode op descriptor, compacts vfs_op_descs/vfs_op_desc_refs when a
 * count reaches zero, unlinks the vnodeopv_desc itself, frees its
 * operations vector and rebuilds the remaining vectors.
 */
void
vfs_rm_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	vop_t **opv_desc_vector;
	struct vnodeop_desc *desc;
	int i, j, k;

	opv = (const struct vnodeopv_desc *)data;
	/* Lower ref counts on descs in the table and release if zero */
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, decrease reference count */
				vfs_op_desc_refs[j]--;
				break;
			}
		}
		/*
		 * Scan for a descriptor whose count dropped to zero.  At
		 * most one (the one just decremented) can qualify, since
		 * zero-count entries are removed as soon as they appear.
		 */
		for (j = 0; j < num_op_descs; j++) {
			if (vfs_op_desc_refs[j] > 0)
				continue;
			if (vfs_op_desc_refs[j] < 0)
				panic("vfs_remove_vnodeops: negative refcnt");
			/* Entry is going away - replace it with defaultop */
			for (k = 0; k < vnodeopv_num; k++) {
				opv_desc_vector =
					*(vnodeopv_descs[k]->opv_desc_vector_p);
				if (opv_desc_vector != NULL)
					opv_desc_vector[desc->vdesc_offset] =
						opv_desc_vector[1];
			}
			/* Shrink both parallel arrays by one entry. */
			MALLOC(newop, struct vnodeop_desc **,
				(num_op_descs - 1) * sizeof(*newop),
				M_VNODE, M_WAITOK);
			/* new reference count (for unload) */
			MALLOC(newref, int *,
				(num_op_descs - 1) * sizeof(*newref),
				M_VNODE, M_WAITOK);
			/* Close the gap in place, then copy the compacted
			 * contents into the smaller arrays. */
			for (k = j; k < (num_op_descs - 1); k++) {
				vfs_op_descs[k] = vfs_op_descs[k + 1];
				vfs_op_desc_refs[k] = vfs_op_desc_refs[k + 1];
			}
			bcopy(vfs_op_descs, newop,
				(num_op_descs - 1) * sizeof(*newop));
			bcopy(vfs_op_desc_refs, newref,
				(num_op_descs - 1) * sizeof(*newref));
			FREE(vfs_op_descs, M_VNODE);
			FREE(vfs_op_desc_refs, M_VNODE);
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs--;
		}
	}

	/* Unlink opv from the table of vnodeop vectors. */
	for (i = 0; i < vnodeopv_num; i++) {
		if (vnodeopv_descs[i] == opv) {
			for (j = i; j < (vnodeopv_num - 1); j++)
				vnodeopv_descs[j] = vnodeopv_descs[j + 1];
			break;
		}
	}
	if (i == vnodeopv_num)
		panic("vfs_remove_vnodeops: opv not found");
	/* Release this description's operations vector, if allocated. */
	opv_desc_vector = *(opv->opv_desc_vector_p);
	if (opv_desc_vector != NULL)
		FREE(opv_desc_vector, M_VNODE);
	MALLOC(newopv, const struct vnodeopv_desc **,
		(vnodeopv_num - 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	bcopy(vnodeopv_descs, newopv, (vnodeopv_num - 1) * sizeof(*newopv));
	FREE(vnodeopv_descs, M_VNODE);
	vnodeopv_descs = newopv;
	vnodeopv_num--;

	vfs_opv_recalc();
}
351
/*
 * Routines having to do with the management of the vnode table.
 */

/* Template vattr; filled in once by vattr_null() from vfsinit() below. */
struct vattr va_null;

/*
 * Initialize the vnode structures and initialize each filesystem type.
 */
/* ARGSUSED*/
static void
vfsinit(void *dummy)
{

	vattr_null(&va_null);
}
/* Run vfsinit() first within the VFS stage of kernel bring-up. */
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vfsinit, NULL)
368
369 /* Register a new filesystem type in the global table */
370 int
371 vfs_register(struct vfsconf *vfc)
372 {
373 struct sysctl_oid *oidp;
374 struct vfsconf *vfsp;
375
376 vfsp = NULL;
377 if (vfsconf)
378 for (vfsp = vfsconf; vfsp->vfc_next; vfsp = vfsp->vfc_next)
379 if (strcmp(vfc->vfc_name, vfsp->vfc_name) == 0)
380 return EEXIST;
381
382 vfc->vfc_typenum = maxvfsconf++;
383 if (vfsp)
384 vfsp->vfc_next = vfc;
385 else
386 vfsconf = vfc;
387 vfc->vfc_next = NULL;
388
389 /*
390 * If this filesystem has a sysctl node under vfs
391 * (i.e. vfs.xxfs), then change the oid number of that node to
392 * match the filesystem's type number. This allows user code
393 * which uses the type number to read sysctl variables defined
394 * by the filesystem to continue working. Since the oids are
395 * in a sorted list, we need to make sure the order is
396 * preserved by re-registering the oid after modifying its
397 * number.
398 */
399 SLIST_FOREACH(oidp, &sysctl__vfs_children, oid_link)
400 if (strcmp(oidp->oid_name, vfc->vfc_name) == 0) {
401 sysctl_unregister_oid(oidp);
402 oidp->oid_number = vfc->vfc_typenum;
403 sysctl_register_oid(oidp);
404 }
405
406 /*
407 * Call init function for this VFS...
408 */
409 (*(vfc->vfc_vfsops->vfs_init))(vfc);
410
411 return 0;
412 }
413
414
415 /* Remove registration of a filesystem type */
416 int
417 vfs_unregister(struct vfsconf *vfc)
418 {
419 struct vfsconf *vfsp, *prev_vfsp;
420 int error, i, maxtypenum;
421
422 i = vfc->vfc_typenum;
423
424 prev_vfsp = NULL;
425 for (vfsp = vfsconf; vfsp;
426 prev_vfsp = vfsp, vfsp = vfsp->vfc_next) {
427 if (!strcmp(vfc->vfc_name, vfsp->vfc_name))
428 break;
429 }
430 if (vfsp == NULL)
431 return EINVAL;
432 if (vfsp->vfc_refcount)
433 return EBUSY;
434 if (vfc->vfc_vfsops->vfs_uninit != NULL) {
435 error = (*vfc->vfc_vfsops->vfs_uninit)(vfsp);
436 if (error)
437 return (error);
438 }
439 if (prev_vfsp)
440 prev_vfsp->vfc_next = vfsp->vfc_next;
441 else
442 vfsconf = vfsp->vfc_next;
443 maxtypenum = VFS_GENERIC;
444 for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
445 if (maxtypenum < vfsp->vfc_typenum)
446 maxtypenum = vfsp->vfc_typenum;
447 maxvfsconf = maxtypenum + 1;
448 return 0;
449 }
450
451 /*
452 * Standard kernel module handling code for filesystem modules.
453 * Referenced from VFS_SET().
454 */
455 int
456 vfs_modevent(module_t mod, int type, void *data)
457 {
458 struct vfsconf *vfc;
459 int error = 0;
460
461 vfc = (struct vfsconf *)data;
462
463 switch (type) {
464 case MOD_LOAD:
465 if (vfc)
466 error = vfs_register(vfc);
467 break;
468
469 case MOD_UNLOAD:
470 if (vfc)
471 error = vfs_unregister(vfc);
472 break;
473 default: /* including MOD_SHUTDOWN */
474 break;
475 }
476 return (error);
477 }
Cache object: 79a94658103e00e7c60f479ef43a83f3
|