/*-
 * Copyright (c) 2000,2004
 *	Poul-Henning Kamp.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
 *
 * $FreeBSD$
 */

#include "opt_devfs.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/dirent.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <sys/kdb.h>

#include <fs/devfs/devfs.h>
#include <fs/devfs/devfs_int.h>

/*
 * The one true (but secret) list of active devices in the system.
 * Locked by dev_lock()/devmtx
 */
struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);

struct unrhdr *devfs_inos;

static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");

static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");

static unsigned devfs_generation;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
    &devfs_generation, 0, "DEVFS generation number");

unsigned devfs_rule_depth = 1;
SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
    &devfs_rule_depth, 0, "Max depth of ruleset include");

/*
 * Helper sysctl for devname(3).  We're given a struct cdev * and return
 * the name, if any, registered by the device driver.
 */
static int
sysctl_devname(SYSCTL_HANDLER_ARGS)
{
	int error;
	dev_t ud;
	struct cdev_priv *cdp;

	error = SYSCTL_IN(req, &ud, sizeof (ud));
	if (error)
		return (error);
	if (ud == NODEV)
		return (EINVAL);
/*
	ud ^ devfs_random();
*/
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
		if (cdp->cdp_inode == ud)
			break;
	dev_unlock();
	if (cdp == NULL)
		return (ENOENT);
	return (SYSCTL_OUT(req, cdp->cdp_c.si_name,
	    strlen(cdp->cdp_c.si_name) + 1));
}

SYSCTL_PROC(_kern, OID_AUTO, devname, CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY,
    NULL, 0, sysctl_devname, "", "devname(3) handler");

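/*
 * Export sizeof(struct cdev) and sizeof(struct cdev_priv) under
 * debug.sizeof so they can be inspected from userland with sysctl(8).
 */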
SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
    0, sizeof(struct cdev), "sizeof(struct cdev)");

SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
    0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");

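/*
 * Allocate a new struct cdev.  The cdev is embedded in a struct cdev_priv,
 * which carries the devfs bookkeeping: the per-mount dirent pointer array
 * (initially the single built-in slot) and the inode number.
 */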
struct cdev *
devfs_alloc(void)
{
	struct cdev_priv *cdp;
	struct cdev *cdev;

	cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO | M_WAITOK);

	cdp->cdp_dirents = &cdp->cdp_dirent0;
	cdp->cdp_dirent0 = NULL;
	cdp->cdp_maxdirent = 0;

	cdev = &cdp->cdp_c;
	cdev->si_priv = cdp;

	cdev->si_name = cdev->__si_namebuf;
	LIST_INIT(&cdev->si_children);
	return (cdev);
}

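/*
 * Release a struct cdev allocated by devfs_alloc(): drop any credential
 * reference, return the inode number and any malloc'ed dirent array, and
 * free the enclosing cdev_priv.
 */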
void
devfs_free(struct cdev *cdev)
{
	struct cdev_priv *cdp;

	cdp = cdev->si_priv;
	if (cdev->si_cred != NULL)
		crfree(cdev->si_cred);
	if (cdp->cdp_inode > 0)
		free_unr(devfs_inos, cdp->cdp_inode);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	free(cdp, M_CDEVP);
}

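/*
 * Look up a directory entry by name in the directory 'dd'.  Returns the
 * entry, or NULL if no entry with that name exists.
 */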
struct devfs_dirent *
devfs_find(struct devfs_dirent *dd, const char *name, int namelen)
{
	struct devfs_dirent *de;

	TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
		if (namelen != de->de_dirent->d_namlen)
			continue;
		if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
			continue;
		break;
	}
	return (de);
}

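/*
 * Allocate and initialize a new directory entry.  The struct dirent seen
 * by readdir() is allocated in the same malloc block, directly after the
 * devfs_dirent itself.
 */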
struct devfs_dirent *
devfs_newdirent(char *name, int namelen)
{
	int i;
	struct devfs_dirent *de;
	struct dirent d;

	d.d_namlen = namelen;
	i = sizeof (*de) + GENERIC_DIRSIZ(&d);
	de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
	de->de_dirent = (struct dirent *)(de + 1);
	de->de_dirent->d_namlen = namelen;
	de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
	bcopy(name, de->de_dirent->d_name, namelen);
	de->de_dirent->d_name[namelen] = '\0';
	vfs_timestamp(&de->de_ctime);
	de->de_mtime = de->de_atime = de->de_ctime;
	de->de_links = 1;
	de->de_holdcnt = 1;
#ifdef MAC
	mac_init_devfsdirent(de);
#endif
	return (de);
}

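/*
 * Create a new directory under 'dotdot' (or a root directory when dotdot
 * is NULL), complete with its "." and ".." entries.  If 'inode' is zero,
 * a fresh inode number is allocated from devfs_inos.
 */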
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen,
    struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/* Create the "." entry in the new directory */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
	}

#ifdef MAC
	mac_create_devfs_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}

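/*
 * Free a directory entry allocated by devfs_newdirent().
 */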
void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}

/*
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporarily dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
	struct vnode *vp;
	struct thread *td;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	td = curthread;
	de->de_flags |= DE_DOOMED;
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if (!vp_locked)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if (!vp_locked)
			VOP_UNLOCK(vp, 0, td);
		vdrop(vp);
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_destroy_devfsdirent(de);
#endif
	if (de->de_inode > DEVFS_ROOTINO) {
		free_unr(devfs_inos, de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);
}

/*
 * Called on unmount.
 * Recursively removes the entire tree.
 * The caller needs to hold the dm for the duration of the call.
 */

static void
devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
{
	struct devfs_dirent *de;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	for (;;) {
		de = TAILQ_FIRST(&dd->de_dlist);
		if (de == NULL)
			break;
		TAILQ_REMOVE(&dd->de_dlist, de, de_list);
		if (de->de_flags & (DE_DOT|DE_DOTDOT))
			devfs_delete(dm, de, 0);
		else if (de->de_dirent->d_type == DT_DIR)
			devfs_purge(dm, de);
		else
			devfs_delete(dm, de, 0);
	}
	devfs_delete(dm, dd, 0);
}

/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount point's dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element long and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}

/*
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}

		cdp->cdp_inuse++;
		dev_unlock();

		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

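		/*
		 * Walk the pathname in si_name one component at a time,
		 * creating any missing intermediate directories on the way.
		 */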
		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			s = q + 1;
			dd = de;
		}

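		/*
		 * Create the dirent for the final path component: an alias
		 * (SI_ALIAS) becomes a symlink to its parent device, anything
		 * else becomes a character device node.
		 */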
		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_create_devfs_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}

/*
 * Bring the dirent tree for this mount in sync with the global cdev list.
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_populate(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	if (dm->dm_generation == devfs_generation)
		return;
	while (devfs_populate_loop(dm, 0))
		continue;
	dm->dm_generation = devfs_generation;
}

/*
 * Remove all dirents for this mount in preparation for unmount.
 * The caller needs to hold the dm for the duration of the call.
 */
void
devfs_cleanup(struct devfs_mount *dm)
{

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	while (devfs_populate_loop(dm, 1))
		continue;
	devfs_purge(dm, dm->dm_rootdir);
}

/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the dev_lock() mutex (devmtx) is held, so no further
 * locking is necessary and no sleeping is allowed.
 */

void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = dev->si_priv;
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}

void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = dev->si_priv;
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}

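/*
 * Set up the unit-number allocator for devfs inode numbers; every new
 * cdev and directory draws its inode from this pool.
 */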
static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}

SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);