1 /*-
2 * Copyright (c) 2000,2004
3 * Poul-Henning Kamp. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Neither the name of the University nor the names of its contributors
11 * may be used to endorse or promote products derived from this software
12 * without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
27 *
28 * $FreeBSD$
29 */
30
31 #include "opt_mac.h"
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/conf.h>
36 #include <sys/dirent.h>
37 #include <sys/kernel.h>
38 #include <sys/limits.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/proc.h>
42 #include <sys/sx.h>
43 #include <sys/sysctl.h>
44 #include <sys/vnode.h>
45
46 #include <sys/kdb.h>
47
48 #include <fs/devfs/devfs.h>
49 #include <fs/devfs/devfs_int.h>
50
51 #include <security/mac/mac_framework.h>
52
53 /*
54 * The one true (but secret) list of active devices in the system.
55 * Locked by dev_lock()/devmtx
56 */
57 struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);
58
59 struct unrhdr *devfs_inos;
60
61
62 static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
63 static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
64 static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");
65
66 static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");
67
68 static unsigned devfs_generation;
69 SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
70 &devfs_generation, 0, "DEVFS generation number");
71
72 unsigned devfs_rule_depth = 1;
73 SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
74 &devfs_rule_depth, 0, "Max depth of ruleset include");
75
76 /*
77 * Helper sysctl for devname(3). We're given a struct cdev * and return
78 * the name, if any, registered by the device driver.
79 */
80 static int
81 sysctl_devname(SYSCTL_HANDLER_ARGS)
82 {
83 int error;
84 dev_t ud;
85 struct cdev_priv *cdp;
86
87 error = SYSCTL_IN(req, &ud, sizeof (ud));
88 if (error)
89 return (error);
90 if (ud == NODEV)
91 return(EINVAL);
92 /*
93 ud ^ devfs_random();
94 */
95 dev_lock();
96 TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
97 if (cdp->cdp_inode == ud)
98 break;
99 dev_unlock();
100 if (cdp == NULL)
101 return(ENOENT);
102 return(SYSCTL_OUT(req, cdp->cdp_c.si_name, strlen(cdp->cdp_c.si_name) + 1));
103 return (error);
104 }
105
106 SYSCTL_PROC(_kern, OID_AUTO, devname,
107 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
108 NULL, 0, sysctl_devname, "", "devname(3) handler");
109
110 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
111 0, sizeof(struct cdev), "sizeof(struct cdev)");
112
113 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
114 0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");
115
116 struct cdev *
117 devfs_alloc(void)
118 {
119 struct cdev_priv *cdp;
120 struct cdev *cdev;
121
122 cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO | M_WAITOK);
123
124 cdp->cdp_dirents = &cdp->cdp_dirent0;
125 cdp->cdp_dirent0 = NULL;
126 cdp->cdp_maxdirent = 0;
127
128 cdev = &cdp->cdp_c;
129 cdev->si_priv = cdp;
130
131 cdev->si_name = cdev->__si_namebuf;
132 LIST_INIT(&cdev->si_children);
133 return (cdev);
134 }
135
136 void
137 devfs_free(struct cdev *cdev)
138 {
139 struct cdev_priv *cdp;
140
141 cdp = cdev->si_priv;
142 if (cdev->si_cred != NULL)
143 crfree(cdev->si_cred);
144 if (cdp->cdp_inode > 0)
145 free_unr(devfs_inos, cdp->cdp_inode);
146 if (cdp->cdp_maxdirent > 0)
147 free(cdp->cdp_dirents, M_DEVFS2);
148 free(cdp, M_CDEVP);
149 }
150
151 struct devfs_dirent *
152 devfs_find(struct devfs_dirent *dd, const char *name, int namelen)
153 {
154 struct devfs_dirent *de;
155
156 TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
157 if (namelen != de->de_dirent->d_namlen)
158 continue;
159 if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
160 continue;
161 break;
162 }
163 return (de);
164 }
165
166 struct devfs_dirent *
167 devfs_newdirent(char *name, int namelen)
168 {
169 int i;
170 struct devfs_dirent *de;
171 struct dirent d;
172
173 d.d_namlen = namelen;
174 i = sizeof (*de) + GENERIC_DIRSIZ(&d);
175 de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
176 de->de_dirent = (struct dirent *)(de + 1);
177 de->de_dirent->d_namlen = namelen;
178 de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
179 bcopy(name, de->de_dirent->d_name, namelen);
180 de->de_dirent->d_name[namelen] = '\0';
181 vfs_timestamp(&de->de_ctime);
182 de->de_mtime = de->de_atime = de->de_ctime;
183 de->de_links = 1;
184 de->de_holdcnt = 1;
185 #ifdef MAC
186 mac_init_devfs(de);
187 #endif
188 return (de);
189 }
190
/*
 * Create a new directory entry named 'name' with "." and ".." children.
 * If 'inode' is non-zero it is used as-is (the root directory case),
 * otherwise a fresh inode number is allocated.  When 'dotdot' is given,
 * the new directory is linked into it and devfs rules are applied; the
 * caller must then hold dm->dm_lock exclusively.  Returns the new
 * directory entry (never NULL; allocation sleeps).
 */
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen, struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;	/* one for "." below, one from the parent */
	dd->de_dir = dd;
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/* Create the "." entry in the new directory */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		/* No parent: ".." points back at the directory itself. */
		de->de_dir = dd;
	} else {
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;	/* the parent gains a ".." back-link */
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_create_devfs_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}
236
/*
 * Free a directory entry.  The struct dirent was co-allocated directly
 * behind the devfs_dirent in devfs_newdirent(), so one free() releases
 * both.
 */
void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}
242
243 /*
244 * The caller needs to hold the dm for the duration of the call since
245 * dm->dm_lock may be temporary dropped.
246 */
/*
 * Doom and tear down a directory entry: reclaim any associated vnode,
 * free its symlink target, return its inode number, and free the entry
 * itself once the last hold reference is dropped.
 *
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporary dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
	struct vnode *vp;
	struct thread *td;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
	    ("devfs_delete doomed dirent"));
	td = curthread;
	de->de_flags |= DE_DOOMED;
	/* The interlock stabilizes de->de_vnode against concurrent reclaim. */
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		/* Hold the vnode so it survives while dm_lock is dropped. */
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if (!vp_locked)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY, td);
		else
			VI_UNLOCK(vp);
		vgone(vp);
		if (!vp_locked)
			VOP_UNLOCK(vp, 0, td);
		vdrop(vp);
		/* Re-acquire before touching the dirent again. */
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_destroy_devfs(de);
#endif
	/* The root inode is never returned to the allocator. */
	if (de->de_inode > DEVFS_ROOTINO) {
		free_unr(devfs_inos, de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);
}
289
290 /*
291 * Called on unmount.
292 * Recursively removes the entire tree.
293 * The caller needs to hold the dm for the duration of the call.
294 */
295
296 static void
297 devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
298 {
299 struct devfs_dirent *de;
300
301 sx_assert(&dm->dm_lock, SX_XLOCKED);
302 for (;;) {
303 de = TAILQ_FIRST(&dd->de_dlist);
304 if (de == NULL)
305 break;
306 TAILQ_REMOVE(&dd->de_dlist, de, de_list);
307 if (de->de_flags & (DE_DOT|DE_DOTDOT))
308 devfs_delete(dm, de, 0);
309 else if (de->de_dirent->d_type == DT_DIR)
310 devfs_purge(dm, de);
311 else
312 devfs_delete(dm, de, 0);
313 }
314 devfs_delete(dm, dd, 0);
315 }
316
317 /*
318 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
319 * by the mount points dm_idx.
320 * This function extends the array when necessary, taking into account that
321 * the default array is 1 element and not malloc'ed.
322 */
/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount points dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	/* Allocate before taking dev_lock(); M_WAITOK may sleep. */
	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	/* Copy the old slots (including index 0) into the larger array. */
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	/* maxdirent == 0 means the old array is the embedded slot; don't free. */
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}
349
350 /*
351 * The caller needs to hold the dm for the duration of the call.
352 */
/*
 * One pass over the global cdev list, synchronizing this mount's dirents
 * with it: stale entries are removed, inactive devices garbage collected
 * and (unless 'cleanup' is set) missing entries created.  Returns 1 after
 * making any change — dev_lock() had to be dropped, so the caller must
 * restart the scan — and 0 once the list is fully consistent.
 *
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			/* devfs_delete() may sleep; drop the dev mutex. */
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			/* Still referenced by some mount's dirent; skip. */
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		/* Nothing to do if this mount already has a dirent. */
		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}


		/* Pin the cdp against GC while we build the dirent unlocked. */
		cdp->cdp_inuse++;
		dev_unlock();

		/* Grow the per-cdev dirent array if this mount has no slot. */
		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		/*
		 * Walk the '/'-separated components of si_name, creating
		 * intermediate directories as needed; afterwards 's' points
		 * at the final component and 'dd' at its parent directory.
		 */
		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			s = q + 1;
			dd = de;
		}

		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			/* Aliases become symlinks to their parent device. */
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_create_devfs_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}
477
478 /*
479 * The caller needs to hold the dm for the duration of the call.
480 */
481 void
482 devfs_populate(struct devfs_mount *dm)
483 {
484 unsigned gen;
485
486 sx_assert(&dm->dm_lock, SX_XLOCKED);
487 gen = devfs_generation;
488 if (dm->dm_generation == gen)
489 return;
490 while (devfs_populate_loop(dm, 0))
491 continue;
492 dm->dm_generation = gen;
493 }
494
495 /*
496 * The caller needs to hold the dm for the duration of the call.
497 */
498 void
499 devfs_cleanup(struct devfs_mount *dm)
500 {
501
502 sx_assert(&dm->dm_lock, SX_XLOCKED);
503 while (devfs_populate_loop(dm, 1))
504 continue;
505 devfs_purge(dm, dm->dm_rootdir);
506 }
507
508 /*
509 * devfs_create() and devfs_destroy() are called from kern_conf.c and
510 * in both cases the devlock() mutex is held, so no further locking
511 * is necesary and no sleeping allowed.
512 */
513
/*
 * Register a new cdev with devfs: mark it active, assign it an inode
 * number, and put it on the global list.  Called from kern_conf.c with
 * the devlock() mutex held, so no further locking is necessary and no
 * sleeping is allowed (hence alloc_unrl/dev_refl, the locked variants).
 * Bumping devfs_generation makes every mount re-run devfs_populate().
 */
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = dev->si_priv;
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	devfs_generation++;
}
527
/*
 * Deactivate a cdev.  Called from kern_conf.c with the devlock() mutex
 * held.  The cdev stays on cdevp_list; clearing CDP_ACTIVE and bumping
 * the generation lets devfs_populate_loop() remove the dirents and
 * garbage collect the device later.
 */
void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = dev->si_priv;
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}
538
/*
 * SYSINIT hook: create the inode-number allocator, covering everything
 * above the reserved root inode and protected by the dev mutex.
 */
static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}
545
546 SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
/* Cache object: 08b27ca94d2137e1dc01a45f8292b688 */