1 /*-
2 * Copyright (c) 2000,2004
3 * Poul-Henning Kamp. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Neither the name of the University nor the names of its contributors
11 * may be used to endorse or promote products derived from this software
12 * without specific prior written permission.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * From: FreeBSD: src/sys/miscfs/kernfs/kernfs_vfsops.c 1.36
27 *
28 * $FreeBSD$
29 */
30
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/conf.h>
34 #include <sys/dirent.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/proc.h>
40 #include <sys/sx.h>
41 #include <sys/sysctl.h>
42 #include <sys/vnode.h>
43
44 #include <sys/kdb.h>
45
46 #include <fs/devfs/devfs.h>
47 #include <fs/devfs/devfs_int.h>
48
49 #include <security/mac/mac_framework.h>
50
51 /*
52 * The one true (but secret) list of active devices in the system.
53 * Locked by dev_lock()/devmtx
54 */
55 struct cdev_priv_list cdevp_list = TAILQ_HEAD_INITIALIZER(cdevp_list);
56
57 struct unrhdr *devfs_inos;
58
59
60 static MALLOC_DEFINE(M_DEVFS2, "DEVFS2", "DEVFS data 2");
61 static MALLOC_DEFINE(M_DEVFS3, "DEVFS3", "DEVFS data 3");
62 static MALLOC_DEFINE(M_CDEVP, "DEVFS1", "DEVFS cdev_priv storage");
63
64 static SYSCTL_NODE(_vfs, OID_AUTO, devfs, CTLFLAG_RW, 0, "DEVFS filesystem");
65
66 static unsigned devfs_generation;
67 SYSCTL_UINT(_vfs_devfs, OID_AUTO, generation, CTLFLAG_RD,
68 &devfs_generation, 0, "DEVFS generation number");
69
70 unsigned devfs_rule_depth = 1;
71 SYSCTL_UINT(_vfs_devfs, OID_AUTO, rule_depth, CTLFLAG_RW,
72 &devfs_rule_depth, 0, "Max depth of ruleset include");
73
74 /*
75 * Helper sysctl for devname(3). We're given a struct cdev * and return
76 * the name, if any, registered by the device driver.
77 */
78 static int
79 sysctl_devname(SYSCTL_HANDLER_ARGS)
80 {
81 int error;
82 dev_t ud;
83 struct cdev_priv *cdp;
84
85 error = SYSCTL_IN(req, &ud, sizeof (ud));
86 if (error)
87 return (error);
88 if (ud == NODEV)
89 return(EINVAL);
90 /*
91 ud ^ devfs_random();
92 */
93 dev_lock();
94 TAILQ_FOREACH(cdp, &cdevp_list, cdp_list)
95 if (cdp->cdp_inode == ud)
96 break;
97 dev_unlock();
98 if (cdp == NULL)
99 return(ENOENT);
100 return(SYSCTL_OUT(req, cdp->cdp_c.si_name, strlen(cdp->cdp_c.si_name) + 1));
101 return (error);
102 }
103
104 SYSCTL_PROC(_kern, OID_AUTO, devname,
105 CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MPSAFE,
106 NULL, 0, sysctl_devname, "", "devname(3) handler");
107
108 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev, CTLFLAG_RD,
109 0, sizeof(struct cdev), "sizeof(struct cdev)");
110
111 SYSCTL_INT(_debug_sizeof, OID_AUTO, cdev_priv, CTLFLAG_RD,
112 0, sizeof(struct cdev_priv), "sizeof(struct cdev_priv)");
113
114 struct cdev *
115 devfs_alloc(int flags)
116 {
117 struct cdev_priv *cdp;
118 struct cdev *cdev;
119 struct timespec ts;
120
121 cdp = malloc(sizeof *cdp, M_CDEVP, M_USE_RESERVE | M_ZERO |
122 ((flags & MAKEDEV_NOWAIT) ? M_NOWAIT : M_WAITOK));
123 if (cdp == NULL)
124 return (NULL);
125
126 cdp->cdp_dirents = &cdp->cdp_dirent0;
127 cdp->cdp_dirent0 = NULL;
128 cdp->cdp_maxdirent = 0;
129 cdp->cdp_inode = 0;
130
131 cdev = &cdp->cdp_c;
132
133 cdev->si_name = cdev->__si_namebuf;
134 LIST_INIT(&cdev->si_children);
135 vfs_timestamp(&ts);
136 cdev->si_atime = cdev->si_mtime = cdev->si_ctime = ts;
137 cdev->si_cred = NULL;
138
139 return (cdev);
140 }
141
142 void
143 devfs_free(struct cdev *cdev)
144 {
145 struct cdev_priv *cdp;
146
147 cdp = cdev2priv(cdev);
148 if (cdev->si_cred != NULL)
149 crfree(cdev->si_cred);
150 devfs_free_cdp_inode(cdp->cdp_inode);
151 if (cdp->cdp_maxdirent > 0)
152 free(cdp->cdp_dirents, M_DEVFS2);
153 free(cdp, M_CDEVP);
154 }
155
156 struct devfs_dirent *
157 devfs_find(struct devfs_dirent *dd, const char *name, int namelen)
158 {
159 struct devfs_dirent *de;
160
161 TAILQ_FOREACH(de, &dd->de_dlist, de_list) {
162 if (namelen != de->de_dirent->d_namlen)
163 continue;
164 if (bcmp(name, de->de_dirent->d_name, namelen) != 0)
165 continue;
166 break;
167 }
168 return (de);
169 }
170
171 struct devfs_dirent *
172 devfs_newdirent(char *name, int namelen)
173 {
174 int i;
175 struct devfs_dirent *de;
176 struct dirent d;
177
178 d.d_namlen = namelen;
179 i = sizeof (*de) + GENERIC_DIRSIZ(&d);
180 de = malloc(i, M_DEVFS3, M_WAITOK | M_ZERO);
181 de->de_dirent = (struct dirent *)(de + 1);
182 de->de_dirent->d_namlen = namelen;
183 de->de_dirent->d_reclen = GENERIC_DIRSIZ(&d);
184 bcopy(name, de->de_dirent->d_name, namelen);
185 de->de_dirent->d_name[namelen] = '\0';
186 vfs_timestamp(&de->de_ctime);
187 de->de_mtime = de->de_atime = de->de_ctime;
188 de->de_links = 1;
189 de->de_holdcnt = 1;
190 #ifdef MAC
191 mac_devfs_init(de);
192 #endif
193 return (de);
194 }
195
196 struct devfs_dirent *
197 devfs_parent_dirent(struct devfs_dirent *de)
198 {
199
200 if (de->de_dirent->d_type != DT_DIR)
201 return (de->de_dir);
202
203 if (de->de_flags & (DE_DOT | DE_DOTDOT))
204 return (NULL);
205
206 de = TAILQ_FIRST(&de->de_dlist); /* "." */
207 if (de == NULL)
208 return (NULL);
209 de = TAILQ_NEXT(de, de_list); /* ".." */
210 if (de == NULL)
211 return (NULL);
212
213 return (de->de_dir);
214 }
215
/*
 * Create a new directory dirent called "name", populate it with "." and
 * ".." entries, and (unless dotdot is NULL) hook it into its parent
 * directory.  If "inode" is zero a fresh inode number is allocated.
 */
struct devfs_dirent *
devfs_vmkdir(struct devfs_mount *dmp, char *name, int namelen, struct devfs_dirent *dotdot, u_int inode)
{
	struct devfs_dirent *dd;
	struct devfs_dirent *de;

	/* Create the new directory */
	dd = devfs_newdirent(name, namelen);
	TAILQ_INIT(&dd->de_dlist);
	dd->de_dirent->d_type = DT_DIR;
	dd->de_mode = 0555;
	dd->de_links = 2;
	dd->de_dir = dd;
	/* Use the caller-supplied inode if given, else allocate one. */
	if (inode != 0)
		dd->de_inode = inode;
	else
		dd->de_inode = alloc_unr(devfs_inos);

	/* Create the "." entry in the new directory */
	de = devfs_newdirent(".", 1);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	de->de_dir = dd;

	/* Create the ".." entry in the new directory */
	de = devfs_newdirent("..", 2);
	de->de_dirent->d_type = DT_DIR;
	de->de_flags |= DE_DOTDOT;
	TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
	if (dotdot == NULL) {
		/* No parent: root directory, so ".." points to itself. */
		de->de_dir = dd;
	} else {
		/* Hook into the parent and apply the mount's rules. */
		de->de_dir = dotdot;
		sx_assert(&dmp->dm_lock, SX_XLOCKED);
		TAILQ_INSERT_TAIL(&dotdot->de_dlist, dd, de_list);
		dotdot->de_links++;
		devfs_rules_apply(dmp, dd);
	}

#ifdef MAC
	mac_devfs_create_directory(dmp->dm_mount, name, namelen, dd);
#endif
	return (dd);
}
261
/*
 * Free a dirent.  The embedded struct dirent lives in the same
 * allocation (see devfs_newdirent()), so a single free suffices.
 */
void
devfs_dirent_free(struct devfs_dirent *de)
{
	free(de, M_DEVFS3);
}
267
/*
 * Destroy a dirent: reclaim its vnode (if one is attached), free the
 * symlink target and inode number, and drop the hold reference.
 *
 * The caller needs to hold the dm for the duration of the call since
 * dm->dm_lock may be temporary dropped.
 */
void
devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked)
{
	struct vnode *vp;

	KASSERT((de->de_flags & DE_DOOMED) == 0,
		("devfs_delete doomed dirent"));
	de->de_flags |= DE_DOOMED;
	mtx_lock(&devfs_de_interlock);
	vp = de->de_vnode;
	if (vp != NULL) {
		/*
		 * Hand-over-hand: take the vnode interlock before
		 * releasing the dirent interlock, hold the vnode,
		 * and drop dm_lock before sleeping on the vnode lock.
		 */
		VI_LOCK(vp);
		mtx_unlock(&devfs_de_interlock);
		vholdl(vp);
		sx_unlock(&dm->dm_lock);
		if (!vp_locked)
			vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
		else
			VI_UNLOCK(vp);
		/* Force the vnode to be reclaimed. */
		vgone(vp);
		if (!vp_locked)
			VOP_UNLOCK(vp, 0);
		vdrop(vp);
		/* Re-acquire dm_lock for the caller. */
		sx_xlock(&dm->dm_lock);
	} else
		mtx_unlock(&devfs_de_interlock);
	if (de->de_symlink) {
		free(de->de_symlink, M_DEVFS);
		de->de_symlink = NULL;
	}
#ifdef MAC
	mac_devfs_destroy(de);
#endif
	/* The root inode is preallocated and never returned to the pool. */
	if (de->de_inode > DEVFS_ROOTINO) {
		devfs_free_cdp_inode(de->de_inode);
		de->de_inode = 0;
	}
	if (DEVFS_DE_DROP(de))
		devfs_dirent_free(de);
}
312
313 /*
314 * Called on unmount.
315 * Recursively removes the entire tree.
316 * The caller needs to hold the dm for the duration of the call.
317 */
318
319 static void
320 devfs_purge(struct devfs_mount *dm, struct devfs_dirent *dd)
321 {
322 struct devfs_dirent *de;
323
324 sx_assert(&dm->dm_lock, SX_XLOCKED);
325 for (;;) {
326 de = TAILQ_FIRST(&dd->de_dlist);
327 if (de == NULL)
328 break;
329 TAILQ_REMOVE(&dd->de_dlist, de, de_list);
330 if (de->de_flags & (DE_DOT|DE_DOTDOT))
331 devfs_delete(dm, de, 0);
332 else if (de->de_dirent->d_type == DT_DIR)
333 devfs_purge(dm, de);
334 else
335 devfs_delete(dm, de, 0);
336 }
337 devfs_delete(dm, dd, 0);
338 }
339
/*
 * Each cdev_priv has an array of pointers to devfs_dirent which is indexed
 * by the mount points dm_idx.
 * This function extends the array when necessary, taking into account that
 * the default array is 1 element and not malloc'ed.
 */
static void
devfs_metoo(struct cdev_priv *cdp, struct devfs_mount *dm)
{
	struct devfs_dirent **dep;
	int siz;

	/* Allocate first (may sleep), then re-check for races under dev_lock. */
	siz = (dm->dm_idx + 1) * sizeof *dep;
	dep = malloc(siz, M_DEVFS2, M_WAITOK | M_ZERO);
	dev_lock();
	if (dm->dm_idx <= cdp->cdp_maxdirent) {
		/* We got raced */
		dev_unlock();
		free(dep, M_DEVFS2);
		return;
	}
	memcpy(dep, cdp->cdp_dirents, (cdp->cdp_maxdirent + 1) * sizeof *dep);
	/* The initial 1-element array is embedded, not malloc'ed. */
	if (cdp->cdp_maxdirent > 0)
		free(cdp->cdp_dirents, M_DEVFS2);
	cdp->cdp_dirents = dep;
	/*
	 * XXX: if malloc told us how much we actually got this could
	 * XXX: be optimized.
	 */
	cdp->cdp_maxdirent = dm->dm_idx;
	dev_unlock();
}
372
/*
 * One pass over the global device list, synchronizing this mount's
 * dirents with it.  Returns 1 whenever it changed something (the
 * caller should then rescan, since dev_lock was dropped), and 0 once
 * the mount is fully synchronized.
 *
 * The caller needs to hold the dm for the duration of the call.
 */
static int
devfs_populate_loop(struct devfs_mount *dm, int cleanup)
{
	struct cdev_priv *cdp;
	struct devfs_dirent *de;
	struct devfs_dirent *dd;
	struct cdev *pdev;
	int j;
	char *q, *s;

	sx_assert(&dm->dm_lock, SX_XLOCKED);
	dev_lock();
	TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) {

		KASSERT(cdp->cdp_dirents != NULL, ("NULL cdp_dirents"));

		/*
		 * If we are unmounting, or the device has been destroyed,
		 * clean up our dirent.
		 */
		if ((cleanup || !(cdp->cdp_flags & CDP_ACTIVE)) &&
		    dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			cdp->cdp_dirents[dm->dm_idx] = NULL;
			KASSERT(cdp == de->de_cdp,
			    ("%s %d %s %p %p", __func__, __LINE__,
			    cdp->cdp_c.si_name, cdp, de->de_cdp));
			KASSERT(de->de_dir != NULL, ("Null de->de_dir"));
			/* devfs_delete() may sleep; drop dev_lock first. */
			dev_unlock();

			TAILQ_REMOVE(&de->de_dir->de_dlist, de, de_list);
			de->de_cdp = NULL;
			de->de_inode = 0;
			devfs_delete(dm, de, 0);
			dev_lock();
			cdp->cdp_inuse--;
			dev_unlock();
			return (1);
		}
		/*
		 * GC any lingering devices
		 */
		if (!(cdp->cdp_flags & CDP_ACTIVE)) {
			/* Still referenced by some mount's dirents. */
			if (cdp->cdp_inuse > 0)
				continue;
			TAILQ_REMOVE(&cdevp_list, cdp, cdp_list);
			dev_unlock();
			/* Drop the reference taken in devfs_create(). */
			dev_rel(&cdp->cdp_c);
			return (1);
		}
		/*
		 * Don't create any new dirents if we are unmounting
		 */
		if (cleanup)
			continue;
		KASSERT((cdp->cdp_flags & CDP_ACTIVE), ("Bogons, I tell ya'!"));

		/* Skip devices this mount already has a dirent for. */
		if (dm->dm_idx <= cdp->cdp_maxdirent &&
		    cdp->cdp_dirents[dm->dm_idx] != NULL) {
			de = cdp->cdp_dirents[dm->dm_idx];
			KASSERT(cdp == de->de_cdp, ("inconsistent cdp"));
			continue;
		}


		cdp->cdp_inuse++;
		dev_unlock();

		/* Extend the per-device dirent array if it is too small. */
		if (dm->dm_idx > cdp->cdp_maxdirent)
			devfs_metoo(cdp, dm);

		/*
		 * Walk the '/'-separated components of the device name,
		 * creating intermediate directories as needed.
		 */
		dd = dm->dm_rootdir;
		s = cdp->cdp_c.si_name;
		for (;;) {
			for (q = s; *q != '/' && *q != '\0'; q++)
				continue;
			if (*q != '/')
				break;
			de = devfs_find(dd, s, q - s);
			if (de == NULL)
				de = devfs_vmkdir(dm, s, q - s, dd, 0);
			s = q + 1;
			dd = de;
		}

		/* "s" now points at the final (leaf) component. */
		de = devfs_newdirent(s, q - s);
		if (cdp->cdp_c.si_flags & SI_ALIAS) {
			/* Aliases become symlinks to their parent device. */
			de->de_uid = 0;
			de->de_gid = 0;
			de->de_mode = 0755;
			de->de_dirent->d_type = DT_LNK;
			pdev = cdp->cdp_c.si_parent;
			j = strlen(pdev->si_name) + 1;
			de->de_symlink = malloc(j, M_DEVFS, M_WAITOK);
			bcopy(pdev->si_name, de->de_symlink, j);
		} else {
			/* Regular devices inherit the driver's ownership. */
			de->de_uid = cdp->cdp_c.si_uid;
			de->de_gid = cdp->cdp_c.si_gid;
			de->de_mode = cdp->cdp_c.si_mode;
			de->de_dirent->d_type = DT_CHR;
		}
		de->de_inode = cdp->cdp_inode;
		de->de_cdp = cdp;
#ifdef MAC
		mac_devfs_create_device(cdp->cdp_c.si_cred, dm->dm_mount,
		    &cdp->cdp_c, de);
#endif
		de->de_dir = dd;
		TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list);
		devfs_rules_apply(dm, de);
		dev_lock();
		/* XXX: could check that cdp is still active here */
		KASSERT(cdp->cdp_dirents[dm->dm_idx] == NULL,
		    ("%s %d\n", __func__, __LINE__));
		cdp->cdp_dirents[dm->dm_idx] = de;
		KASSERT(de->de_cdp != (void *)0xdeadc0de,
		    ("%s %d\n", __func__, __LINE__));
		dev_unlock();
		return (1);
	}
	dev_unlock();
	return (0);
}
500
501 /*
502 * The caller needs to hold the dm for the duration of the call.
503 */
504 void
505 devfs_populate(struct devfs_mount *dm)
506 {
507 unsigned gen;
508
509 sx_assert(&dm->dm_lock, SX_XLOCKED);
510 gen = devfs_generation;
511 if (dm->dm_generation == gen)
512 return;
513 while (devfs_populate_loop(dm, 0))
514 continue;
515 dm->dm_generation = gen;
516 }
517
518 /*
519 * The caller needs to hold the dm for the duration of the call.
520 */
521 void
522 devfs_cleanup(struct devfs_mount *dm)
523 {
524
525 sx_assert(&dm->dm_lock, SX_XLOCKED);
526 while (devfs_populate_loop(dm, 1))
527 continue;
528 devfs_purge(dm, dm->dm_rootdir);
529 }
530
/*
 * devfs_create() and devfs_destroy() are called from kern_conf.c and
 * in both cases the devlock() mutex is held, so no further locking
 * is necessary and no sleeping is allowed.
 */
536
void
devfs_create(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	/* Mark the device visible and assign it an inode number. */
	cdp->cdp_flags |= CDP_ACTIVE;
	cdp->cdp_inode = alloc_unrl(devfs_inos);
	/* Reference owned by the cdevp_list entry (see dev_rel in the GC). */
	dev_refl(dev);
	TAILQ_INSERT_TAIL(&cdevp_list, cdp, cdp_list);
	/* Tell every mount a rescan is needed. */
	devfs_generation++;
}
550
void
devfs_destroy(struct cdev *dev)
{
	struct cdev_priv *cdp;

	mtx_assert(&devmtx, MA_OWNED);
	cdp = cdev2priv(dev);
	/*
	 * Just mark the device inactive; the next populate pass of each
	 * mount removes its dirents and eventually GCs the entry.
	 */
	cdp->cdp_flags &= ~CDP_ACTIVE;
	devfs_generation++;
}
561
562 ino_t
563 devfs_alloc_cdp_inode(void)
564 {
565
566 return (alloc_unr(devfs_inos));
567 }
568
569 void
570 devfs_free_cdp_inode(ino_t ino)
571 {
572
573 if (ino > 0)
574 free_unr(devfs_inos, ino);
575 }
576
/*
 * Set up the unit-number allocator that hands out devfs inode numbers,
 * starting just above the root inode; serialized by devmtx.
 */
static void
devfs_devs_init(void *junk __unused)
{

	devfs_inos = new_unrhdr(DEVFS_ROOTINO + 1, INT_MAX, &devmtx);
}
583
584 SYSINIT(devfs_devs, SI_SUB_DEVFS, SI_ORDER_FIRST, devfs_devs_init, NULL);
/* Cache object: 40ce4e122fe5721b2f18580b45a5e0e1 */