FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_conf.c
1 /*-
2 * Copyright (c) 1999-2002 Poul-Henning Kamp
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/systm.h>
33 #include <sys/bio.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/module.h>
37 #include <sys/malloc.h>
38 #include <sys/conf.h>
39 #include <sys/vnode.h>
40 #include <sys/queue.h>
41 #include <sys/poll.h>
42 #include <sys/ctype.h>
43 #include <sys/tty.h>
44 #include <sys/ucred.h>
45 #include <machine/stdarg.h>
46
47 #include <fs/devfs/devfs_int.h>
48
49 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
50
51 struct mtx devmtx;
52 static void destroy_devl(struct cdev *dev);
53 static struct cdev *make_dev_credv(struct cdevsw *devsw, int minornr,
54 struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
55 va_list ap);
56
/*
 * Acquire the global cdev mutex (devmtx), which protects all cdev
 * lists, reference counts and cdevsw initialization state.
 */
void
dev_lock(void)
{

	mtx_lock(&devmtx);
}
63
/*
 * Release the global cdev mutex (devmtx).
 */
void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}
70
/*
 * Bump the reference count of a cdev.  The caller must NOT hold
 * devmtx; it is taken and released internally.
 */
void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}
80
/*
 * Locked variant of dev_ref(): bump the reference count of a cdev
 * while the caller already holds devmtx.
 */
void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}
88
/*
 * Drop a reference on a cdev.  If the device has already lost its
 * cdevsw (it was destroyed while references remained) and this was
 * the last reference, unlink it and free the devfs node.  The caller
 * must NOT hold devmtx.
 */
void
dev_rel(struct cdev *dev)
{
	int flag = 0;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		/* Unlink under the lock; free only after dropping it. */
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	if (flag)
		devfs_free(dev);
}
113
/*
 * Acquire a per-thread reference on the device's cdevsw so that a
 * driver method may be invoked safely.  Returns NULL if the device
 * no longer has a cdevsw (e.g. it has been destroyed).  A non-NULL
 * return must be paired with dev_relthread().
 */
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL)
		dev->si_threadcount++;
	dev_unlock();
	return (csw);
}
127
128 struct cdevsw *
129 devvn_refthread(struct vnode *vp, struct cdev **devp)
130 {
131 struct cdevsw *csw;
132
133 mtx_assert(&devmtx, MA_NOTOWNED);
134 csw = NULL;
135 dev_lock();
136 *devp = vp->v_rdev;
137 if (*devp != NULL) {
138 csw = (*devp)->si_devsw;
139 if (csw != NULL)
140 (*devp)->si_threadcount++;
141 }
142 dev_unlock();
143 return (csw);
144 }
145
/*
 * Release a per-thread reference obtained via dev_refthread() or
 * devvn_refthread().
 */
void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_threadcount--;
	dev_unlock();
}
155
/*
 * Generic no-op entry point: succeed without doing anything.
 */
int
nullop(void)
{

	return (0);
}
162
/*
 * Generic entry point that fails with EOPNOTSUPP.
 */
int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}
169
/* Always fail with ENXIO; used for the dead_* method stubs below. */
static int
enxio(void)
{
	return (ENXIO);
}
175
/* Always fail with ENODEV; used for default/no-op method stubs below. */
static int
enodev(void)
{
	return (ENODEV);
}
181
182 /* Define a dead_cdevsw for use when devices leave unexpectedly. */
183
184 #define dead_open (d_open_t *)enxio
185 #define dead_close (d_close_t *)enxio
186 #define dead_read (d_read_t *)enxio
187 #define dead_write (d_write_t *)enxio
188 #define dead_ioctl (d_ioctl_t *)enxio
189 #define dead_poll (d_poll_t *)enodev
190 #define dead_mmap (d_mmap_t *)enodev
191
/* Strategy method for departed devices: complete the bio with ENXIO. */
static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}
198
199 #define dead_dump (dumper_t *)enxio
200 #define dead_kqfilter (d_kqfilter_t *)enxio
201
/*
 * The cdevsw installed on devices whose driver has gone away: every
 * entry point fails (see the dead_* stubs above).
 */
static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};
217
218 /* Default methods if driver does not specify method */
219
220 #define null_open (d_open_t *)nullop
221 #define null_close (d_close_t *)nullop
222 #define no_read (d_read_t *)enodev
223 #define no_write (d_write_t *)enodev
224 #define no_ioctl (d_ioctl_t *)enodev
225 #define no_mmap (d_mmap_t *)enodev
226 #define no_kqfilter (d_kqfilter_t *)enodev
227
/* Default strategy method: complete the bio with ENODEV. */
static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}
234
235 static int
236 no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
237 {
238 /*
239 * Return true for read/write. If the user asked for something
240 * special, return POLLNVAL, so that clients have a way of
241 * determining reliably whether or not the extended
242 * functionality is present without hard-coding knowledge
243 * of specific filesystem implementations.
244 * Stay in sync with vop_nopoll().
245 */
246 if (events & ~POLLSTANDARD)
247 return (POLLNVAL);
248
249 return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
250 }
251
252 #define no_dump (dumper_t *)enodev
253
254 static int
255 giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
256 {
257 struct cdevsw *dsw;
258 int retval;
259
260 dsw = dev_refthread(dev);
261 if (dsw == NULL)
262 return (ENXIO);
263 mtx_lock(&Giant);
264 retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td);
265 mtx_unlock(&Giant);
266 dev_relthread(dev);
267 return (retval);
268 }
269
270 static int
271 giant_fdopen(struct cdev *dev, int oflags, struct thread *td, int fdidx)
272 {
273 struct cdevsw *dsw;
274 int retval;
275
276 dsw = dev_refthread(dev);
277 if (dsw == NULL)
278 return (ENXIO);
279 mtx_lock(&Giant);
280 retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fdidx);
281 mtx_unlock(&Giant);
282 dev_relthread(dev);
283 return (retval);
284 }
285
286 static int
287 giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
288 {
289 struct cdevsw *dsw;
290 int retval;
291
292 dsw = dev_refthread(dev);
293 if (dsw == NULL)
294 return (ENXIO);
295 mtx_lock(&Giant);
296 retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td);
297 mtx_unlock(&Giant);
298 dev_relthread(dev);
299 return (retval);
300 }
301
302 static void
303 giant_strategy(struct bio *bp)
304 {
305 struct cdevsw *dsw;
306 struct cdev *dev;
307
308 dev = bp->bio_dev;
309 dsw = dev_refthread(dev);
310 if (dsw == NULL) {
311 biofinish(bp, NULL, ENXIO);
312 return;
313 }
314 mtx_lock(&Giant);
315 dsw->d_gianttrick->d_strategy(bp);
316 mtx_unlock(&Giant);
317 dev_relthread(dev);
318 }
319
320 static int
321 giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
322 {
323 struct cdevsw *dsw;
324 int retval;
325
326 dsw = dev_refthread(dev);
327 if (dsw == NULL)
328 return (ENXIO);
329 mtx_lock(&Giant);
330 retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td);
331 mtx_unlock(&Giant);
332 dev_relthread(dev);
333 return (retval);
334 }
335
336 static int
337 giant_read(struct cdev *dev, struct uio *uio, int ioflag)
338 {
339 struct cdevsw *dsw;
340 int retval;
341
342 dsw = dev_refthread(dev);
343 if (dsw == NULL)
344 return (ENXIO);
345 mtx_lock(&Giant);
346 retval = dsw->d_gianttrick->d_read(dev, uio, ioflag);
347 mtx_unlock(&Giant);
348 dev_relthread(dev);
349 return (retval);
350 }
351
352 static int
353 giant_write(struct cdev *dev, struct uio *uio, int ioflag)
354 {
355 struct cdevsw *dsw;
356 int retval;
357
358 dsw = dev_refthread(dev);
359 if (dsw == NULL)
360 return (ENXIO);
361 mtx_lock(&Giant);
362 retval = dsw->d_gianttrick->d_write(dev, uio, ioflag);
363 mtx_unlock(&Giant);
364 dev_relthread(dev);
365 return (retval);
366 }
367
368 static int
369 giant_poll(struct cdev *dev, int events, struct thread *td)
370 {
371 struct cdevsw *dsw;
372 int retval;
373
374 dsw = dev_refthread(dev);
375 if (dsw == NULL)
376 return (ENXIO);
377 mtx_lock(&Giant);
378 retval = dsw->d_gianttrick->d_poll(dev, events, td);
379 mtx_unlock(&Giant);
380 dev_relthread(dev);
381 return (retval);
382 }
383
384 static int
385 giant_kqfilter(struct cdev *dev, struct knote *kn)
386 {
387 struct cdevsw *dsw;
388 int retval;
389
390 dsw = dev_refthread(dev);
391 if (dsw == NULL)
392 return (ENXIO);
393 mtx_lock(&Giant);
394 retval = dsw->d_gianttrick->d_kqfilter(dev, kn);
395 mtx_unlock(&Giant);
396 dev_relthread(dev);
397 return (retval);
398 }
399
400 static int
401 giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
402 {
403 struct cdevsw *dsw;
404 int retval;
405
406 dsw = dev_refthread(dev);
407 if (dsw == NULL)
408 return (ENXIO);
409 mtx_lock(&Giant);
410 retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot);
411 mtx_unlock(&Giant);
412 dev_relthread(dev);
413 return (retval);
414 }
415
416
417 /*
418 * struct cdev * and u_dev_t primitives
419 */
420
421 int
422 minor(struct cdev *x)
423 {
424 if (x == NULL)
425 return NODEV;
426 return(x->si_drv0 & MAXMINOR);
427 }
428
/*
 * Return the unit number of a cdev (decoded from its minor number),
 * or NODEV for a NULL device pointer.
 */
int
dev2unit(struct cdev *x)
{

	if (x == NULL)
		return NODEV;
	return (minor2unit(minor(x)));
}
437
/*
 * Decode a minor number into a unit number.  The minor encoding keeps
 * unit bits 0-7 in place and stores the remaining unit bits in minor
 * bits 16-31 (bits 8-15 are skipped); this is the inverse of
 * unit2minor().
 */
u_int
minor2unit(u_int _minor)
{

	KASSERT((_minor & ~MAXMINOR) == 0, ("Illegal minor %x", _minor));
	return ((_minor & 0xff) | ((_minor >> 8) & 0xffff00));
}
445
/*
 * Encode a unit number (at most 24 bits) into a minor number: unit
 * bits 0-7 stay in place, unit bits 8-23 are shifted up into minor
 * bits 16-31.  Inverse of minor2unit().
 */
int
unit2minor(int unit)
{

	KASSERT(unit <= 0xffffff, ("Invalid unit (%d) in unit2minor", unit));
	return ((unit & 0xff) | ((unit << 8) & ~0xffff));
}
453
/*
 * Find or create the cdev with minor number 'y' on cdevsw 'csw'.
 * If a device with that minor already exists, the caller-supplied
 * candidate 'si' is freed and the existing device returned; otherwise
 * 'si' is initialized and linked onto the cdevsw's device list.
 * Must be called with devmtx held.
 */
static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
	struct cdev *si2;
	dev_t udev;

	mtx_assert(&devmtx, MA_OWNED);
	udev = y;
	LIST_FOREACH(si2, &csw->d_devs, si_list) {
		if (si2->si_drv0 == udev) {
			devfs_free(si);
			return (si2);
		}
	}
	si->si_drv0 = udev;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}
473
/* Extract the minor bits of a user-visible dev_t. */
int
uminor(dev_t dev)
{
	return (dev & MAXMINOR);
}
479
/* Extract the major bits of a user-visible dev_t. */
int
umajor(dev_t dev)
{
	return ((dev & ~MAXMINOR) >> 8);
}
485
486 static void
487 fini_cdevsw(struct cdevsw *devsw)
488 {
489 struct cdevsw *gt;
490
491 if (devsw->d_gianttrick != NULL) {
492 gt = devsw->d_gianttrick;
493 memcpy(devsw, gt, sizeof *devsw);
494 free(gt, M_DEVT);
495 devsw->d_gianttrick = NULL;
496 }
497 devsw->d_flags &= ~D_INIT;
498 }
499
/*
 * One-time preparation of a driver's cdevsw before first use:
 * disable drivers compiled against the wrong interface version,
 * install tty defaults for D_TTY drivers, fill in no-op defaults for
 * missing methods, and wrap all methods in the giant_* trampolines
 * for D_NEEDGIANT drivers (the originals are preserved in a malloc'ed
 * copy hung off d_gianttrick).  Sets D_INIT when done.
 */
static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	/* Allocate the pristine copy before taking devmtx (M_WAITOK). */
	if (devsw->d_flags & D_NEEDGIANT)
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
	else
		dsw2 = NULL;
	dev_lock();

	if (devsw->d_version != D_VERSION_01) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled. Recompile KLD module.");
		/* Point every method at the always-failing dead_* stubs. */
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	/* tty drivers get the generic tty methods for anything unset. */
	if (devsw->d_flags & D_TTY) {
		if (devsw->d_ioctl == NULL)	devsw->d_ioctl = ttyioctl;
		if (devsw->d_read == NULL)	devsw->d_read = ttyread;
		if (devsw->d_write == NULL)	devsw->d_write = ttywrite;
		if (devsw->d_kqfilter == NULL)	devsw->d_kqfilter = ttykqfilter;
		if (devsw->d_poll == NULL)	devsw->d_poll = ttypoll;
	}

	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			/* Preserve the original methods for the trampolines. */
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
		} else
			free(dsw2, M_DEVT);
	}

	/* Missing method -> no-op default; present + D_NEEDGIANT -> trampoline. */
#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	dev_unlock();
}
572
/*
 * Common backend for make_dev() and make_dev_cred(): create (or, for
 * cheap clones, reuse) the cdev with the given minor number on
 * 'devsw', name it from the format string, record ownership/mode and
 * register it with devfs.  'cr' is only honoured when MAC is
 * compiled in.  Returns the (referenced by devfs) cdev.
 */
static struct cdev *
make_dev_credv(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, va_list ap)
{
	struct cdev *dev;
	int i;

	KASSERT((minornr & ~MAXMINOR) == 0,
	    ("Invalid minor (0x%x) in make_dev", minornr));

	if (!(devsw->d_flags & D_INIT))
		prep_cdevsw(devsw);
	dev = devfs_alloc();
	dev_lock();
	dev = newdev(devsw, minornr, dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock();
		return (dev);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, minor(dev), devtoname(dev)));

	/* Format the name in-place; warn (but continue) on truncation. */
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
#ifdef MAC
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	else
#endif
		dev->si_cred = NULL;
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	dev_unlock();
	return (dev);
}
623
/*
 * Create a named device node with the given minor number, owner,
 * group and mode.  Varargs wrapper around make_dev_credv() with no
 * credential.
 */
struct cdev *
make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(devsw, minornr, NULL, uid, gid, mode, fmt, ap);
	va_end(ap);
	return (dev);
}
636
/*
 * Same as make_dev(), but additionally associates the credential 'cr'
 * with the new device (used by MAC; see make_dev_credv()).
 */
struct cdev *
make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(devsw, minornr, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	return (dev);
}
650
/*
 * Record that 'cdev' depends on 'pdev' (so destroying the parent also
 * destroys the child).  Caller holds devmtx.
 */
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}
659
660
/*
 * Unlocked wrapper for dev_dependsl(): make 'cdev' a child of 'pdev'.
 */
void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}
669
/*
 * Create an alias (additional name) for an existing device.  The
 * alias is registered with devfs and made a dependent child of
 * 'pdev', so it is destroyed along with it.
 */
struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	/* Format the alias name; warn (but continue) on truncation. */
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	dev_unlock();
	dev_depends(pdev, dev);
	return (dev);
}
694
/*
 * Destroy a named device with devmtx held: unregister it from devfs,
 * detach it from parent/children/clone lists, wait out any threads
 * still inside driver methods, and either free it now or park it on
 * dead_cdevsw's list until the last reference is dropped.  Recurses
 * over dependent children.
 */
static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parents list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	/* Drain threads still inside driver methods (drivers with d_purge). */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		printf("Purging %lu threads from %s\n",
		    dev->si_threadcount, devtoname(dev));
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
	}
	if (csw != NULL && csw->d_purge != NULL)
		printf("All threads purged from %s\n", devtoname(dev));

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs))
			fini_cdevsw(csw);
	}
	dev->si_flags &= ~SI_ALIAS;

	if (dev->si_refcount > 0) {
		/* Still referenced: keep it on the dead list for dev_rel(). */
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		devfs_free(dev);
	}
}
756
/*
 * Public entry point: destroy a device created with make_dev() and
 * friends.  Takes devmtx around destroy_devl().
 */
void
destroy_dev(struct cdev *dev)
{

	dev_lock();
	destroy_devl(dev);
	dev_unlock();
}
765
/*
 * Return the device's name.  If the device is anonymous (name empty
 * or starting with '#'), synthesize a "(driver)/minor" fallback name
 * directly into the si_name buffer first.
 */
const char *
devtoname(struct cdev *dev)
{
	char *p;
	struct cdevsw *csw;
	int mynor;

	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
		p = dev->si_name;
		csw = dev_refthread(dev);
		if (csw != NULL) {
			sprintf(p, "(%s)", csw->d_name);
			dev_relthread(dev);
		}
		p += strlen(p);
		mynor = minor(dev);
		/* Minors outside 0-255 are printed in hex for readability. */
		if (mynor < 0 || mynor > 255)
			sprintf(p, "/%#x", (u_int)mynor);
		else
			sprintf(p, "/%d", mynor);
	}
	return (dev->si_name);
}
789
/*
 * Parse a clone-device name of the form <stem><decimal unit>[<rest>].
 *
 * Returns 0 if 'name' does not start with 'stem' followed by a valid
 * decimal unit number (no redundant leading zeros, at most 0xffffff);
 * 1 if the unit number ends the name; 2 if more characters follow the
 * unit, in which case *namep (when requested) points at them.  On
 * success *unit receives the parsed unit number.
 *
 * Note: the character constants here must be '0' (digit zero); the
 * empty constants in the previous revision were transcription damage
 * and did not compile.
 */
int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	/* Reject redundant leading zeros such as "tty01". */
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}
816
817 /*
818 * Helper functions for cloning device drivers.
819 *
820 * The objective here is to make it unnecessary for the device drivers to
821 * use rman or similar to manage their unit number space. Due to the way
822 * we do "on-demand" devices, using rman or other "private" methods
823 * will be very tricky to lock down properly once we lock down this file.
824 *
825 * Instead we give the drivers these routines which puts the struct cdev *'s
826 * that are to be managed on their own list, and gives the driver the ability
827 * to ask for the first free unit number or a given specified unit number.
828 *
829 * In addition these routines support paired devices (pty, nmdm and similar)
830 * by respecting a number of "flag" bits in the minor number.
831 *
832 */
833
/* Per-driver list of cloned devices; opaque to the driver itself. */
struct clonedevs {
	LIST_HEAD(,cdev)	head;
};
837
/*
 * Allocate and initialize a driver's clone-device list.  Must be
 * called before clone_create(); freed via clone_cleanup().
 */
void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}
845
/*
 * Look up or create a cloned device.  *up is the requested unit, or
 * -1 to allocate the lowest free unit; 'extra' carries driver flag
 * bits outside CLONE_UNITMASK.  On return *dp points at the device;
 * returns 0 when an existing device was found, 1 when a new one was
 * created (and *up updated with the unit actually used).
 */
int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));

	if (!(csw->d_flags & D_INIT))
		prep_cdevsw(csw);

	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *     in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *     the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc();
	dev_lock();
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			/* Exact match: hand back the existing device. */
			*dp = dev;
			devfs_free(ndev);
			dev_unlock();
			return (0);
		}
		if (unit == -1 && u == low) {
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit2minor(unit | extra), ndev);
	if (dev->si_flags & SI_CLONELIST) {
		/* Should be impossible: dump state before panicking. */
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	/* Insert at the position found above, keeping the list sorted. */
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock();
	return (1);
}
923
924 /*
925 * Kill everything still on the list. The driver should already have
926 * disposed of any softc hung off the struct cdev *'s at this time.
927 */
/*
 * Destroy every cloned device on the driver's clone list and free
 * the list itself.  The driver must already have released any per-
 * device state; *cdp is set to NULL on return.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev, *tdev;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	LIST_FOREACH_SAFE(dev, &cd->head, si_clone, tdev) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		KASSERT(dev->si_flags & SI_NAMED,
		    ("Driver has goofed in cloning underways udev %x", dev->si_drv0));
		destroy_devl(dev);
	}
	dev_unlock();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}
Cache object: b249e060ab3e4b4e33411ee415528c52
|