FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_conf.c
1 /*-
2 * Copyright (c) 1999-2002 Poul-Henning Kamp
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_conf.c 174389 2007-12-07 03:46:23Z thompsa $");
29
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/systm.h>
33 #include <sys/bio.h>
34 #include <sys/lock.h>
35 #include <sys/mutex.h>
36 #include <sys/module.h>
37 #include <sys/malloc.h>
38 #include <sys/conf.h>
39 #include <sys/vnode.h>
40 #include <sys/queue.h>
41 #include <sys/poll.h>
42 #include <sys/ctype.h>
43 #include <sys/tty.h>
44 #include <sys/ucred.h>
45 #include <machine/stdarg.h>
46
47 #include <fs/devfs/devfs_int.h>
48
49 static MALLOC_DEFINE(M_DEVT, "cdev", "cdev storage");
50
51 struct mtx devmtx;
52 static void destroy_devl(struct cdev *dev);
53 static struct cdev *make_dev_credv(struct cdevsw *devsw, int minornr,
54 struct ucred *cr, uid_t uid, gid_t gid, int mode, const char *fmt,
55 va_list ap);
56
/*
 * Acquire the global cdev mutex (devmtx).
 */
void
dev_lock(void)
{

	mtx_lock(&devmtx);
}
63
/*
 * Release the global cdev mutex (devmtx).
 */
void
dev_unlock(void)
{

	mtx_unlock(&devmtx);
}
70
/*
 * Take a reference on a cdev.  Caller must NOT hold devmtx; the
 * count is bumped under the mutex.
 */
void
dev_ref(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	mtx_lock(&devmtx);
	dev->si_refcount++;
	mtx_unlock(&devmtx);
}
80
/*
 * Take a reference on a cdev with devmtx already held (locked
 * variant of dev_ref()).
 */
void
dev_refl(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_OWNED);
	dev->si_refcount++;
}
88
/*
 * Drop a reference on a cdev.  If this was the last reference and the
 * device has already lost its cdevsw (destroy_dev() ran while
 * references were still held), unlink the cdev and hand it back to
 * devfs for freeing.
 */
void
dev_rel(struct cdev *dev)
{
	int flag = 0;	/* set when the cdev must be freed after unlock */

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_refcount--;
	KASSERT(dev->si_refcount >= 0,
	    ("dev_rel(%s) gave negative count", devtoname(dev)));
#if 0
	if (dev->si_usecount == 0 &&
	    (dev->si_flags & SI_CHEAPCLONE) && (dev->si_flags & SI_NAMED))
		;
	else
#endif
	if (dev->si_devsw == NULL && dev->si_refcount == 0) {
		LIST_REMOVE(dev, si_list);
		flag = 1;
	}
	dev_unlock();
	/* Free outside devmtx. */
	if (flag)
		devfs_free(dev);
}
113
/*
 * Return the device's cdevsw and account an in-progress thread on it,
 * or NULL if the device has no cdevsw (e.g. already destroyed).
 * A non-NULL return must be balanced with dev_relthread().
 */
struct cdevsw *
dev_refthread(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	csw = dev->si_devsw;
	if (csw != NULL)
		dev->si_threadcount++;
	dev_unlock();
	return (csw);
}
127
/*
 * As dev_refthread(), but starting from a vnode: look up v_rdev under
 * devmtx, return it through *devp, and account a thread on its cdevsw.
 * Returns NULL (and may set *devp to NULL) when no device or no cdevsw
 * is attached.
 */
struct cdevsw *
devvn_refthread(struct vnode *vp, struct cdev **devp)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_NOTOWNED);
	csw = NULL;
	dev_lock();
	*devp = vp->v_rdev;
	if (*devp != NULL) {
		csw = (*devp)->si_devsw;
		if (csw != NULL)
			(*devp)->si_threadcount++;
	}
	dev_unlock();
	return (csw);
}
145
/*
 * Release the per-thread accounting taken by dev_refthread() /
 * devvn_refthread().
 */
void
dev_relthread(struct cdev *dev)
{

	mtx_assert(&devmtx, MA_NOTOWNED);
	dev_lock();
	dev->si_threadcount--;
	dev_unlock();
}
155
/*
 * Generic null operation: unconditionally reports success.
 */
int
nullop(void)
{

	return (0);
}
162
/*
 * Generic stub for unimplemented operations: always fails with
 * EOPNOTSUPP.
 */
int
eopnotsupp(void)
{

	return (EOPNOTSUPP);
}
169
/* Stub method: always fails with ENXIO (device not configured). */
static int
enxio(void)
{

	return (ENXIO);
}
175
/* Stub method: always fails with ENODEV (operation not supported). */
static int
enodev(void)
{

	return (ENODEV);
}
181
182 /* Define a dead_cdevsw for use when devices leave unexpectedly. */
183
184 #define dead_open (d_open_t *)enxio
185 #define dead_close (d_close_t *)enxio
186 #define dead_read (d_read_t *)enxio
187 #define dead_write (d_write_t *)enxio
188 #define dead_ioctl (d_ioctl_t *)enxio
189 #define dead_poll (d_poll_t *)enodev
190 #define dead_mmap (d_mmap_t *)enodev
191
/*
 * d_strategy for departed devices: complete the bio immediately
 * with ENXIO.
 */
static void
dead_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENXIO);
}
198
199 #define dead_dump (dumper_t *)enxio
200 #define dead_kqfilter (d_kqfilter_t *)enxio
201
/* cdevsw installed on cdevs whose driver has gone away. */
static struct cdevsw dead_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT, /* XXX: does dead_strategy need this ? */
	.d_open =	dead_open,
	.d_close =	dead_close,
	.d_read =	dead_read,
	.d_write =	dead_write,
	.d_ioctl =	dead_ioctl,
	.d_poll =	dead_poll,
	.d_mmap =	dead_mmap,
	.d_strategy =	dead_strategy,
	.d_name =	"dead",
	.d_dump =	dead_dump,
	.d_kqfilter =	dead_kqfilter
};
217
218 /* Default methods if driver does not specify method */
219
220 #define null_open (d_open_t *)nullop
221 #define null_close (d_close_t *)nullop
222 #define no_read (d_read_t *)enodev
223 #define no_write (d_write_t *)enodev
224 #define no_ioctl (d_ioctl_t *)enodev
225 #define no_mmap (d_mmap_t *)enodev
226 #define no_kqfilter (d_kqfilter_t *)enodev
227
/*
 * Default d_strategy for drivers that provide none: fail the bio
 * with ENODEV.
 */
static void
no_strategy(struct bio *bp)
{

	biofinish(bp, NULL, ENODEV);
}
234
/*
 * Default d_poll for drivers that provide none.
 */
static int
no_poll(struct cdev *dev __unused, int events, struct thread *td __unused)
{
	/*
	 * Return true for read/write.  If the user asked for something
	 * special, return POLLNVAL, so that clients have a way of
	 * determining reliably whether or not the extended
	 * functionality is present without hard-coding knowledge
	 * of specific filesystem implementations.
	 * Stay in sync with vop_nopoll().
	 */
	if (events & ~POLLSTANDARD)
		return (POLLNVAL);

	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}
251
252 #define no_dump (dumper_t *)enodev
253
/* D_NEEDGIANT shim: run the driver's real d_open with Giant held. */
static int
giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_open(dev, oflags, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}
265
/* D_NEEDGIANT shim: run the driver's real d_fdopen with Giant held. */
static int
giant_fdopen(struct cdev *dev, int oflags, struct thread *td, int fdidx)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_fdopen(dev, oflags, td, fdidx);
	mtx_unlock(&Giant);
	return (retval);
}
277
/* D_NEEDGIANT shim: run the driver's real d_close with Giant held. */
static int
giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_close(dev, fflag, devtype, td);
	mtx_unlock(&Giant);
	return (retval);
}
289
/* D_NEEDGIANT shim: run the driver's real d_strategy with Giant held. */
static void
giant_strategy(struct bio *bp)
{

	mtx_lock(&Giant);
	bp->bio_dev->si_devsw->d_gianttrick->
	    d_strategy(bp);
	mtx_unlock(&Giant);
}
299
/* D_NEEDGIANT shim: run the driver's real d_ioctl with Giant held. */
static int
giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_ioctl(dev, cmd, data, fflag, td);
	mtx_unlock(&Giant);
	return (retval);
}
311
312 static int
313 giant_read(struct cdev *dev, struct uio *uio, int ioflag)
314 {
315 int retval;
316
317 mtx_lock(&Giant);
318 retval = dev->si_devsw->d_gianttrick->
319 d_read(dev, uio, ioflag);
320 mtx_unlock(&Giant);
321 return (retval);
322 }
323
324 static int
325 giant_write(struct cdev *dev, struct uio *uio, int ioflag)
326 {
327 int retval;
328
329 mtx_lock(&Giant);
330 retval = dev->si_devsw->d_gianttrick->
331 d_write(dev, uio, ioflag);
332 mtx_unlock(&Giant);
333 return (retval);
334 }
335
/* D_NEEDGIANT shim: run the driver's real d_poll with Giant held. */
static int
giant_poll(struct cdev *dev, int events, struct thread *td)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_poll(dev, events, td);
	mtx_unlock(&Giant);
	return (retval);
}
347
/* D_NEEDGIANT shim: run the driver's real d_kqfilter with Giant held. */
static int
giant_kqfilter(struct cdev *dev, struct knote *kn)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_kqfilter(dev, kn);
	mtx_unlock(&Giant);
	return (retval);
}
359
/* D_NEEDGIANT shim: run the driver's real d_mmap with Giant held. */
static int
giant_mmap(struct cdev *dev, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
	int retval;

	mtx_lock(&Giant);
	retval = dev->si_devsw->d_gianttrick->
	    d_mmap(dev, offset, paddr, nprot);
	mtx_unlock(&Giant);
	return (retval);
}
371
372
373 /*
374 * struct cdev * and u_dev_t primitives
375 */
376
/*
 * Extract the minor number bits from a cdev's si_drv0.
 * Returns NODEV for a NULL cdev.
 */
int
minor(struct cdev *x)
{
	if (x == NULL)
		return NODEV;
	return(x->si_drv0 & MAXMINOR);
}
384
/*
 * Return the unit number encoded in a cdev's minor number.
 * Returns NODEV for a NULL cdev.
 */
int
dev2unit(struct cdev *x)
{

	if (x == NULL)
		return NODEV;
	return (minor2unit(minor(x)));
}
393
/*
 * Convert a minor number to a unit number by removing the 8-bit gap
 * historically occupied by the major number: low 8 bits pass through,
 * the rest shift down by 8.  Inverse of unit2minor().
 */
u_int
minor2unit(u_int _minor)
{

	KASSERT((_minor & ~MAXMINOR) == 0, ("Illegal minor %x", _minor));
	return ((_minor & 0xff) | ((_minor >> 8) & 0xffff00));
}
401
/*
 * Convert a unit number (at most 24 bits) to a minor number by
 * re-inserting the 8-bit major gap.  Inverse of minor2unit().
 */
int
unit2minor(int unit)
{

	KASSERT(unit <= 0xffffff, ("Invalid unit (%d) in unit2minor", unit));
	return ((unit & 0xff) | ((unit << 8) & ~0xffff));
}
409
/*
 * Look up or register a cdev for (csw, minor y).  Must be called with
 * devmtx held.  If a cdev with the same si_drv0 already exists on the
 * cdevsw's device list, the caller-supplied candidate 'si' is released
 * back to devfs and the existing cdev returned; otherwise 'si' is
 * initialized, linked in, and returned.
 */
static struct cdev *
newdev(struct cdevsw *csw, int y, struct cdev *si)
{
	struct cdev *si2;
	dev_t	udev;

	mtx_assert(&devmtx, MA_OWNED);
	udev = y;
	LIST_FOREACH(si2, &csw->d_devs, si_list) {
		if (si2->si_drv0 == udev) {
			devfs_free(si);
			return (si2);
		}
	}
	si->si_drv0 = udev;
	si->si_devsw = csw;
	LIST_INSERT_HEAD(&csw->d_devs, si, si_list);
	return (si);
}
429
/* Extract the minor bits of a userland dev_t. */
int
uminor(dev_t dev)
{
	return (dev & MAXMINOR);
}
435
/* Extract the major bits of a userland dev_t. */
int
umajor(dev_t dev)
{
	return ((dev & ~MAXMINOR) >> 8);
}
441
/*
 * Undo prep_cdevsw(): if a Giant trampoline copy was installed,
 * restore the driver's original methods from it and free the copy,
 * then clear D_INIT so a later make_dev() will re-prepare the cdevsw.
 */
static void
fini_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *gt;

	if (devsw->d_gianttrick != NULL) {
		gt = devsw->d_gianttrick;
		memcpy(devsw, gt, sizeof *devsw);
		free(gt, M_DEVT);
		devsw->d_gianttrick = NULL;
	}
	devsw->d_flags &= ~D_INIT;
}
455
/*
 * One-time preparation of a driver's cdevsw before its first device is
 * created: validate the version, fill in TTY and default methods, and
 * for D_NEEDGIANT drivers save the original methods in d_gianttrick and
 * substitute the giant_*() trampolines.  Sets D_INIT when done.
 */
static void
prep_cdevsw(struct cdevsw *devsw)
{
	struct cdevsw *dsw2;

	/* Pre-allocate before taking devmtx; M_WAITOK may sleep. */
	if (devsw->d_flags & D_NEEDGIANT)
		dsw2 = malloc(sizeof *dsw2, M_DEVT, M_WAITOK);
	else
		dsw2 = NULL;
	dev_lock();

	/* Mismatched ABI version: neuter the driver with dead_*() methods. */
	if (devsw->d_version != D_VERSION_01) {
		printf(
		    "WARNING: Device driver \"%s\" has wrong version %s\n",
		    devsw->d_name == NULL ? "???" : devsw->d_name,
		    "and is disabled.  Recompile KLD module.");
		devsw->d_open = dead_open;
		devsw->d_close = dead_close;
		devsw->d_read = dead_read;
		devsw->d_write = dead_write;
		devsw->d_ioctl = dead_ioctl;
		devsw->d_poll = dead_poll;
		devsw->d_mmap = dead_mmap;
		devsw->d_strategy = dead_strategy;
		devsw->d_dump = dead_dump;
		devsw->d_kqfilter = dead_kqfilter;
	}

	/* TTY drivers inherit the generic tty methods they did not set. */
	if (devsw->d_flags & D_TTY) {
		if (devsw->d_ioctl == NULL)	devsw->d_ioctl = ttyioctl;
		if (devsw->d_read == NULL)	devsw->d_read = ttyread;
		if (devsw->d_write == NULL)	devsw->d_write = ttywrite;
		if (devsw->d_kqfilter == NULL)	devsw->d_kqfilter = ttykqfilter;
		if (devsw->d_poll == NULL)	devsw->d_poll = ttypoll;
	}

	/*
	 * Snapshot the driver's real methods into d_gianttrick so the
	 * giant_*() trampolines can find them.  If someone raced us and
	 * already installed one, drop our pre-allocated copy.
	 */
	if (devsw->d_flags & D_NEEDGIANT) {
		if (devsw->d_gianttrick == NULL) {
			memcpy(dsw2, devsw, sizeof *dsw2);
			devsw->d_gianttrick = dsw2;
		} else
			free(dsw2, M_DEVT);
	}

/*
 * Fill in missing methods with a no-op/default, or wrap provided
 * methods with the Giant trampoline for D_NEEDGIANT drivers.
 */
#define FIXUP(member, noop, giant) 				\
	do {							\
		if (devsw->member == NULL) {			\
			devsw->member = noop;			\
		} else if (devsw->d_flags & D_NEEDGIANT)	\
			devsw->member = giant;			\
	}							\
	while (0)

	FIXUP(d_open,		null_open,	giant_open);
	FIXUP(d_fdopen,		NULL,		giant_fdopen);
	FIXUP(d_close,		null_close,	giant_close);
	FIXUP(d_read,		no_read,	giant_read);
	FIXUP(d_write,		no_write,	giant_write);
	FIXUP(d_ioctl,		no_ioctl,	giant_ioctl);
	FIXUP(d_poll,		no_poll,	giant_poll);
	FIXUP(d_mmap,		no_mmap,	giant_mmap);
	FIXUP(d_strategy,	no_strategy,	giant_strategy);
	FIXUP(d_kqfilter,	no_kqfilter,	giant_kqfilter);

	/* d_dump is never wrapped with Giant. */
	if (devsw->d_dump == NULL)	devsw->d_dump = no_dump;

	LIST_INIT(&devsw->d_devs);

	devsw->d_flags |= D_INIT;

	dev_unlock();
}
528
/*
 * Common backend for make_dev() and make_dev_cred(): create (or find)
 * the cdev for (devsw, minornr), name it from fmt/ap, set ownership and
 * mode, and register it with devfs.  'cr' is only honoured under
 * options MAC.  Returns the (possibly pre-existing) cdev.
 */
static struct cdev *
make_dev_credv(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, va_list ap)
{
	struct cdev *dev;
	int i;

	KASSERT((minornr & ~MAXMINOR) == 0,
	    ("Invalid minor (0x%x) in make_dev", minornr));

	if (!(devsw->d_flags & D_INIT))
		prep_cdevsw(devsw);
	/* Allocate the candidate cdev before taking devmtx. */
	dev = devfs_alloc();
	dev_lock();
	dev = newdev(devsw, minornr, dev);
	if (dev->si_flags & SI_CHEAPCLONE &&
	    dev->si_flags & SI_NAMED) {
		/*
		 * This is allowed as it removes races and generally
		 * simplifies cloning devices.
		 * XXX: still ??
		 */
		dev_unlock();
		return (dev);
	}
	KASSERT(!(dev->si_flags & SI_NAMED),
	    ("make_dev() by driver %s on pre-existing device (min=%x, name=%s)",
	    devsw->d_name, minor(dev), devtoname(dev)));

	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}

	dev->si_flags |= SI_NAMED;
#ifdef MAC
	if (cr != NULL)
		dev->si_cred = crhold(cr);
	else
#endif
		dev->si_cred = NULL;
	dev->si_uid = uid;
	dev->si_gid = gid;
	dev->si_mode = mode;

	devfs_create(dev);
	dev_unlock();
	return (dev);
}
579
/*
 * Create a device node without explicit credentials; varargs wrapper
 * around make_dev_credv() with cr == NULL.
 */
struct cdev *
make_dev(struct cdevsw *devsw, int minornr, uid_t uid, gid_t gid, int mode,
    const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(devsw, minornr, NULL, uid, gid, mode, fmt, ap);
	va_end(ap);
	return (dev);
}
592
/*
 * Create a device node with explicit credentials; varargs wrapper
 * around make_dev_credv().
 */
struct cdev *
make_dev_cred(struct cdevsw *devsw, int minornr, struct ucred *cr, uid_t uid,
    gid_t gid, int mode, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;

	va_start(ap, fmt);
	dev = make_dev_credv(devsw, minornr, cr, uid, gid, mode, fmt, ap);
	va_end(ap);

	return (dev);
}
606
/*
 * Record cdev as a child of pdev (locked variant; caller holds devmtx).
 * Children are destroyed automatically when the parent is destroyed.
 */
static void
dev_dependsl(struct cdev *pdev, struct cdev *cdev)
{

	cdev->si_parent = pdev;
	cdev->si_flags |= SI_CHILD;
	LIST_INSERT_HEAD(&pdev->si_children, cdev, si_siblings);
}
615
616
/*
 * Record cdev as a child of pdev, taking devmtx around the link-in.
 */
void
dev_depends(struct cdev *pdev, struct cdev *cdev)
{

	dev_lock();
	dev_dependsl(pdev, cdev);
	dev_unlock();
}
625
/*
 * Create an alias (second name) for an existing device.  The alias is
 * its own cdev marked SI_ALIAS, registered with devfs, and made a
 * child of pdev so it disappears when pdev is destroyed.
 */
struct cdev *
make_dev_alias(struct cdev *pdev, const char *fmt, ...)
{
	struct cdev *dev;
	va_list ap;
	int i;

	dev = devfs_alloc();
	dev_lock();
	dev->si_flags |= SI_ALIAS;
	dev->si_flags |= SI_NAMED;
	va_start(ap, fmt);
	i = vsnrprintf(dev->__si_namebuf, sizeof dev->__si_namebuf, 32, fmt, ap);
	if (i > (sizeof dev->__si_namebuf - 1)) {
		printf("WARNING: Device name truncated! (%s)\n",
		    dev->__si_namebuf);
	}
	va_end(ap);

	devfs_create(dev);
	dev_unlock();
	dev_depends(pdev, dev);
	return (dev);
}
650
/*
 * Tear down a named cdev with devmtx held: unregister from devfs,
 * recursively destroy children, wait out threads still inside the
 * driver (via d_purge), detach from the cdevsw, and either free the
 * cdev or park it on dead_cdevsw's list if references remain.
 */
static void
destroy_devl(struct cdev *dev)
{
	struct cdevsw *csw;

	mtx_assert(&devmtx, MA_OWNED);
	KASSERT(dev->si_flags & SI_NAMED,
	    ("WARNING: Driver mistake: destroy_dev on %d\n", minor(dev)));

	devfs_destroy(dev);

	/* Remove name marking */
	dev->si_flags &= ~SI_NAMED;

	/* If we are a child, remove us from the parents list */
	if (dev->si_flags & SI_CHILD) {
		LIST_REMOVE(dev, si_siblings);
		dev->si_flags &= ~SI_CHILD;
	}

	/* Kill our children */
	while (!LIST_EMPTY(&dev->si_children))
		destroy_devl(LIST_FIRST(&dev->si_children));

	/* Remove from clone list */
	if (dev->si_flags & SI_CLONELIST) {
		LIST_REMOVE(dev, si_clone);
		dev->si_flags &= ~SI_CLONELIST;
	}

	csw = dev->si_devsw;
	dev->si_devsw = NULL;	/* already NULL for SI_ALIAS */
	/*
	 * While threads are still inside the driver (si_threadcount),
	 * ask the driver to purge them and sleep briefly before
	 * rechecking.
	 */
	while (csw != NULL && csw->d_purge != NULL && dev->si_threadcount) {
		printf("Purging %lu threads from %s\n",
		    dev->si_threadcount, devtoname(dev));
		csw->d_purge(dev);
		msleep(csw, &devmtx, PRIBIO, "devprg", hz/10);
	}
	if (csw != NULL && csw->d_purge != NULL)
		printf("All threads purged from %s\n", devtoname(dev));

	dev->si_drv1 = 0;
	dev->si_drv2 = 0;
	bzero(&dev->__si_u, sizeof(dev->__si_u));

	if (!(dev->si_flags & SI_ALIAS)) {
		/* Remove from cdevsw list */
		LIST_REMOVE(dev, si_list);

		/* If cdevsw has no more struct cdev *'s, clean it */
		if (LIST_EMPTY(&csw->d_devs))
			fini_cdevsw(csw);
	}
	dev->si_flags &= ~SI_ALIAS;

	if (dev->si_refcount > 0) {
		/* Outstanding refs: dev_rel() frees it when they drop. */
		LIST_INSERT_HEAD(&dead_cdevsw.d_devs, dev, si_list);
	} else {
		devfs_free(dev);
	}
}
712
/*
 * Public entry point: destroy a device node, taking devmtx around
 * destroy_devl().
 */
void
destroy_dev(struct cdev *dev)
{

	dev_lock();
	destroy_devl(dev);
	dev_unlock();
}
721
/*
 * Return the device's name.  Anonymous devices (si_name empty or
 * starting with '#') get a synthetic "(drivername)/minor" string
 * written into si_name first.
 */
const char *
devtoname(struct cdev *dev)
{
	char *p;
	struct cdevsw *csw;
	int mynor;

	if (dev->si_name[0] == '#' || dev->si_name[0] == '\0') {
		p = dev->si_name;
		csw = dev_refthread(dev);
		if (csw != NULL) {
			sprintf(p, "(%s)", csw->d_name);
			dev_relthread(dev);
		}
		p += strlen(p);
		mynor = minor(dev);
		/* Large or negative minors are printed in hex. */
		if (mynor < 0 || mynor > 255)
			sprintf(p, "/%#x", (u_int)mynor);
		else
			sprintf(p, "/%d", mynor);
	}
	return (dev->si_name);
}
745
/*
 * Parse a clone-device name of the form "<stem><unit>[<rest>]".
 *
 * Returns 0 if 'name' does not begin with 'stem' immediately followed
 * by a decimal unit number (a leading zero such as "da01" is rejected
 * so names stay canonical, but "da0" alone is fine), 1 if the name is
 * exactly stem+unit, and 2 if characters follow the unit number.  On
 * success *unit receives the parsed number and, if namep is not NULL,
 * *namep points at the first character after the digits.
 *
 * Note: the character constants '0' below were lost as empty ''
 * constants in the previous revision, which does not compile; they
 * are restored here.
 */
int
dev_stdclone(char *name, char **namep, const char *stem, int *unit)
{
	int u, i;

	i = strlen(stem);
	if (bcmp(stem, name, i) != 0)
		return (0);
	if (!isdigit(name[i]))
		return (0);
	u = 0;
	if (name[i] == '0' && isdigit(name[i+1]))
		return (0);
	while (isdigit(name[i])) {
		u *= 10;
		u += name[i++] - '0';
	}
	/* Unit numbers are limited to 24 bits (see unit2minor()). */
	if (u > 0xffffff)
		return (0);
	*unit = u;
	if (namep)
		*namep = &name[i];
	if (name[i])
		return (2);
	return (1);
}
772
773 /*
774 * Helper functions for cloning device drivers.
775 *
776 * The objective here is to make it unnecessary for the device drivers to
777 * use rman or similar to manage their unit number space. Due to the way
778 * we do "on-demand" devices, using rman or other "private" methods
779 * will be very tricky to lock down properly once we lock down this file.
780 *
781 * Instead we give the drivers these routines which puts the struct cdev *'s
782 * that are to be managed on their own list, and gives the driver the ability
783 * to ask for the first free unit number or a given specified unit number.
784 *
785 * In addition these routines support paired devices (pty, nmdm and similar)
786 * by respecting a number of "flag" bits in the minor number.
787 *
788 */
789
/* Per-driver list of cloned cdevs, kept sorted by unit number. */
struct clonedevs {
	LIST_HEAD(,cdev)	head;
};
793
/*
 * Allocate and initialize a driver's clonedevs list.  Must be called
 * before the first clone_create(); freed with clone_cleanup().
 */
void
clone_setup(struct clonedevs **cdp)
{

	*cdp = malloc(sizeof **cdp, M_DEVBUF, M_WAITOK | M_ZERO);
	LIST_INIT(&(*cdp)->head);
}
801
/*
 * Find or create a clone device for unit *up ( -1 means "lowest free
 * unit").  'extra' carries driver flag bits above CLONE_UNITMASK and is
 * OR'ed into the unit comparison.  Returns 0 with *dp set if the device
 * already existed, or 1 with *dp set to a freshly created cdev inserted
 * into the sorted clone list; *up is updated with the unit chosen.
 */
int
clone_create(struct clonedevs **cdp, struct cdevsw *csw, int *up, struct cdev **dp, int extra)
{
	struct clonedevs *cd;
	struct cdev *dev, *ndev, *dl, *de;
	int unit, low, u;

	KASSERT(*cdp != NULL,
	    ("clone_setup() not called in driver \"%s\"", csw->d_name));
	KASSERT(!(extra & CLONE_UNITMASK),
	    ("Illegal extra bits (0x%x) in clone_create", extra));
	KASSERT(*up <= CLONE_UNITMASK,
	    ("Too high unit (0x%x) in clone_create", *up));

	if (!(csw->d_flags & D_INIT))
		prep_cdevsw(csw);

	/*
	 * Search the list for a lot of things in one go:
	 *   A preexisting match is returned immediately.
	 *   The lowest free unit number if we are passed -1, and the place
	 *	 in the list where we should insert that new element.
	 *   The place to insert a specified unit number, if applicable
	 *	 the end of the list.
	 */
	unit = *up;
	ndev = devfs_alloc();
	dev_lock();
	low = extra;
	de = dl = NULL;
	cd = *cdp;
	LIST_FOREACH(dev, &cd->head, si_clone) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		u = dev2unit(dev);
		if (u == (unit | extra)) {
			/* Already exists: hand it back, drop the candidate. */
			*dp = dev;
			devfs_free(ndev);
			dev_unlock();
			return (0);
		}
		if (unit == -1 && u == low) {
			/* Unit 'low' is taken; keep scanning upward. */
			low++;
			de = dev;
			continue;
		} else if (u < (unit | extra)) {
			de = dev;	/* insert after this one */
			continue;
		} else if (u > (unit | extra)) {
			dl = dev;	/* insert before this one */
			break;
		}
	}
	if (unit == -1)
		unit = low & CLONE_UNITMASK;
	dev = newdev(csw, unit2minor(unit | extra), ndev);
	if (dev->si_flags & SI_CLONELIST) {
		/* Should be impossible: dump state before panicking. */
		printf("dev %p (%s) is on clonelist\n", dev, dev->si_name);
		printf("unit=%d, low=%d, extra=0x%x\n", unit, low, extra);
		LIST_FOREACH(dev, &cd->head, si_clone) {
			printf("\t%p %s\n", dev, dev->si_name);
		}
		panic("foo");
	}
	KASSERT(!(dev->si_flags & SI_CLONELIST),
	    ("Dev %p(%s) should not be on clonelist", dev, dev->si_name));
	/* Insert at the position found during the scan (sorted by unit). */
	if (dl != NULL)
		LIST_INSERT_BEFORE(dl, dev, si_clone);
	else if (de != NULL)
		LIST_INSERT_AFTER(de, dev, si_clone);
	else
		LIST_INSERT_HEAD(&cd->head, dev, si_clone);
	dev->si_flags |= SI_CLONELIST;
	*up = unit;
	dev_unlock();
	return (1);
}
879
880 /*
881 * Kill everything still on the list. The driver should already have
882 * disposed of any softc hung of the struct cdev *'s at this time.
883 */
/*
 * Kill everything still on the list.  The driver should already have
 * disposed of any softc hung of the struct cdev *'s at this time.
 * Destroys every remaining clone, then frees the clonedevs structure
 * and NULLs the caller's pointer.  Safe to call with *cdp == NULL.
 */
void
clone_cleanup(struct clonedevs **cdp)
{
	struct cdev *dev, *tdev;
	struct clonedevs *cd;

	cd = *cdp;
	if (cd == NULL)
		return;
	dev_lock();
	LIST_FOREACH_SAFE(dev, &cd->head, si_clone, tdev) {
		KASSERT(dev->si_flags & SI_CLONELIST,
		    ("Dev %p(%s) should be on clonelist", dev, dev->si_name));
		KASSERT(dev->si_flags & SI_NAMED,
		    ("Driver has goofed in cloning underways udev %x", dev->si_drv0));
		destroy_devl(dev);
	}
	dev_unlock();
	free(cd, M_DEVBUF);
	*cdp = NULL;
}
Cache object: 051bffc4a67cf0649d237f0b1267865c
|