FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_devsw.c
1 /* $NetBSD: subr_devsw.c,v 1.22.8.1 2009/02/07 02:32:37 snj Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2002, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by MAEKAWA Masahide <gehenna@NetBSD.org>, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Overview
34 *
35 * subr_devsw.c: registers device drivers by name and by major
36 * number, and provides wrapper methods for performing I/O and
37 * other tasks on device drivers, keying on the device number
38 * (dev_t).
39 *
40 * When the system is built, the config(8) command generates
41 * static tables of device drivers built into the kernel image
42 * along with their associated methods. These are recorded in
43 * the cdevsw0 and bdevsw0 tables. Drivers can also be added to
44 * and removed from the system dynamically.
45 *
46 * Allocation
47 *
48 * When the system initially boots only the statically allocated
49 * indexes (bdevsw0, cdevsw0) are used. If these overflow due to
50 * allocation, we allocate a fixed block of memory to hold the new,
51 * expanded index. This "fork" of the table is only ever performed
52 * once in order to guarantee that other threads may safely access
53 * the device tables:
54 *
55 * o Once a thread has a "reference" to the table via an earlier
56 * open() call, we know that the entry in the table must exist
57 * and so it is safe to access it.
58 *
59 * o Regardless of whether other threads see the old or new
60 * pointers, they will point to a correct device switch
61 * structure for the operation being performed.
62 *
63 * XXX Currently, the wrapper methods such as cdev_read() verify
64 * that a device driver does in fact exist before calling the
65 * associated driver method. This should be changed so that
66 * once the device is has been referenced by a vnode (opened),
67 * calling the other methods should be valid until that reference
68 * is dropped.
69 */
70
71 #include <sys/cdefs.h>
72 __KERNEL_RCSID(0, "$NetBSD: subr_devsw.c,v 1.22.8.1 2009/02/07 02:32:37 snj Exp $");
73
74 #include <sys/param.h>
75 #include <sys/conf.h>
76 #include <sys/kmem.h>
77 #include <sys/systm.h>
78 #include <sys/poll.h>
79 #include <sys/tty.h>
80 #include <sys/cpu.h>
81 #include <sys/buf.h>
82
83 #include <miscfs/specfs/specdev.h>
84
85 #ifdef DEVSW_DEBUG
86 #define DPRINTF(x) printf x
87 #else /* DEVSW_DEBUG */
88 #define DPRINTF(x)
89 #endif /* DEVSW_DEBUG */
90
91 #define MAXDEVSW 512 /* the maximum of major device number */
92 #define BDEVSW_SIZE (sizeof(struct bdevsw *))
93 #define CDEVSW_SIZE (sizeof(struct cdevsw *))
94 #define DEVSWCONV_SIZE (sizeof(struct devsw_conv))
95
96 extern const struct bdevsw **bdevsw, *bdevsw0[];
97 extern const struct cdevsw **cdevsw, *cdevsw0[];
98 extern struct devsw_conv *devsw_conv, devsw_conv0[];
99 extern const int sys_bdevsws, sys_cdevsws;
100 extern int max_bdevsws, max_cdevsws, max_devsw_convs;
101
102 static int bdevsw_attach(const struct bdevsw *, int *);
103 static int cdevsw_attach(const struct cdevsw *, int *);
104 static void devsw_detach_locked(const struct bdevsw *, const struct cdevsw *);
105
/*
 * Sanity-check the statically configured device switch tables at boot.
 *
 * sys_bdevsws/sys_cdevsws are the sizes of the config(8)-generated
 * bdevsw0[]/cdevsw0[] tables; they must leave room below MAXDEVSW for
 * at least one dynamically attached driver.
 */
void
devsw_init(void)
{

	KASSERT(sys_bdevsws < MAXDEVSW - 1);
	KASSERT(sys_cdevsws < MAXDEVSW - 1);
}
113
114 int
115 devsw_attach(const char *devname, const struct bdevsw *bdev, int *bmajor,
116 const struct cdevsw *cdev, int *cmajor)
117 {
118 struct devsw_conv *conv;
119 char *name;
120 int error, i;
121 size_t len;
122
123 if (devname == NULL || cdev == NULL)
124 return (EINVAL);
125
126 mutex_enter(&specfs_lock);
127
128 for (i = 0 ; i < max_devsw_convs ; i++) {
129 conv = &devsw_conv[i];
130 if (conv->d_name == NULL || strcmp(devname, conv->d_name) != 0)
131 continue;
132
133 if (*bmajor < 0)
134 *bmajor = conv->d_bmajor;
135 if (*cmajor < 0)
136 *cmajor = conv->d_cmajor;
137
138 if (*bmajor != conv->d_bmajor || *cmajor != conv->d_cmajor) {
139 error = EINVAL;
140 goto fail;
141 }
142 if ((*bmajor >= 0 && bdev == NULL) || *cmajor < 0) {
143 error = EINVAL;
144 goto fail;
145 }
146
147 if ((*bmajor >= 0 && bdevsw[*bmajor] != NULL) ||
148 cdevsw[*cmajor] != NULL) {
149 error = EEXIST;
150 goto fail;
151 }
152
153 if (bdev != NULL)
154 bdevsw[*bmajor] = bdev;
155 cdevsw[*cmajor] = cdev;
156
157 mutex_exit(&specfs_lock);
158 return (0);
159 }
160
161 error = bdevsw_attach(bdev, bmajor);
162 if (error != 0)
163 goto fail;
164 error = cdevsw_attach(cdev, cmajor);
165 if (error != 0) {
166 devsw_detach_locked(bdev, NULL);
167 goto fail;
168 }
169
170 for (i = 0 ; i < max_devsw_convs ; i++) {
171 if (devsw_conv[i].d_name == NULL)
172 break;
173 }
174 if (i == max_devsw_convs) {
175 struct devsw_conv *newptr;
176 int old, new;
177
178 old = max_devsw_convs;
179 new = old + 1;
180
181 newptr = kmem_zalloc(new * DEVSWCONV_SIZE, KM_NOSLEEP);
182 if (newptr == NULL) {
183 devsw_detach_locked(bdev, cdev);
184 error = ENOMEM;
185 goto fail;
186 }
187 newptr[old].d_name = NULL;
188 newptr[old].d_bmajor = -1;
189 newptr[old].d_cmajor = -1;
190 memcpy(newptr, devsw_conv, old * DEVSWCONV_SIZE);
191 if (devsw_conv != devsw_conv0)
192 kmem_free(devsw_conv, old * DEVSWCONV_SIZE);
193 devsw_conv = newptr;
194 max_devsw_convs = new;
195 }
196
197 len = strlen(devname) + 1;
198 name = kmem_alloc(len, KM_NOSLEEP);
199 if (name == NULL) {
200 devsw_detach_locked(bdev, cdev);
201 error = ENOMEM;
202 goto fail;
203 }
204 strlcpy(name, devname, len);
205
206 devsw_conv[i].d_name = name;
207 devsw_conv[i].d_bmajor = *bmajor;
208 devsw_conv[i].d_cmajor = *cmajor;
209
210 mutex_exit(&specfs_lock);
211 return (0);
212 fail:
213 mutex_exit(&specfs_lock);
214 return (error);
215 }
216
/*
 * Install a block driver into bdevsw[].
 *
 * => specfs_lock must be held by the caller.
 * => If *devmajor is negative, a free major is chosen dynamically and
 *    returned through the pointer; majors reserved in the conversion
 *    table are skipped so that a named re-attach can reclaim them.
 * => Returns 0 on success, ENOMEM if majors or memory are exhausted,
 *    EEXIST if the requested major is already occupied.
 */
static int
bdevsw_attach(const struct bdevsw *devsw, int *devmajor)
{
	const struct bdevsw **newptr;
	int bmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	/* Attaching a character device only is fine; nothing to do. */
	if (devsw == NULL)
		return (0);

	if (*devmajor < 0) {
		/*
		 * Dynamic assignment: scan above the static majors for
		 * a slot that is both empty in bdevsw[] and not
		 * reserved by any conversion-table entry.  If none is
		 * found, bmajor ends up == max_bdevsws and the table
		 * is grown below.
		 */
		for (bmajor = sys_bdevsws ; bmajor < max_bdevsws ; bmajor++) {
			if (bdevsw[bmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_bmajor == bmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = bmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("bdevsw_attach: block majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_bdevsws) {
		/*
		 * "Fork" the table: this happens at most once (hence
		 * the assertion that we still point at the static
		 * bdevsw0[]), growing straight to the MAXDEVSW limit.
		 * Entries are copied before the new pointer is
		 * published, and the old static table is never freed,
		 * so lock-free readers holding either pointer always
		 * see valid entries (see "Allocation" in the file
		 * header).
		 */
		KASSERT(bdevsw == bdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * BDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, bdevsw, max_bdevsws * BDEVSW_SIZE);
		bdevsw = newptr;
		max_bdevsws = MAXDEVSW;
	}

	if (bdevsw[*devmajor] != NULL)
		return (EEXIST);

	bdevsw[*devmajor] = devsw;

	return (0);
}
265
/*
 * Install a character driver into cdevsw[].
 *
 * => specfs_lock must be held by the caller.
 * => Unlike bdevsw_attach(), `devsw' may not be NULL: devsw_attach()
 *    requires a character driver.
 * => If *devmajor is negative, a free major is chosen dynamically and
 *    returned through the pointer; majors reserved in the conversion
 *    table are skipped.
 * => Returns 0 on success, ENOMEM if majors or memory are exhausted,
 *    EEXIST if the requested major is already occupied.
 */
static int
cdevsw_attach(const struct cdevsw *devsw, int *devmajor)
{
	const struct cdevsw **newptr;
	int cmajor, i;

	KASSERT(mutex_owned(&specfs_lock));

	if (*devmajor < 0) {
		/*
		 * Dynamic assignment: find a slot that is empty in
		 * cdevsw[] and not reserved by the conversion table.
		 * If none is found, cmajor == max_cdevsws and the
		 * table is grown below.
		 */
		for (cmajor = sys_cdevsws ; cmajor < max_cdevsws ; cmajor++) {
			if (cdevsw[cmajor] != NULL)
				continue;
			for (i = 0 ; i < max_devsw_convs ; i++) {
				if (devsw_conv[i].d_cmajor == cmajor)
					break;
			}
			if (i != max_devsw_convs)
				continue;
			break;
		}
		*devmajor = cmajor;
	}

	if (*devmajor >= MAXDEVSW) {
		printf("cdevsw_attach: character majors exhausted");
		return (ENOMEM);
	}

	if (*devmajor >= max_cdevsws) {
		/*
		 * One-time table "fork" to the MAXDEVSW limit; copy
		 * before publishing the pointer and keep the old
		 * static table alive for lock-free readers (see
		 * "Allocation" in the file header).
		 */
		KASSERT(cdevsw == cdevsw0);
		newptr = kmem_zalloc(MAXDEVSW * CDEVSW_SIZE, KM_NOSLEEP);
		if (newptr == NULL)
			return (ENOMEM);
		memcpy(newptr, cdevsw, max_cdevsws * CDEVSW_SIZE);
		cdevsw = newptr;
		max_cdevsws = MAXDEVSW;
	}

	if (cdevsw[*devmajor] != NULL)
		return (EEXIST);

	cdevsw[*devmajor] = devsw;

	return (0);
}
311
312 static void
313 devsw_detach_locked(const struct bdevsw *bdev, const struct cdevsw *cdev)
314 {
315 int i;
316
317 KASSERT(mutex_owned(&specfs_lock));
318
319 if (bdev != NULL) {
320 for (i = 0 ; i < max_bdevsws ; i++) {
321 if (bdevsw[i] != bdev)
322 continue;
323 bdevsw[i] = NULL;
324 break;
325 }
326 }
327 if (cdev != NULL) {
328 for (i = 0 ; i < max_cdevsws ; i++) {
329 if (cdevsw[i] != cdev)
330 continue;
331 cdevsw[i] = NULL;
332 break;
333 }
334 }
335 }
336
/*
 * Detach a block and/or character driver from the switch tables.
 * Locked wrapper around devsw_detach_locked(); always returns 0.
 */
int
devsw_detach(const struct bdevsw *bdev, const struct cdevsw *cdev)
{

	mutex_enter(&specfs_lock);
	devsw_detach_locked(bdev, cdev);
	mutex_exit(&specfs_lock);
	return 0;
}
346
347 /*
348 * Look up a block device by number.
349 *
350 * => Caller must ensure that the device is attached.
351 */
352 const struct bdevsw *
353 bdevsw_lookup(dev_t dev)
354 {
355 int bmajor;
356
357 if (dev == NODEV)
358 return (NULL);
359 bmajor = major(dev);
360 if (bmajor < 0 || bmajor >= max_bdevsws)
361 return (NULL);
362
363 return (bdevsw[bmajor]);
364 }
365
366 /*
367 * Look up a character device by number.
368 *
369 * => Caller must ensure that the device is attached.
370 */
371 const struct cdevsw *
372 cdevsw_lookup(dev_t dev)
373 {
374 int cmajor;
375
376 if (dev == NODEV)
377 return (NULL);
378 cmajor = major(dev);
379 if (cmajor < 0 || cmajor >= max_cdevsws)
380 return (NULL);
381
382 return (cdevsw[cmajor]);
383 }
384
385 /*
386 * Look up a block device by reference to its operations set.
387 *
388 * => Caller must ensure that the device is not detached, and therefore
389 * that the returned major is still valid when dereferenced.
390 */
391 int
392 bdevsw_lookup_major(const struct bdevsw *bdev)
393 {
394 int bmajor;
395
396 for (bmajor = 0 ; bmajor < max_bdevsws ; bmajor++) {
397 if (bdevsw[bmajor] == bdev)
398 return (bmajor);
399 }
400
401 return (-1);
402 }
403
404 /*
405 * Look up a character device by reference to its operations set.
406 *
407 * => Caller must ensure that the device is not detached, and therefore
408 * that the returned major is still valid when dereferenced.
409 */
410 int
411 cdevsw_lookup_major(const struct cdevsw *cdev)
412 {
413 int cmajor;
414
415 for (cmajor = 0 ; cmajor < max_cdevsws ; cmajor++) {
416 if (cdevsw[cmajor] == cdev)
417 return (cmajor);
418 }
419
420 return (-1);
421 }
422
423 /*
424 * Convert from block major number to name.
425 *
426 * => Caller must ensure that the device is not detached, and therefore
427 * that the name pointer is still valid when dereferenced.
428 */
429 const char *
430 devsw_blk2name(int bmajor)
431 {
432 const char *name;
433 int cmajor, i;
434
435 name = NULL;
436 cmajor = -1;
437
438 mutex_enter(&specfs_lock);
439 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
440 mutex_exit(&specfs_lock);
441 return (NULL);
442 }
443 for (i = 0 ; i < max_devsw_convs; i++) {
444 if (devsw_conv[i].d_bmajor == bmajor) {
445 cmajor = devsw_conv[i].d_cmajor;
446 break;
447 }
448 }
449 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
450 name = devsw_conv[i].d_name;
451 mutex_exit(&specfs_lock);
452
453 return (name);
454 }
455
456 /*
457 * Convert from device name to block major number.
458 *
459 * => Caller must ensure that the device is not detached, and therefore
460 * that the major number is still valid when dereferenced.
461 */
462 int
463 devsw_name2blk(const char *name, char *devname, size_t devnamelen)
464 {
465 struct devsw_conv *conv;
466 int bmajor, i;
467
468 if (name == NULL)
469 return (-1);
470
471 mutex_enter(&specfs_lock);
472 for (i = 0 ; i < max_devsw_convs ; i++) {
473 size_t len;
474
475 conv = &devsw_conv[i];
476 if (conv->d_name == NULL)
477 continue;
478 len = strlen(conv->d_name);
479 if (strncmp(conv->d_name, name, len) != 0)
480 continue;
481 if (*(name +len) && !isdigit(*(name + len)))
482 continue;
483 bmajor = conv->d_bmajor;
484 if (bmajor < 0 || bmajor >= max_bdevsws ||
485 bdevsw[bmajor] == NULL)
486 break;
487 if (devname != NULL) {
488 #ifdef DEVSW_DEBUG
489 if (strlen(conv->d_name) >= devnamelen)
490 printf("devsw_name2blk: too short buffer");
491 #endif /* DEVSW_DEBUG */
492 strncpy(devname, conv->d_name, devnamelen);
493 devname[devnamelen - 1] = '\0';
494 }
495 mutex_exit(&specfs_lock);
496 return (bmajor);
497 }
498
499 mutex_exit(&specfs_lock);
500 return (-1);
501 }
502
503 /*
504 * Convert from device name to char major number.
505 *
506 * => Caller must ensure that the device is not detached, and therefore
507 * that the major number is still valid when dereferenced.
508 */
509 int
510 devsw_name2chr(const char *name, char *devname, size_t devnamelen)
511 {
512 struct devsw_conv *conv;
513 int cmajor, i;
514
515 if (name == NULL)
516 return (-1);
517
518 mutex_enter(&specfs_lock);
519 for (i = 0 ; i < max_devsw_convs ; i++) {
520 size_t len;
521
522 conv = &devsw_conv[i];
523 if (conv->d_name == NULL)
524 continue;
525 len = strlen(conv->d_name);
526 if (strncmp(conv->d_name, name, len) != 0)
527 continue;
528 if (*(name +len) && !isdigit(*(name + len)))
529 continue;
530 cmajor = conv->d_cmajor;
531 if (cmajor < 0 || cmajor >= max_cdevsws ||
532 cdevsw[cmajor] == NULL)
533 break;
534 if (devname != NULL) {
535 #ifdef DEVSW_DEBUG
536 if (strlen(conv->d_name) >= devnamelen)
537 printf("devsw_name2chr: too short buffer");
538 #endif /* DEVSW_DEBUG */
539 strncpy(devname, conv->d_name, devnamelen);
540 devname[devnamelen - 1] = '\0';
541 }
542 mutex_exit(&specfs_lock);
543 return (cmajor);
544 }
545
546 mutex_exit(&specfs_lock);
547 return (-1);
548 }
549
550 /*
551 * Convert from character dev_t to block dev_t.
552 *
553 * => Caller must ensure that the device is not detached, and therefore
554 * that the major number is still valid when dereferenced.
555 */
556 dev_t
557 devsw_chr2blk(dev_t cdev)
558 {
559 int bmajor, cmajor, i;
560 dev_t rv;
561
562 cmajor = major(cdev);
563 bmajor = -1;
564 rv = NODEV;
565
566 mutex_enter(&specfs_lock);
567 if (cmajor < 0 || cmajor >= max_cdevsws || cdevsw[cmajor] == NULL) {
568 mutex_exit(&specfs_lock);
569 return (NODEV);
570 }
571 for (i = 0 ; i < max_devsw_convs ; i++) {
572 if (devsw_conv[i].d_cmajor == cmajor) {
573 bmajor = devsw_conv[i].d_bmajor;
574 break;
575 }
576 }
577 if (bmajor >= 0 && bmajor < max_bdevsws && bdevsw[bmajor] != NULL)
578 rv = makedev(bmajor, minor(cdev));
579 mutex_exit(&specfs_lock);
580
581 return (rv);
582 }
583
584 /*
585 * Convert from block dev_t to character dev_t.
586 *
587 * => Caller must ensure that the device is not detached, and therefore
588 * that the major number is still valid when dereferenced.
589 */
590 dev_t
591 devsw_blk2chr(dev_t bdev)
592 {
593 int bmajor, cmajor, i;
594 dev_t rv;
595
596 bmajor = major(bdev);
597 cmajor = -1;
598 rv = NODEV;
599
600 mutex_enter(&specfs_lock);
601 if (bmajor < 0 || bmajor >= max_bdevsws || bdevsw[bmajor] == NULL) {
602 mutex_exit(&specfs_lock);
603 return (NODEV);
604 }
605 for (i = 0 ; i < max_devsw_convs ; i++) {
606 if (devsw_conv[i].d_bmajor == bmajor) {
607 cmajor = devsw_conv[i].d_cmajor;
608 break;
609 }
610 }
611 if (cmajor >= 0 && cmajor < max_cdevsws && cdevsw[cmajor] != NULL)
612 rv = makedev(cmajor, minor(bdev));
613 mutex_exit(&specfs_lock);
614
615 return (rv);
616 }
617
618 /*
619 * Device access methods.
620 */
621
622 #define DEV_LOCK(d) \
623 if ((mpflag = (d->d_flag & D_MPSAFE)) == 0) { \
624 KERNEL_LOCK(1, NULL); \
625 }
626
627 #define DEV_UNLOCK(d) \
628 if (mpflag == 0) { \
629 KERNEL_UNLOCK_ONE(NULL); \
630 }
631
/*
 * Call the block driver's d_open method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
bdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = bdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
654
/*
 * Call the block driver's d_close method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
bdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
670
/*
 * Hand a buffer to the block driver's d_strategy method.
 * Panics if no driver is attached for bp->b_dev: strategy is only
 * called on devices that have been opened, so a missing driver
 * indicates table corruption or a detach that should not have
 * happened.
 */
void
bdev_strategy(struct buf *bp)
{
	const struct bdevsw *d;
	int mpflag;

	if ((d = bdevsw_lookup(bp->b_dev)) == NULL)
		panic("bdev_strategy");

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	(*d->d_strategy)(bp);
	DEV_UNLOCK(d);
}
684
/*
 * Call the block driver's d_ioctl method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
bdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct bdevsw *d;
	int rv, mpflag;

	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
700
/*
 * Call the block driver's d_dump method to write a crash dump.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
bdev_dump(dev_t dev, daddr_t addr, void *data, size_t sz)
{
	const struct bdevsw *d;
	int rv;

	/*
	 * Dump can be called without the device open.  Since it can
	 * currently only be called with the system paused (and in a
	 * potentially unstable state), we don't perform any locking.
	 */
	if ((d = bdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* DEV_LOCK(d); */
	rv = (*d->d_dump)(dev, addr, data, sz);
	/* DEV_UNLOCK(d); */

	return rv;
}
721
722 int
723 bdev_type(dev_t dev)
724 {
725 const struct bdevsw *d;
726
727 if ((d = bdevsw_lookup(dev)) == NULL)
728 return D_OTHER;
729 return d->d_flag & D_TYPEMASK;
730 }
731
/*
 * Call the character driver's d_open method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_open(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	/*
	 * For open we need to lock, in order to synchronize
	 * with attach/detach.
	 */
	mutex_enter(&specfs_lock);
	d = cdevsw_lookup(dev);
	mutex_exit(&specfs_lock);
	if (d == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_open)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
754
/*
 * Call the character driver's d_close method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_close(dev_t dev, int flag, int devtype, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_close)(dev, flag, devtype, l);
	DEV_UNLOCK(d);

	return rv;
}
770
/*
 * Call the character driver's d_read method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_read(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_read)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
786
/*
 * Call the character driver's d_write method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_write(dev_t dev, struct uio *uio, int flag)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_write)(dev, uio, flag);
	DEV_UNLOCK(d);

	return rv;
}
802
/*
 * Call the character driver's d_ioctl method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_ioctl)(dev, cmd, data, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
818
/*
 * Call the character driver's d_stop method for the tty's device.
 * Silently does nothing if no driver is attached at the major of
 * tp->t_dev.
 */
void
cdev_stop(struct tty *tp, int flag)
{
	const struct cdevsw *d;
	int mpflag;

	if ((d = cdevsw_lookup(tp->t_dev)) == NULL)
		return;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	(*d->d_stop)(tp, flag);
	DEV_UNLOCK(d);
}
832
833 struct tty *
834 cdev_tty(dev_t dev)
835 {
836 const struct cdevsw *d;
837
838 if ((d = cdevsw_lookup(dev)) == NULL)
839 return NULL;
840
841 /* XXX Check if necessary. */
842 if (d->d_tty == NULL)
843 return NULL;
844
845 return (*d->d_tty)(dev);
846 }
847
/*
 * Call the character driver's d_poll method for `dev'.
 * Returns POLLERR if no driver is attached at the major number.
 */
int
cdev_poll(dev_t dev, int flag, lwp_t *l)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return POLLERR;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_poll)(dev, flag, l);
	DEV_UNLOCK(d);

	return rv;
}
863
/*
 * Call the character driver's d_mmap method for `dev'.
 * Returns (paddr_t)-1 (the mmap failure value) if no driver is
 * attached at the major number.
 */
paddr_t
cdev_mmap(dev_t dev, off_t off, int flag)
{
	const struct cdevsw *d;
	paddr_t rv;
	int mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return (paddr_t)-1LL;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_mmap)(dev, off, flag);
	DEV_UNLOCK(d);

	return rv;
}
880
/*
 * Call the character driver's d_kqfilter method for `dev'.
 * Returns ENXIO if no driver is attached at the major number.
 */
int
cdev_kqfilter(dev_t dev, struct knote *kn)
{
	const struct cdevsw *d;
	int rv, mpflag;

	if ((d = cdevsw_lookup(dev)) == NULL)
		return ENXIO;

	/* Take the kernel lock unless the driver is D_MPSAFE. */
	DEV_LOCK(d);
	rv = (*d->d_kqfilter)(dev, kn);
	DEV_UNLOCK(d);

	return rv;
}
896
897 int
898 cdev_type(dev_t dev)
899 {
900 const struct cdevsw *d;
901
902 if ((d = cdevsw_lookup(dev)) == NULL)
903 return D_OTHER;
904 return d->d_flag & D_TYPEMASK;
905 }
Cache object: bfb04afc14d523083e073f7a690e1370
|