FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lkm.c
1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1994 Christopher G. Demetriou
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Terrence R. Lambert.
17 * 4. The name Terrence R. Lambert may not be used to endorse or promote
18 * products derived from this software without specific prior written
19 * permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY TERRENCE R. LAMBERT ``AS IS'' AND ANY
22 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE TERRENCE R. LAMBERT BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $FreeBSD$
34 */
35
36 #include "opt_devfs.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/tty.h>
42 #include <sys/conf.h>
43 #include <sys/fcntl.h>
44 #include <sys/kernel.h>
45 #include <sys/mount.h>
46 #include <sys/sysctl.h>
47 #include <sys/sysent.h>
48 #include <sys/exec.h>
49 #include <sys/lkm.h>
50 #include <sys/vnode.h>
51 #ifdef DEVFS
52 #include <sys/devfsext.h>
53 #endif /*DEVFS*/
54
55 #include <vm/vm.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_extern.h>
58
59
#define PAGESIZE 1024		/* kmem_alloc() allocation quantum */

/* lkm_v flags: exclusive-open bookkeeping for the lkm pseudo-device */
#define	LKM_ALLOC	0x01	/* device is currently held open */
#define	LKM_WANT	0x02	/* another opener is sleeping on lkm_v */

/* lkm_state: load/unload state machine driven by lkmcioctl() */
#define	LKMS_IDLE	0x00	/* nothing in progress */
#define	LKMS_RESERVED	0x01	/* LMRESERV done; memory reserved */
#define	LKMS_LOADING	0x02	/* LMLOADBUF copies partially complete */
#define	LKMS_LOADED	0x04	/* all bytes copied in; awaiting LMREADY */
#define	LKMS_UNLOADING	0x08	/* transient non-idle state for lkmunreserve() */

static int	lkm_v = 0;		/* LKM_ALLOC/LKM_WANT flags */
static int	lkm_state = LKMS_IDLE;	/* current state-machine state */

#ifndef MAXLKMS
#define	MAXLKMS		20
#endif

static struct lkm_table	lkmods[MAXLKMS];	/* table of loaded modules */
static struct lkm_table	*curp;			/* global for in-progress ops */

static int	_lkm_dev __P((struct lkm_table *lkmtp, int cmd));
static int	_lkm_exec __P((struct lkm_table *lkmtp, int cmd));
static int	_lkm_vfs __P((struct lkm_table *lkmtp, int cmd));
static int	_lkm_syscall __P((struct lkm_table *lkmtp, int cmd));
static void	lkmunreserve __P((void));

static d_open_t		lkmcopen;
static d_close_t	lkmcclose;
static d_ioctl_t	lkmcioctl;

#define CDEV_MAJOR 32
/* character device switch for the lkm control device (major 32) */
static struct cdevsw lkmc_cdevsw =
	{ lkmcopen,	lkmcclose,	noread,		nowrite,	/*32*/
	  lkmcioctl,	nostop,		nullreset,	nodevtotty,
	  seltrue,	nommap,		NULL,	"lkm",	NULL,	-1 };
96
97
/*ARGSUSED*/
/*
 * Open the lkm control device.  Access is exclusive: a second opener
 * either fails with EBUSY (non-blocking) or sleeps until the current
 * holder closes the device.  Returns 0 on success or an errno.
 */
static int
lkmcopen(dev, flag, devtype, p)
	dev_t dev;
	int flag;
	int devtype;
	struct proc *p;
{
	int error;

	if (minor(dev) != 0)
		return(ENXIO);		/* bad minor # */

	/*
	 * Use of the loadable kernel module device must be exclusive; we
	 * may try to remove this restriction later, but it's really no
	 * hardship.
	 */
	while (lkm_v & LKM_ALLOC) {
		if (flag & FNONBLOCK)		/* don't hang */
			return(EBUSY);
		lkm_v |= LKM_WANT;
		/*
		 * Sleep pending unlock; we use tsleep() to allow
		 * an alarm out of the open.  PCATCH lets a signal
		 * interrupt the wait.
		 */
		error = tsleep((caddr_t)&lkm_v, TTIPRI|PCATCH, "lkmopn", 0);
		if (error)
			return(error);	/* leave LKM_WANT set -- no problem */
	}
	/* loop re-tested LKM_ALLOC after each wakeup, so we now own it */
	lkm_v |= LKM_ALLOC;

	return(0);		/* pseudo-device open */
}
132
133 /*
134 * Unreserve the memory associated with the current loaded module; done on
135 * a coerced close of the lkm device (close on premature exit of modload)
136 * or explicitly by modload as a result of a link failure.
137 */
138 static void
139 lkmunreserve()
140 {
141
142 if (lkm_state == LKMS_IDLE)
143 return;
144
145 /*
146 * Actually unreserve the memory
147 */
148 if (curp && curp->area) {
149 kmem_free(kernel_map, curp->area, curp->size);/**/
150 curp->area = 0;
151 if (curp->private.lkm_any != NULL)
152 curp->private.lkm_any = NULL;
153 }
154
155 lkm_state = LKMS_IDLE;
156 }
157
/*
 * Close the lkm control device: undo an aborted load (module reserved
 * but never marked used), release the exclusive-open claim, and wake
 * anyone sleeping in lkmcopen().
 */
static int
lkmcclose(dev, flag, mode, p)
	dev_t dev;
	int flag;
	int mode;
	struct proc *p;
{

	if (!(lkm_v & LKM_ALLOC)) {
#ifdef DEBUG
		printf("LKM: close before open!\n");
#endif	/* DEBUG */
		return(EBADF);
	}

	/* do this before waking the herd... */
	if (curp && !curp->used) {
		/*
		 * If we close before setting used, we have aborted
		 * by way of error or by way of close-on-exit from
		 * a premature exit of "modload".
		 */
		lkmunreserve();	/* coerce state to LKM_IDLE */
	}

	lkm_v &= ~LKM_ALLOC;
	wakeup((caddr_t)&lkm_v);	/* thundering herd "problem" here */

	return(0);		/* pseudo-device closed */
}
188
/*ARGSUSED*/
/*
 * Control requests for the lkm device.  Implements the load protocol
 * used by modload(8):
 *
 *	LMRESERV   -> LMLOADBUF (repeated) -> LMREADY
 *
 * plus LMUNRESRV (abort a reservation), LMUNLOAD (unload by id or
 * name) and LMSTAT (query by id or name).  Load/unload requests
 * require FWRITE and securelevel <= 0; LMSTAT is open to readers.
 * State is kept in the file-scope globals curp/lkm_state, which is
 * safe only because the device is exclusive-open.
 */
static int
lkmcioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int err = 0;
	int i;
	struct lmc_resrv *resrvp;
	struct lmc_loadbuf *loadbufp;
	struct lmc_unload *unloadp;
	struct lmc_stat	 *statp;
	char istr[MAXLKMNAME];

	switch(cmd) {
	case LMRESERV:		/* reserve pages for a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		resrvp = (struct lmc_resrv *)data;

		/*
		 * Find a free slot.
		 */
		for (i = 0; i < MAXLKMS; i++)
			if (!lkmods[i].used)
				break;
		if (i == MAXLKMS) {
			err = ENOMEM;		/* no slots available */
			break;
		}
		curp = &lkmods[i];
		curp->id = i;		/* self reference slot offset */

		resrvp->slot = i;		/* return slot */

		/*
		 * Get memory for module
		 */
		curp->size = resrvp->size;

		/*
		 * NOTE(review): the kmem_alloc() result and the
		 * user-supplied size are not validated here; a failed
		 * allocation leaves curp->area == 0 -- confirm whether
		 * callers can reach LMLOADBUF in that state.
		 */
		curp->area = kmem_alloc(kernel_map, curp->size);/**/

		curp->offset = 0;		/* load offset */

		resrvp->addr = curp->area; /* ret kernel addr */

#ifdef DEBUG
		printf("LKM: LMRESERV (actual   = 0x%08lx)\n", curp->area);
		printf("LKM: LMRESERV (adjusted = 0x%08lx)\n",
			trunc_page(curp->area));
#endif	/* DEBUG */
		lkm_state = LKMS_RESERVED;
		break;

	case LMLOADBUF:		/* Copy in; stateful, follows LMRESERV */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		loadbufp = (struct lmc_loadbuf *)data;
		i = loadbufp->cnt;
		/* chunk must fit in the remaining reserved space */
		if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
		    || i < 0
		    || i > MODIOBUF
		    || i > curp->size - curp->offset) {
			err = ENOMEM;
			break;
		}

		/* copy in buffer full of data */
		err = copyin((caddr_t)loadbufp->data,
			    (caddr_t)(uintptr_t)(curp->area + curp->offset), i);
		if (err)
			break;

		/* LOADED once the whole reserved region has been filled */
		if ((curp->offset + i) < curp->size) {
			lkm_state = LKMS_LOADING;
#ifdef DEBUG
			printf(
			    "LKM: LMLOADBUF (loading @ %lu of %lu, i = %d)\n",
			    curp->offset, curp->size, i);
#endif	/* DEBUG */
		} else {
			lkm_state = LKMS_LOADED;
#ifdef DEBUG
			printf("LKM: LMLOADBUF (loaded)\n");
#endif	/* DEBUG */
		}
		curp->offset += i;
		break;

	case LMUNRESRV:		/* discard reserved pages for a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		lkmunreserve();	/* coerce state to LKM_IDLE */
#ifdef DEBUG
		printf("LKM: LMUNRESERV\n");
#endif	/* DEBUG */
		break;

	case LMREADY:		/* module loaded: call entry */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing or insecure */
			return EPERM;

		switch (lkm_state) {
		case LKMS_LOADED:
			break;
		case LKMS_LOADING:
			/* The remainder must be bss, so we clear it */
			bzero((caddr_t)(uintptr_t)(curp->area + curp->offset),
			      curp->size - curp->offset);
			break;
		default:

#ifdef DEBUG
			printf("lkm_state is %02x\n", lkm_state);
#endif	/* DEBUG */
			return ENXIO;
		}

		/* XXX gack: user passes the module entry point address */
		curp->entry = (int (*) __P((struct lkm_table *, int, int)))
				(*(uintfptr_t *)data);

		/* call entry(load)... (assigns "private" portion) */
		err = (*(curp->entry))(curp, LKM_E_LOAD, LKM_VERSION);
		if (err) {
			/*
			 * Module may refuse loading or may have a
			 * version mismatch...
			 */
			lkm_state = LKMS_UNLOADING;	/* for lkmunreserve */
			lkmunreserve();			/* free memory */
			curp->used = 0;			/* free slot */
			break;
		}
		/*
		 * It's possible for a user to load a module that doesn't
		 * initialize itself correctly. (You can even get away with
		 * using it for a while.) Unfortunately, we are faced with
		 * the following problems:
		 * - we can't tell a good module from a bad one until
		 *   after we've run its entry function (if the private
		 *   section is uninitalized after we return from the
		 *   entry, then something's fishy)
		 * - now that we've called the entry function, we can't
		 *   forcibly unload the module without risking a crash
		 * - since we don't know what the module's entry function
		 *   did, we can't easily clean up the mess it may have
		 *   made, so we can't know just how unstable the system
		 *   may be
		 * So, being stuck between a rock and a hard place, we
		 * have no choice but to do this...
		 */
		if (curp->private.lkm_any == NULL)
			panic("loadable module initialization failed");

		curp->used = 1;
#ifdef DEBUG
		printf("LKM: LMREADY\n");
#endif	/* DEBUG */
		lkm_state = LKMS_IDLE;
		break;

	case LMUNLOAD:		/* unload a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		unloadp = (struct lmc_unload *)data;

		if ((i = unloadp->id) == -1) {		/* unload by name */
			/*
			 * Copy name and lookup id from all loaded
			 * modules.  May fail.
			 */
			err = copyinstr(unloadp->name, istr, MAXLKMNAME-1, NULL);
			if (err)
				break;

			/*
			 * look up id...
			 */
			for (i = 0; i < MAXLKMS; i++) {
				if (!lkmods[i].used)
					continue;
				if (!strcmp(istr,
				        lkmods[i].private.lkm_any->lkm_name))
					break;
			}
		}

		/*
		 * Range check the value; on failure, return EINVAL
		 * (i == MAXLKMS also covers "name not found" above).
		 */
		if (i < 0 || i >= MAXLKMS) {
			err = EINVAL;
			break;
		}

		curp = &lkmods[i];

		if (!curp->used) {
			err = ENOENT;
			break;
		}

		/* call entry(unload); the module may veto its removal */
		if ((*(curp->entry))(curp, LKM_E_UNLOAD, LKM_VERSION)) {
			err = EBUSY;
			break;
		}

		lkm_state = LKMS_UNLOADING;	/* non-idle for lkmunreserve */
		lkmunreserve();			/* free memory */
		curp->used = 0;			/* free slot */
		break;

	case LMSTAT:		/* stat a module by id/name */
		/* allow readers and writers to stat */

		statp = (struct lmc_stat *)data;

		if ((i = statp->id) == -1) {		/* stat by name */
			/*
			 * Copy name and lookup id from all loaded
			 * modules.  (statp->name was already copied in
			 * with the ioctl data, hence copystr, not
			 * copyinstr.)
			 */
			copystr(statp->name, istr, MAXLKMNAME-1, NULL);
			/*
			 * look up id...
			 */
			for (i = 0; i < MAXLKMS; i++) {
				if (!lkmods[i].used)
					continue;
				if (!strcmp(istr,
				        lkmods[i].private.lkm_any->lkm_name))
					break;
			}

			if (i == MAXLKMS) {		/* Not found */
				err = ENOENT;
				break;
			}
		}

		/*
		 * Range check the value; on failure, return EINVAL
		 */
		if (i < 0 || i >= MAXLKMS) {
			err = EINVAL;
			break;
		}

		curp = &lkmods[i];

		if (!curp->used) {			/* Not found */
			err = ENOENT;
			break;
		}

		/*
		 * Copy out stat information for this module...
		 */
		statp->id	= curp->id;
		statp->offset	= curp->private.lkm_any->lkm_offset;
		statp->type	= curp->private.lkm_any->lkm_type;
		statp->area	= curp->area;
		statp->size	= curp->size / PAGESIZE;
		statp->private	= (uintptr_t)curp->private.lkm_any;
		statp->ver	= curp->private.lkm_any->lkm_ver;
		copystr(curp->private.lkm_any->lkm_name,
			  statp->name,
			  MAXLKMNAME - 2,
			  NULL);

		break;

	default:		/* bad ioctl()... */
		err = ENOTTY;
		break;
	}

	return (err);
}
482
483 int
484 lkmexists(lkmtp)
485 struct lkm_table *lkmtp;
486 {
487 int i;
488
489 /*
490 * see if name exists...
491 */
492 for (i = 0; i < MAXLKMS; i++) {
493 /*
494 * An unused module and the one we are testing are not
495 * considered.
496 */
497 if (!lkmods[i].used || &lkmods[i] == lkmtp)
498 continue;
499 if (!strcmp(lkmtp->private.lkm_any->lkm_name,
500 lkmods[i].private.lkm_any->lkm_name))
501 return(1); /* already loaded... */
502 }
503
504 return(0); /* module not loaded... */
505 }
506
/*
 * For the loadable system call described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 * Returns 0 on success or an errno from the syscall (de)registration.
 */
static int
_lkm_syscall(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	struct lkm_syscall *args = lkmtp->private.lkm_syscall;
	int i;
	int err = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return(EEXIST);

		/* LKM_ANON means "allocate any free sysent[] slot" */
		if (args->lkm_offset == LKM_ANON)
			i = NO_SYSCALL;
		else
			i = args->lkm_offset;

		/* lkm_oldent saves the displaced entry for unload */
		err = syscall_register(&i, args->lkm_sysent,
			&(args->lkm_oldent));
		if (err)
			return(err);

		/* done! */
		args->lkm_offset = i;	/* slot in sysent[] */

		break;

	case LKM_E_UNLOAD:
		/* current slot... */
		i = args->lkm_offset;

		/* restore the original sysent[] entry */
		err = syscall_deregister(&i, &(args->lkm_oldent));
		if (err)
			return(err);
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return(err);
}
556
557 /*
558 * For the loadable virtual file system described by the structure pointed
559 * to by lkmtp, load/unload/stat it depending on the cmd requested.
560 */
561 static int
562 _lkm_vfs(lkmtp, cmd)
563 struct lkm_table *lkmtp;
564 int cmd;
565 {
566 struct lkm_vfs *args = lkmtp->private.lkm_vfs;
567 struct vfsconf *vfc = args->lkm_vfsconf;
568 int error, i;
569
570 switch(cmd) {
571 case LKM_E_LOAD:
572 /* don't load twice! */
573 if (lkmexists(lkmtp))
574 return(EEXIST);
575
576 for(i = 0; args->lkm_vnodeops->ls_items[i]; i++)
577 vfs_add_vnodeops((void*)args->lkm_vnodeops->ls_items[i]);
578 error = vfs_register(vfc);
579 if (error)
580 return(error);
581
582 args->lkm_offset = vfc->vfc_typenum;
583
584 /* done! */
585 break;
586
587 case LKM_E_UNLOAD:
588 /* current slot... */
589 i = args->lkm_offset;
590
591 error = vfs_unregister(vfc);
592 if (error)
593 return(error);
594
595 for(i = 0; args->lkm_vnodeops->ls_items[i]; i++)
596 vfs_rm_vnodeops((void*)args->lkm_vnodeops->ls_items[i]);
597
598 break;
599
600 case LKM_E_STAT: /* no special handling... */
601 break;
602 }
603 return (0);
604 }
605
606 /*
607 * For the loadable device driver described by the structure pointed to
608 * by lkmtp, load/unload/stat it depending on the cmd requested.
609 */
610 static int
611 _lkm_dev(lkmtp, cmd)
612 struct lkm_table *lkmtp;
613 int cmd;
614 {
615 struct lkm_dev *args = lkmtp->private.lkm_dev;
616 int i;
617 dev_t descrip;
618 int err = 0;
619
620 switch(cmd) {
621 case LKM_E_LOAD:
622 /* don't load twice! */
623 if (lkmexists(lkmtp))
624 return(EEXIST);
625 switch(args->lkm_devtype) {
626 case LM_DT_CHAR:
627 if ((i = args->lkm_offset) == LKM_ANON)
628 descrip = (dev_t) -1;
629 else
630 descrip = makedev(args->lkm_offset,0);
631 if ( err = cdevsw_add(&descrip, args->lkm_dev.cdev,
632 &(args->lkm_olddev.cdev))) {
633 break;
634 }
635 args->lkm_offset = major(descrip) ;
636 break;
637
638 default:
639 err = ENODEV;
640 break;
641 }
642 break;
643
644 case LKM_E_UNLOAD:
645 /* current slot... */
646 i = args->lkm_offset;
647 descrip = makedev(i,0);
648
649 switch(args->lkm_devtype) {
650 case LM_DT_CHAR:
651 /* replace current slot contents with old contents */
652 cdevsw_add(&descrip, args->lkm_olddev.cdev,NULL);
653 break;
654
655 default:
656 err = ENODEV;
657 break;
658 }
659 break;
660
661 case LKM_E_STAT: /* no special handling... */
662 break;
663 }
664
665 return(err);
666 }
667
#ifdef STREAMS
/*
 * For the loadable streams module described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 * Currently only guards against duplicate loads; no actual streams
 * wiring is performed here.
 */
static int
_lkm_strmod(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	struct lkm_strmod *args = lkmtp->private.lkm_strmod;
	int i;
	int err = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return(EEXIST);
		break;

	case LKM_E_UNLOAD:
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return(err);
}
#endif	/* STREAMS */
699
700 /*
701 * For the loadable execution class described by the structure pointed to
702 * by lkmtp, load/unload/stat it depending on the cmd requested.
703 */
704 static int
705 _lkm_exec(lkmtp, cmd)
706 struct lkm_table *lkmtp;
707 int cmd;
708 {
709 struct lkm_exec *args = lkmtp->private.lkm_exec;
710 int err = 0;
711
712 switch(cmd) {
713 case LKM_E_LOAD:
714 /* don't load twice! */
715 if (lkmexists(lkmtp))
716 return(EEXIST);
717 if (args->lkm_offset != LKM_ANON) { /* auto */
718 err = EINVAL;
719 break;
720 }
721
722 err = exec_register(args->lkm_exec);
723
724 /* done! */
725 args->lkm_offset = 0;
726
727 break;
728
729 case LKM_E_UNLOAD:
730
731 err = exec_unregister(args->lkm_exec);
732
733 break;
734
735 case LKM_E_STAT: /* no special handling... */
736 break;
737 }
738 return(err);
739 }
740
/*
 * This code handles the per-module type "wiring-in" of loadable modules
 * into existing kernel tables.  For "LM_MISC" modules, wiring and unwiring
 * is assumed to be done in their entry routines internal to the module
 * itself.  Returns 0 on success or an errno from the type handler.
 */
int
lkmdispatch(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	int err = 0;		/* default = success */

	switch(lkmtp->private.lkm_any->lkm_type) {
	case LM_SYSCALL:
		err = _lkm_syscall(lkmtp, cmd);
		break;

	case LM_VFS:
		err = _lkm_vfs(lkmtp, cmd);
		break;

	case LM_DEV:
		err = _lkm_dev(lkmtp, cmd);
		break;

#ifdef STREAMS
	case LM_STRMOD:
	    {
		/*
		 * NOTE(review): args is declared but never used and
		 * _lkm_strmod() is never called here -- this case looks
		 * unfinished; confirm before enabling STREAMS.
		 */
		struct lkm_strmod *args = lkmtp->private.lkm_strmod;
	    }
		break;

#endif	/* STREAMS */

	case LM_EXEC:
		err = _lkm_exec(lkmtp, cmd);
		break;

	case LM_MISC:	/* ignore content -- no "misc-specific" procedure */
		if (lkmexists(lkmtp))
			err = EEXIST;
		break;

	default:
		err = ENXIO;	/* unknown type */
		break;
	}

	return(err);
}
792
/*
 * No-op command handler: modules that need no per-command work can
 * point their load/unload/stat hooks here.  Always succeeds.
 */
int
lkm_nullcmd(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{

	return (0);
}
801
#ifdef DEVFS
/* devfs handle for the "lkm" node, created in lkm_modevent(MOD_LOAD) */
static void *lkmc_devfs_token;
#endif
805
806 static int
807 lkm_modevent(module_t mod, int type, void *data)
808 {
809 dev_t dev;
810 static struct cdevsw *oldcdevsw;
811
812 switch (type) {
813 case MOD_LOAD:
814 dev = makedev(CDEV_MAJOR, 0);
815 cdevsw_add(&dev, &lkmc_cdevsw, &oldcdevsw);
816 #ifdef DEVFS
817 lkmc_devfs_token = devfs_add_devswf(&lkmc_cdevsw, 0, DV_CHR,
818 UID_ROOT, GID_WHEEL, 0644,
819 "lkm");
820 #endif
821 break;
822 case MOD_UNLOAD:
823 #ifdef DEVFS
824 devfs_remove_dev(lkmc_devfs_token);
825 #endif
826 cdevsw_add(&dev, oldcdevsw, NULL);
827 break;
828 default:
829 break;
830 }
831 return 0;
832 }
/* module glue: register the lkm driver ordered after its major number */
static moduledata_t lkm_mod = {
	"lkm",
	lkm_modevent,
	NULL
};
DECLARE_MODULE(lkm, lkm_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR);
Cache object: b3e0c643aa55e1b3159609bd8750c5e1
|