FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lkm.c
1 /*-
2 * Copyright (c) 1992 Terrence R. Lambert.
3 * Copyright (c) 1994 Christopher G. Demetriou
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by Terrence R. Lambert.
17 * 4. The name Terrence R. Lambert may not be used to endorse or promote
18 * products derived from this software without specific prior written
19 * permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY TERRENCE R. LAMBERT ``AS IS'' AND ANY
22 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE TERRENCE R. LAMBERT BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * $FreeBSD: src/sys/kern/kern_lkm.c,v 1.32.2.3 1999/09/05 08:14:55 peter Exp $
34 */
35
36 #include "opt_no_lkm.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/ioctl.h>
42 #include <sys/tty.h>
43 #include <sys/conf.h>
44 #include <sys/file.h>
45 #include <sys/proc.h>
46 #include <sys/uio.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/mount.h>
50 #include <sys/sysent.h>
51 #include <sys/exec.h>
52 #include <sys/imgact.h>
53 #include <sys/lkm.h>
54 #include <sys/vnode.h>
55 #ifdef DEVFS
56 #include <sys/devfsext.h>
57 #endif /*DEVFS*/
58
59 #include <vm/vm.h>
60 #include <vm/vm_param.h>
61 #include <vm/vm_kern.h>
62 #include <vm/vm_extern.h>
63
64
#define PAGESIZE 1024		/* kmem_alloc() allocation quantum */
				/* NOTE(review): 1024 is also the divisor used
				 * to report module size in LMSTAT below --
				 * confirm against the machine page size. */

/* lkm_v flags: exclusive-open state of the /dev/lkm pseudo-device */
#define LKM_ALLOC 0x01		/* device currently held by an opener */
#define LKM_WANT 0x02		/* someone is sleeping to open the device */

/* lkm_state values: progress of the module load currently in flight */
#define LKMS_IDLE 0x00		/* no load in progress */
#define LKMS_RESERVED 0x01	/* kernel memory reserved (LMRESERV) */
#define LKMS_LOADING 0x02	/* partial image copied in (LMLOADBUF) */
#define LKMS_LOADED 0x04	/* complete image copied in */
#define LKMS_UNLOADING 0x08	/* transient, set so lkmunreserve() runs */

static int lkm_v = 0;		/* device open/want flags (LKM_*) */
static int lkm_state = LKMS_IDLE; /* load-in-progress state (LKMS_*) */

#ifndef MAXLKMS
#define MAXLKMS 20
#endif

static struct lkm_table lkmods[MAXLKMS]; /* table of loaded modules */
static struct lkm_table *curp; /* global for in-progress ops */

static int _lkm_dev __P((struct lkm_table *lkmtp, int cmd));
static int _lkm_exec __P((struct lkm_table *lkmtp, int cmd));
static int _lkm_vfs __P((struct lkm_table *lkmtp, int cmd));
static int _lkm_syscall __P((struct lkm_table *lkmtp, int cmd));
static void lkmunreserve __P((void));

static d_open_t lkmcopen;
static d_close_t lkmcclose;
static d_ioctl_t lkmcioctl;

#define CDEV_MAJOR 32
/*
 * Character device switch for /dev/lkm (major 32): only open, close
 * and ioctl are implemented; everything else is a no-op stub.
 */
static struct cdevsw lkmc_cdevsw =
	{ lkmcopen, lkmcclose, noread, nowrite, /*32*/
	  lkmcioctl, nostop, nullreset, nodevtotty,
	  noselect, nommap, NULL, "lkm", NULL, -1 };
101
102
103 /*ARGSUSED*/
104 static int
105 lkmcopen(dev, flag, devtype, p)
106 dev_t dev;
107 int flag;
108 int devtype;
109 struct proc *p;
110 {
111 int error;
112
113 if (minor(dev) != 0)
114 return(ENXIO); /* bad minor # */
115
116 /*
117 * Use of the loadable kernel module device must be exclusive; we
118 * may try to remove this restriction later, but it's really no
119 * hardship.
120 */
121 while (lkm_v & LKM_ALLOC) {
122 if (flag & FNONBLOCK) /* don't hang */
123 return(EBUSY);
124 lkm_v |= LKM_WANT;
125 /*
126 * Sleep pending unlock; we use tsleep() to allow
127 * an alarm out of the open.
128 */
129 error = tsleep((caddr_t)&lkm_v, TTIPRI|PCATCH, "lkmopn", 0);
130 if (error)
131 return(error); /* leave LKM_WANT set -- no problem */
132 }
133 lkm_v |= LKM_ALLOC;
134
135 return(0); /* pseudo-device open */
136 }
137
138 /*
139 * Unreserve the memory associated with the current loaded module; done on
140 * a coerced close of the lkm device (close on premature exit of modload)
141 * or explicitly by modload as a result of a link failure.
142 */
143 static void
144 lkmunreserve()
145 {
146
147 if (lkm_state == LKMS_IDLE)
148 return;
149
150 /*
151 * Actually unreserve the memory
152 */
153 if (curp && curp->area) {
154 kmem_free(kernel_map, curp->area, curp->size);/**/
155 curp->area = 0;
156 if (curp->private.lkm_any != NULL)
157 curp->private.lkm_any = NULL;
158 }
159
160 lkm_state = LKMS_IDLE;
161 }
162
163 static int
164 lkmcclose(dev, flag, mode, p)
165 dev_t dev;
166 int flag;
167 int mode;
168 struct proc *p;
169 {
170
171 if (!(lkm_v & LKM_ALLOC)) {
172 #ifdef DEBUG
173 printf("LKM: close before open!\n");
174 #endif /* DEBUG */
175 return(EBADF);
176 }
177
178 /* do this before waking the herd... */
179 if (curp && !curp->used) {
180 /*
181 * If we close before setting used, we have aborted
182 * by way of error or by way of close-on-exit from
183 * a premature exit of "modload".
184 */
185 lkmunreserve(); /* coerce state to LKM_IDLE */
186 }
187
188 lkm_v &= ~LKM_ALLOC;
189 wakeup((caddr_t)&lkm_v); /* thundering herd "problem" here */
190
191 return(0); /* pseudo-device closed */
192 }
193
/*
 * Ioctl handler for /dev/lkm; implements the modload/modunload/modstat
 * protocol.  LMRESERV -> LMLOADBUF... -> LMREADY form a stateful load
 * sequence tracked in lkm_state; LMUNRESRV aborts a load in progress;
 * LMUNLOAD and LMSTAT operate on already-loaded modules by slot id or
 * by name.
 */
/*ARGSUSED*/
static int
lkmcioctl(dev, cmd, data, flag, p)
	dev_t dev;
	int cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int err = 0;
	int i;
	struct lmc_resrv *resrvp;
	struct lmc_loadbuf *loadbufp;
	struct lmc_unload *unloadp;
	struct lmc_stat *statp;
	char istr[MAXLKMNAME];

	switch(cmd) {
	case LMRESERV:		/* reserve pages for a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		resrvp = (struct lmc_resrv *)data;

		/*
		 * Find a free slot.
		 */
		for (i = 0; i < MAXLKMS; i++)
			if (!lkmods[i].used)
				break;
		if (i == MAXLKMS) {
			err = ENOMEM;		/* no slots available */
			break;
		}
		curp = &lkmods[i];
		curp->id = i;		/* self reference slot offset */

		resrvp->slot = i;		/* return slot */

		/*
		 * Get memory for module
		 */
		curp->size = resrvp->size;

		/*
		 * NOTE(review): resrvp->size is user-supplied and is not
		 * range-checked, and the kmem_alloc() result is not checked
		 * for failure before being handed back -- confirm against
		 * the kmem_alloc(9) contract for this kernel version.
		 */
		curp->area = kmem_alloc(kernel_map, curp->size);/**/

		curp->offset = 0;		/* load offset */

		resrvp->addr = curp->area; /* ret kernel addr */

#ifdef DEBUG
		printf("LKM: LMRESERV (actual = 0x%08x)\n", curp->area);
		printf("LKM: LMRESERV (adjusted = 0x%08x)\n",
		trunc_page(curp->area));
#endif	/* DEBUG */
		lkm_state = LKMS_RESERVED;
		break;

	case LMLOADBUF:		/* Copy in; stateful, follows LMRESERV */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		loadbufp = (struct lmc_loadbuf *)data;
		i = loadbufp->cnt;
		/*
		 * Reject chunks that arrive out of sequence, exceed the
		 * per-ioctl buffer, or would overflow the reservation
		 * made by LMRESERV.
		 */
		if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
		    || i < 0
		    || i > MODIOBUF
		    || i > curp->size - curp->offset) {
			err = ENOMEM;
			break;
		}

		/* copy in buffer full of data */
		err = copyin((caddr_t)loadbufp->data,
		    (caddr_t)curp->area + curp->offset, i);
		if (err)
			break;

		/* still more to come?  stay in LOADING, else done */
		if ((curp->offset + i) < curp->size) {
			lkm_state = LKMS_LOADING;
#ifdef DEBUG
			printf("LKM: LMLOADBUF (loading @ %d of %d, i = %d)\n",
			curp->offset, curp->size, i);
#endif	/* DEBUG */
		} else {
			lkm_state = LKMS_LOADED;
#ifdef DEBUG
			printf("LKM: LMLOADBUF (loaded)\n");
#endif	/* DEBUG */
		}
		curp->offset += i;
		break;

	case LMUNRESRV:		/* discard reserved pages for a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		lkmunreserve();	/* coerce state to LKM_IDLE */
#ifdef DEBUG
		printf("LKM: LMUNRESERV\n");
#endif	/* DEBUG */
		break;

	case LMREADY:		/* module loaded: call entry */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		switch (lkm_state) {
		case LKMS_LOADED:
			break;
		case LKMS_LOADING:
			/* The remainder must be bss, so we clear it */
			bzero((caddr_t)curp->area + curp->offset,
			curp->size - curp->offset);
			break;
		default:

#ifdef DEBUG
			printf("lkm_state is %02x\n", lkm_state);
#endif	/* DEBUG */
			return ENXIO;
		}

		/*
		 * XXX gack: the module entry point arrives as a raw
		 * integer in the ioctl data and is cast to a function
		 * pointer.
		 */
		curp->entry = (int (*) __P((struct lkm_table *, int, int)))
			(*((int *)data));

		/* call entry(load)... (assigns "private" portion) */
		err = (*(curp->entry))(curp, LKM_E_LOAD, LKM_VERSION);
		if (err) {
			/*
			 * Module may refuse loading or may have a
			 * version mismatch...
			 */
			lkm_state = LKMS_UNLOADING;	/* for lkmunreserve */
			lkmunreserve();		/* free memory */
			curp->used = 0;		/* free slot */
			break;
		}
		/*
		 * It's possible for a user to load a module that doesn't
		 * initialize itself correctly. (You can even get away with
		 * using it for a while.) Unfortunately, we are faced with
		 * the following problems:
		 * - we can't tell a good module from a bad one until
		 *   after we've run its entry function (if the private
		 *   section is uninitalized after we return from the
		 *   entry, then something's fishy)
		 * - now that we've called the entry function, we can't
		 *   forcibly unload the module without risking a crash
		 * - since we don't know what the module's entry function
		 *   did, we can't easily clean up the mess it may have
		 *   made, so we can't know just how unstable the system
		 *   may be
		 * So, being stuck between a rock and a hard place, we
		 * have no choice but to do this...
		 */
		if (curp->private.lkm_any == NULL)
			panic("loadable module initialization failed");

		curp->used = 1;
#ifdef DEBUG
		printf("LKM: LMREADY\n");
#endif	/* DEBUG */
		lkm_state = LKMS_IDLE;
		break;

	case LMUNLOAD:		/* unload a module */
		if ((flag & FWRITE) == 0 || securelevel > 0)
			/* only allow this if writing and insecure */
			return EPERM;

		unloadp = (struct lmc_unload *)data;

		if ((i = unloadp->id) == -1) {		/* unload by name */
			/*
			 * Copy name and lookup id from all loaded
			 * modules.  May fail.
			 */
			err =copyinstr(unloadp->name, istr, MAXLKMNAME-1, NULL);
			if (err)
				break;

			/*
			 * look up id...
			 */
			for (i = 0; i < MAXLKMS; i++) {
				if (!lkmods[i].used)
					continue;
				if (!strcmp(istr,
				    lkmods[i].private.lkm_any->lkm_name))
					break;
			}
		}

		/*
		 * Range check the value; on failure, return EINVAL
		 */
		if (i < 0 || i >= MAXLKMS) {
			err = EINVAL;
			break;
		}

		curp = &lkmods[i];

		if (!curp->used) {
			err = ENOENT;
			break;
		}

		/* call entry(unload); nonzero means the module refused */
		if ((*(curp->entry))(curp, LKM_E_UNLOAD, LKM_VERSION)) {
			err = EBUSY;
			break;
		}

		lkm_state = LKMS_UNLOADING;	/* non-idle for lkmunreserve */
		lkmunreserve();			/* free memory */
		curp->used = 0;			/* free slot */
		break;

	case LMSTAT:		/* stat a module by id/name */
		/* allow readers and writers to stat */

		statp = (struct lmc_stat *)data;

		if ((i = statp->id) == -1) {		/* stat by name */
			/*
			 * Copy name and lookup id from all loaded
			 * modules.
			 *
			 * NOTE(review): statp->name is a user-supplied
			 * pointer but is read with copystr() rather than
			 * copyinstr(), and the return value is ignored --
			 * verify against the intended user/kernel copy
			 * semantics.
			 */
			copystr(statp->name, istr, MAXLKMNAME-1, NULL);
			/*
			 * look up id...
			 */
			for (i = 0; i < MAXLKMS; i++) {
				if (!lkmods[i].used)
					continue;
				if (!strcmp(istr,
				    lkmods[i].private.lkm_any->lkm_name))
					break;
			}

			if (i == MAXLKMS) {		/* Not found */
				err = ENOENT;
				break;
			}
		}

		/*
		 * Range check the value; on failure, return EINVAL
		 */
		if (i < 0 || i >= MAXLKMS) {
			err = EINVAL;
			break;
		}

		curp = &lkmods[i];

		if (!curp->used) {			/* Not found */
			err = ENOENT;
			break;
		}

		/*
		 * Copy out stat information for this module...
		 */
		statp->id = curp->id;
		statp->offset = curp->private.lkm_any->lkm_offset;
		statp->type = curp->private.lkm_any->lkm_type;
		statp->area = curp->area;
		statp->size = curp->size / PAGESIZE;
		statp->private = (unsigned long)curp->private.lkm_any;
		statp->ver = curp->private.lkm_any->lkm_ver;
		copystr(curp->private.lkm_any->lkm_name,
		    statp->name,
		    MAXLKMNAME - 2,
		    NULL);

		break;

	default:		/* bad ioctl()... */
		err = ENOTTY;
		break;
	}

	return (err);
}
486
/*
 * Acts like "nosys" but can be identified in sysent for dynamic call
 * number assignment for a limited number of calls.
 *
 * Place holder for system call slots reserved for loadable modules.
 */
int
lkmnosys(p, args, retval)
	struct proc *p;
	struct nosys_args *args;
	int *retval;
{

	/* Delegate to the standard "unimplemented syscall" handler. */
	return nosys(p, args, retval);
}
502
503 int
504 lkmexists(lkmtp)
505 struct lkm_table *lkmtp;
506 {
507 int i;
508
509 /*
510 * see if name exists...
511 */
512 for (i = 0; i < MAXLKMS; i++) {
513 /*
514 * An unused module and the one we are testing are not
515 * considered.
516 */
517 if (!lkmods[i].used || &lkmods[i] == lkmtp)
518 continue;
519 if (!strcmp(lkmtp->private.lkm_any->lkm_name,
520 lkmods[i].private.lkm_any->lkm_name))
521 return(1); /* already loaded... */
522 }
523
524 return(0); /* module not loaded... */
525 }
526
527 /*
528 * For the loadable system call described by the structure pointed to
529 * by lkmtp, load/unload/stat it depending on the cmd requested.
530 */
531 static int
532 _lkm_syscall(lkmtp, cmd)
533 struct lkm_table *lkmtp;
534 int cmd;
535 {
536 struct lkm_syscall *args = lkmtp->private.lkm_syscall;
537 int i;
538 int err = 0;
539
540 switch(cmd) {
541 case LKM_E_LOAD:
542 /* don't load twice! */
543 if (lkmexists(lkmtp))
544 return(EEXIST);
545 if ((i = args->lkm_offset) == -1) { /* auto */
546 /*
547 * Search the table looking for a slot...
548 */
549 for (i = 0; i < aout_sysvec.sv_size; i++)
550 if (aout_sysvec.sv_table[i].sy_call ==
551 (sy_call_t *)lkmnosys)
552 break; /* found it! */
553 /* out of allocable slots? */
554 if (i == aout_sysvec.sv_size) {
555 err = ENFILE;
556 break;
557 }
558 } else { /* assign */
559 if (i < 0 || i >= aout_sysvec.sv_size) {
560 err = EINVAL;
561 break;
562 }
563 }
564
565 /* save old */
566 bcopy(&aout_sysvec.sv_table[i],
567 &(args->lkm_oldent),
568 sizeof(struct sysent));
569
570 /* replace with new */
571 bcopy(args->lkm_sysent,
572 &aout_sysvec.sv_table[i],
573 sizeof(struct sysent));
574
575 /* done! */
576 args->lkm_offset = i; /* slot in sysent[] */
577
578 break;
579
580 case LKM_E_UNLOAD:
581 /* current slot... */
582 i = args->lkm_offset;
583
584 /* replace current slot contents with old contents */
585 bcopy(&(args->lkm_oldent),
586 &aout_sysvec.sv_table[i],
587 sizeof(struct sysent));
588
589 break;
590
591 case LKM_E_STAT: /* no special handling... */
592 break;
593 }
594
595 return(err);
596 }
597
/*
 * For the loadable virtual file system described by the structure pointed
 * to by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * Returns 0 on success or an errno value.
 */
static int
_lkm_vfs(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	struct lkm_vfs *args = lkmtp->private.lkm_vfs;
	struct vfsconf *vfc = args->lkm_vfsconf;
	int i;
	int err = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return(EEXIST);

		/* also refuse a second filesystem with the same name */
		for(i = 0; i < MOUNT_MAXTYPE; i++) {
			if(!strcmp(vfc->vfc_name, vfsconf[i]->vfc_name)) {
				return EEXIST;
			}
		}

		/*
		 * Use the slot the module requested (vfc_index), or, if
		 * that is negative, the highest slot still holding the
		 * void_vfsconf placeholder.
		 */
		i = args->lkm_offset = vfc->vfc_index;
		if (i < 0) {
			for (i = MOUNT_MAXTYPE - 1; i >= 0; i--) {
				if(vfsconf[i] == &void_vfsconf)
					break;
			}
		}
		if (i < 0) {
			return EINVAL;	/* no free slot found */
		}
		args->lkm_offset = vfc->vfc_index = i;

		/* wire the new filesystem into the global tables */
		vfsconf[i] = vfc;
		vfssw[i] = vfc->vfc_vfsops;

		/*
		 * like in vfs_op_init: clear the operations vectors so
		 * vfs_opv_init() rebuilds them from the descriptors
		 */
		for(i = 0; args->lkm_vnodeops->ls_items[i]; i++) {
			const struct vnodeopv_desc *opv =
				args->lkm_vnodeops->ls_items[i];
			*(opv->opv_desc_vector_p) = NULL;
		}
		vfs_opv_init((struct vnodeopv_desc **)args->lkm_vnodeops->ls_items);

		/*
		 * Call init function for this VFS...
		 */
		(*(vfssw[vfc->vfc_index]->vfs_init))();

		/* done! */
		break;

	case LKM_E_UNLOAD:
		/* current slot... */
		i = args->lkm_offset;

		/* refuse to unload a filesystem that is still mounted */
		if (vfsconf[i]->vfc_refcount) {
			return EBUSY;
		}

		/* replace current slot contents with old contents */
		vfssw[i] = (struct vfsops *)0;
		vfsconf[i] = &void_vfsconf;

		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}
	return(err);
}
674
675 /*
676 * For the loadable device driver described by the structure pointed to
677 * by lkmtp, load/unload/stat it depending on the cmd requested.
678 */
679 static int
680 _lkm_dev(lkmtp, cmd)
681 struct lkm_table *lkmtp;
682 int cmd;
683 {
684 struct lkm_dev *args = lkmtp->private.lkm_dev;
685 int i;
686 dev_t descrip;
687 int err = 0;
688
689 switch(cmd) {
690 case LKM_E_LOAD:
691 /* don't load twice! */
692 if (lkmexists(lkmtp))
693 return(EEXIST);
694 switch(args->lkm_devtype) {
695 case LM_DT_BLOCK:
696 if ((i = args->lkm_offset) == -1)
697 descrip = (dev_t) -1;
698 else
699 descrip = makedev(args->lkm_offset,0);
700 if ( err = bdevsw_add(&descrip, args->lkm_dev.bdev,
701 &(args->lkm_olddev.bdev))) {
702 break;
703 }
704 args->lkm_offset = major(descrip) ;
705 break;
706
707 case LM_DT_CHAR:
708 if ((i = args->lkm_offset) == -1)
709 descrip = (dev_t) -1;
710 else
711 descrip = makedev(args->lkm_offset,0);
712 if ( err = cdevsw_add(&descrip, args->lkm_dev.cdev,
713 &(args->lkm_olddev.cdev))) {
714 break;
715 }
716 args->lkm_offset = major(descrip) ;
717 break;
718
719 default:
720 err = ENODEV;
721 break;
722 }
723 break;
724
725 case LKM_E_UNLOAD:
726 /* current slot... */
727 i = args->lkm_offset;
728 descrip = makedev(i,0);
729
730 switch(args->lkm_devtype) {
731 case LM_DT_BLOCK:
732 /* replace current slot contents with old contents */
733 bdevsw_add(&descrip, args->lkm_olddev.bdev,NULL);
734 break;
735
736 case LM_DT_CHAR:
737 /* replace current slot contents with old contents */
738 cdevsw_add(&descrip, args->lkm_olddev.cdev,NULL);
739 break;
740
741 default:
742 err = ENODEV;
743 break;
744 }
745 break;
746
747 case LKM_E_STAT: /* no special handling... */
748 break;
749 }
750
751 return(err);
752 }
753
#ifdef STREAMS
/*
 * For the loadable streams module described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * Currently only duplicate-load detection is implemented; unload and
 * stat need no special handling.  (The unused local "i" in the original
 * has been removed.)
 */
static int
_lkm_strmod(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	struct lkm_strmod *args = lkmtp->private.lkm_strmod;
	int err = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return(EEXIST);
		break;

	case LKM_E_UNLOAD:
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return(err);
}
#endif	/* STREAMS */
785
/*
 * For the loadable execution class described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * Returns 0 on success or an errno value.
 */
static int
_lkm_exec(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{
	struct lkm_exec *args = lkmtp->private.lkm_exec;
	int i;
	int err = 0;
	/* the execsw linker set, viewed as an array of entry pointers */
	const struct execsw **execsw =
		(const struct execsw **)&execsw_set.ls_items[0];

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return(EEXIST);
		if ((i = args->lkm_offset) == -1) {	/* auto */
			/*
			 * Search for a placeholder slot (one of the
			 * lkm_exec_dummy* entries below, recognizable by
			 * a NULL image activator).
			 */
			for (i = 0; execsw[i] != NULL; i++)
				if (execsw[i]->ex_imgact == NULL)
					break;		/* found it! */
			/* out of allocable slots? */
			if (execsw[i] == NULL) {
				err = ENFILE;
				break;
			}
		} else {				/* assign */
			/* explicit slot assignment is not supported */
			err = EINVAL;
			break;
		}

		/* save old (only the pointer is copied, not the struct) */
		bcopy(&execsw[i], &(args->lkm_oldexec), sizeof(struct execsw*));

		/* replace with new */
		bcopy(&(args->lkm_exec), &execsw[i], sizeof(struct execsw*));

		/* done! */
		args->lkm_offset = i;	/* slot in execsw[] */

		break;

	case LKM_E_UNLOAD:
		/* current slot... */
		i = args->lkm_offset;

		/* replace current slot contents with old contents */
		bcopy(&(args->lkm_oldexec), &execsw[i], sizeof(struct execsw*));

		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}
	return(err);
}
848
/* XXX: This is bogus. we should find a better method RSN! */
/*
 * Pre-allocate four placeholder entries (NULL image activator) in the
 * execsw_set linker set; _lkm_exec() above claims these slots when an
 * execution-class module is loaded.
 */
static const struct execsw lkm_exec_dummy1 = { NULL, "lkm" };
static const struct execsw lkm_exec_dummy2 = { NULL, "lkm" };
static const struct execsw lkm_exec_dummy3 = { NULL, "lkm" };
static const struct execsw lkm_exec_dummy4 = { NULL, "lkm" };
TEXT_SET(execsw_set, lkm_exec_dummy1);
TEXT_SET(execsw_set, lkm_exec_dummy2);
TEXT_SET(execsw_set, lkm_exec_dummy3);
TEXT_SET(execsw_set, lkm_exec_dummy4);
858
859 /*
860 * This code handles the per-module type "wiring-in" of loadable modules
861 * into existing kernel tables. For "LM_MISC" modules, wiring and unwiring
862 * is assumed to be done in their entry routines internal to the module
863 * itself.
864 */
865 int
866 lkmdispatch(lkmtp, cmd)
867 struct lkm_table *lkmtp;
868 int cmd;
869 {
870 int err = 0; /* default = success */
871
872 switch(lkmtp->private.lkm_any->lkm_type) {
873 case LM_SYSCALL:
874 err = _lkm_syscall(lkmtp, cmd);
875 break;
876
877 case LM_VFS:
878 err = _lkm_vfs(lkmtp, cmd);
879 break;
880
881 case LM_DEV:
882 err = _lkm_dev(lkmtp, cmd);
883 break;
884
885 #ifdef STREAMS
886 case LM_STRMOD:
887 {
888 struct lkm_strmod *args = lkmtp->private.lkm_strmod;
889 }
890 break;
891
892 #endif /* STREAMS */
893
894 case LM_EXEC:
895 err = _lkm_exec(lkmtp, cmd);
896 break;
897
898 case LM_MISC: /* ignore content -- no "misc-specific" procedure */
899 if (lkmexists(lkmtp))
900 err = EEXIST;
901 break;
902
903 default:
904 err = ENXIO; /* unknown type */
905 break;
906 }
907
908 return(err);
909 }
910
/*
 * No-op module command handler; always reports success.  Suitable as
 * the load/unload/stat hook for modules needing no type-specific work.
 */
int
lkm_nullcmd(lkmtp, cmd)
	struct lkm_table *lkmtp;
	int cmd;
{

	return 0;
}
919
/*
 * Nonzero once the lkm cdevsw entry has been installed.  Declared with
 * an explicit "int": the original relied on implicit int, which is
 * invalid in C99 and later.
 */
static int lkm_devsw_installed = 0;
#ifdef DEVFS
static void *lkmc_devfs_token;	/* devfs handle for the /dev/lkm node */
#endif
924
925 static void lkm_drvinit(void *unused)
926 {
927 dev_t dev;
928
929 if( ! lkm_devsw_installed ) {
930 dev = makedev(CDEV_MAJOR, 0);
931 cdevsw_add(&dev,&lkmc_cdevsw, NULL);
932 lkm_devsw_installed = 1;
933 #ifdef DEVFS
934 lkmc_devfs_token = devfs_add_devswf(&lkmc_cdevsw, 0, DV_CHR,
935 UID_ROOT, GID_WHEEL, 0644,
936 "lkm");
937 #endif
938 }
939 }
940
941 #ifndef NO_LKM
942 SYSINIT(lkmdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,lkm_drvinit,NULL)
943 #endif
Cache object: b4272cc627710f43a481f8af57bf8579
|