FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lkm.c
1 /* $NetBSD: kern_lkm.c,v 1.95 2006/11/01 10:17:58 yamt Exp $ */
2
3 /*
4 * Copyright (c) 1994 Christopher G. Demetriou
5 * Copyright (c) 1992 Terrence R. Lambert.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Terrence R. Lambert.
19 * 4. The name Terrence R. Lambert may not be used to endorse or promote
20 * products derived from this software without specific prior written
21 * permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TERRENCE R. LAMBERT ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE TERRENCE R. LAMBERT BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 /*
37 * XXX it's not really safe to unload *any* of the types which are
38 * currently loadable; e.g. you could unload a syscall which was being
39 * blocked in, etc. In the long term, a solution should be come up
40 * with, but "not right now." -- cgd
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: kern_lkm.c,v 1.95 2006/11/01 10:17:58 yamt Exp $");
45
46 #include "opt_ddb.h"
47 #include "opt_malloclog.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/ioctl.h>
52 #include <sys/tty.h>
53 #include <sys/file.h>
54 #include <sys/proc.h>
55 #include <sys/uio.h>
56 #include <sys/kernel.h>
57 #include <sys/vnode.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/exec.h>
61 #include <sys/sa.h>
62 #include <sys/syscallargs.h>
63 #include <sys/conf.h>
64 #include <sys/ksyms.h>
65 #include <sys/device.h>
66 #include <sys/once.h>
67 #include <sys/kauth.h>
68
69 #include <sys/lkm.h>
70 #include <sys/syscall.h>
71 #ifdef DDB
72 #include <machine/db_machdep.h>
73 #include <ddb/db_sym.h>
74 #endif
75
76 #include <uvm/uvm_extern.h>
77
struct vm_map *lkm_map;		/* submap for module text/data; may be kernel_map */

/*
 * Allocate/free wired kernel memory for module text, data and symbol
 * tables; "exec" requests an executable mapping.
 */
#define LKM_SPACE_ALLOC(size, exec) \
	uvm_km_alloc(lkm_map, (size), 0, \
	    UVM_KMF_WIRED | ((exec) ? UVM_KMF_EXEC : 0))
#define LKM_SPACE_FREE(addr, size) \
	uvm_km_free(lkm_map, (addr), (size), UVM_KMF_WIRED)

#if !defined(DEBUG) && defined(LKMDEBUG)
# define DEBUG
#endif

#ifdef DEBUG
# define LKMDB_INFO	0x01	/* informational messages */
# define LKMDB_LOAD	0x02	/* per-chunk load progress */
int	lkmdebug = 0;		/* bitmask of the LKMDB_* flags above */
#endif

#define LKM_ALLOC	0x01	/* /dev/lkm held exclusively open */

/* Loader state machine for the in-progress operation (lkm_state). */
#define LKMS_IDLE	0x00
#define LKMS_RESERVED	0x01
#define LKMS_LOADING	0x02
#define LKMS_UNLOADING	0x08

static int	lkm_v = 0;		/* device open/allocation flags */
static int	lkm_state = LKMS_IDLE;	/* current loader state */

static TAILQ_HEAD(lkms_head, lkm_table) lkmods =	/* table of loaded modules */
	TAILQ_HEAD_INITIALIZER(lkmods);
static struct lkm_table	*curp;		/* global for in-progress ops */

static struct lkm_table *lkmlookup(int, char *, int, int *);
static struct lkm_table *lkmalloc(void);
static void lkmfree(void);
static void lkmunreserve(int);
static int _lkm_syscall(struct lkm_table *, int);
static int _lkm_vfs(struct lkm_table *, int);
static int _lkm_dev(struct lkm_table *, int);
#ifdef STREAMS
static int _lkm_strmod(struct lkm_table *, int);
#endif
static int _lkm_exec(struct lkm_table *, int);
static int _lkm_compat(struct lkm_table *, int);
static int _lkm_drv(struct lkm_table *, int);

static int _lkm_checkver(struct lkm_table *);

dev_type_open(lkmopen);
dev_type_close(lkmclose);
dev_type_ioctl(lkmioctl);

/* Character-device entry points for /dev/lkm; no read/write/mmap. */
const struct cdevsw lkm_cdevsw = {
	lkmopen, lkmclose, noread, nowrite, lkmioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

static ONCE_DECL(lkm_init_once);
136
137 static int
138 lkm_init(void)
139 {
140 /*
141 * If machine-dependent code hasn't initialized the lkm_map
142 * then just use kernel_map.
143 */
144 if (lkm_map == NULL)
145 lkm_map = kernel_map;
146
147 return 0;
148 }
149
150 /*ARGSUSED*/
151 int
152 lkmopen(dev_t dev, int flag, int devtype, struct lwp *l)
153 {
154 int error;
155
156 RUN_ONCE(&lkm_init_once, lkm_init);
157
158 if (minor(dev) != 0)
159 return (ENXIO); /* bad minor # */
160
161 /*
162 * Use of the loadable kernel module device must be exclusive; we
163 * may try to remove this restriction later, but it's really no
164 * hardship.
165 */
166 while (lkm_v & LKM_ALLOC) {
167 if (flag & FNONBLOCK) /* don't hang */
168 return (EBUSY);
169 /*
170 * Sleep pending unlock; we use tsleep() to allow
171 * an alarm out of the open.
172 */
173 error = tsleep((caddr_t)&lkm_v, TTIPRI|PCATCH, "lkmopn", 0);
174 if (error)
175 return (error);
176 }
177 lkm_v |= LKM_ALLOC;
178
179 return (0); /* pseudo-device open */
180 }
181
/*
 * Look up for a LKM in the list.
 *
 * If "i" is non-negative, look up by module id; if negative, look up by
 * module name, copying it via copyinstr() when need_copyin is set (the
 * string then lives in user space) or strncpy() otherwise.  On failure,
 * NULL is returned and *error is set: EINVAL for an empty list or an
 * out-of-range id, ENOENT when nothing matches, or the copyinstr() error.
 */
static struct lkm_table *
lkmlookup(int i, char *name, int need_copyin, int *error)
{
	struct lkm_table *p;
	char istr[MAXLKMNAME];

	/*
	 * p being NULL here implies the list is empty, so any lookup is
	 * invalid (name based or otherwise). Since the list of modules is
	 * kept sorted by id, lowest to highest, the id of the last entry
	 * will be the highest in use.
	 */
	p = TAILQ_LAST(&lkmods, lkms_head);
	if (p == NULL || i > p->id) {
		*error = EINVAL;
		return (NULL);
	}

	if (i < 0) {		/* unload by name */
		/*
		 * Copy name and lookup id from all loaded
		 * modules. May fail.
		 */
		if (need_copyin) {
			*error = copyinstr(name, istr, MAXLKMNAME - 1, NULL);
			if (*error)
				return (NULL);
		} else
			strncpy(istr, name, MAXLKMNAME - 1);
		/* terminate explicitly; strncpy() need not NUL-terminate */
		istr[MAXLKMNAME - 1] = '\0';

		TAILQ_FOREACH(p, &lkmods, link) {
			if (strcmp(istr, p->private.lkm_any->lkm_name) == 0)
				break;
		}
	} else
		TAILQ_FOREACH(p, &lkmods, link)
			if (i == p->id)
				break;

	if (p == NULL)
		*error = ENOENT;

	return (p);
}
230
/*
 * Allocates memory for a new LKM table entry and inserts in the list.
 * Returns NULL on failure.
 *
 * The list stays sorted by id: the new entry takes the lowest unused id
 * and is inserted at the corresponding position.
 */
static struct lkm_table *
lkmalloc(void)
{
	struct lkm_table *p, *ret;
	int id = 0;

	ret = malloc(sizeof(struct lkm_table), M_DEVBUF, M_NOWAIT);
	if (ret == NULL)
		return (NULL);
	ret->refcnt = 0;
	ret->forced = 0;

	/* find the first unused id */
	TAILQ_FOREACH(p, &lkmods, link) {
		if (id != p->id)	/* gap in the sorted id sequence */
			break;
		id++;
	}
	ret->id = id;

	/* p == NULL: no gap found, append; otherwise insert to keep order */
	if (p == NULL)
		TAILQ_INSERT_TAIL(&lkmods, ret, link);
	else
		TAILQ_INSERT_BEFORE(p, ret, link);

	return (ret);
}
262
/*
 * Frees the current LKM table entry.
 *
 * Unlinks "curp" from the module list, releases its memory and clears
 * the global so no stale pointer survives.
 */
static void
lkmfree(void)
{
	TAILQ_REMOVE(&lkmods, curp, link);
	free(curp, M_DEVBUF);
	curp = NULL;
}
273
/*
 * Unreserve the memory associated with the current loaded module; done on
 * a coerced close of the lkm device (close on premature exit of modload)
 * or explicitly by modload as a result of a link failure.
 *
 * "delsymtab" selects whether the module's symbol table is also removed
 * from the kernel symbol table (only meaningful once it has been added).
 * Always leaves the loader state machine in LKMS_IDLE.
 */
static void
lkmunreserve(int delsymtab)
{

	if (lkm_state == LKMS_IDLE)
		return;

	/* free the symbol table space first, optionally unregistering it */
	if (curp && curp->syms) {
		if (delsymtab)
			ksyms_delsymtab(curp->private.lkm_any->lkm_name);
		LKM_SPACE_FREE(curp->syms, curp->sym_size);
		curp->syms = 0;
	}
	/*
	 * Actually unreserve the memory
	 */
	if (curp && curp->area) {
		LKM_SPACE_FREE(curp->area, curp->size);
		curp->area = 0;
	}

	if (curp && curp->forced)
		curp->forced = 0;

	lkm_state = LKMS_IDLE;
}
305
/*
 * Close /dev/lkm.  If a load was in progress but never committed
 * (refcnt still 0), undo the reservation; then drop the exclusive-open
 * flag and wake any sleepers in lkmopen().
 */
int
lkmclose(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (!(lkm_v & LKM_ALLOC)) {
#ifdef DEBUG
		if (lkmdebug & LKMDB_INFO)
			printf("LKM: close before open!\n");
#endif /* DEBUG */
		return (EBADF);
	}

	/* do this before waking the herd... */
	if (curp != NULL && curp->refcnt == 0) {
		/*
		 * If we close before setting used, we have aborted
		 * by way of error or by way of close-on-exit from
		 * a premature exit of "modload".
		 */
		lkmunreserve(1);	/* coerce state to LKM_IDLE */
		lkmfree();
	}

	lkm_v &= ~LKM_ALLOC;
	wakeup((caddr_t)&lkm_v);	/* thundering herd "problem" here */

	return (0);		/* pseudo-device closed */
}
334
/*
 * Ioctl handler for /dev/lkm: drives the module load state machine
 * (LMRESERV -> LMLOADBUF*/LMLOADSYMS* -> LMREADY), plus unload/stat and
 * explicit unreserve.  Mutating commands require both kauth approval and
 * an FWRITE open; LMSTAT is allowed for readers.  Operates on the global
 * "curp"/"lkm_state", which the exclusive-open device serializes.
 */
/*ARGSUSED*/
int
lkmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	int i, error = 0;
	struct lmc_resrv *resrvp;
	struct lmc_loadbuf *loadbufp;
	struct lmc_unload *unloadp;
	struct lmc_stat *statp;

	switch(cmd) {
	case LMRESERV:		/* reserve pages for a module */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		resrvp = (struct lmc_resrv *)data;

		curp = lkmalloc();
		if (curp == NULL) {
			error = ENOMEM;
			break;
		}
		resrvp->slot = curp->id;	/* return slot */

		/*
		 * Get memory for module
		 */
		curp->size = resrvp->size;
		/*
		 * NOTE(review): the LKM_SPACE_ALLOC() results here and for
		 * the symbol table below are not checked for failure --
		 * confirm whether allocation can fail in this configuration.
		 */
		curp->area = LKM_SPACE_ALLOC(curp->size, 1);
		curp->offset = 0;		/* load offset */

		resrvp->addr = curp->area;	/* ret kernel addr */

		if (resrvp->sym_size) {
			curp->sym_size = resrvp->sym_size;
			curp->sym_symsize = resrvp->sym_symsize;
			curp->syms = (u_long)LKM_SPACE_ALLOC(curp->sym_size, 0);
			curp->sym_offset = 0;
			resrvp->sym_addr = curp->syms; /* ret symbol addr */
		} else {
			curp->sym_size = 0;
			curp->syms = 0;
			curp->sym_offset = 0;
			resrvp->sym_addr = 0;
		}
#ifdef DEBUG
		if (lkmdebug & LKMDB_INFO) {
			printf("LKM: LMRESERV (actual = 0x%08lx)\n",
			    curp->area);
			printf("LKM: LMRESERV (syms = 0x%08lx)\n",
			    curp->syms);
			printf("LKM: LMRESERV (adjusted = 0x%08lx)\n",
			    trunc_page(curp->area));
		}
#endif /* DEBUG */
		lkm_state = LKMS_RESERVED;
		break;

	case LMLOADBUF:		/* Copy in; stateful, follows LMRESERV */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		loadbufp = (struct lmc_loadbuf *)data;
		i = loadbufp->cnt;
		/* chunk must fit in the remaining reserved text/data space */
		if ((lkm_state != LKMS_RESERVED && lkm_state != LKMS_LOADING)
		    || i < 0
		    || i > MODIOBUF
		    || i > curp->size - curp->offset) {
			error = ENOMEM;
			break;
		}

		/* copy in buffer full of data */
		error = copyin(loadbufp->data,
		    (caddr_t)curp->area + curp->offset, i);
		if (error)
			break;

#ifdef PMAP_NEED_PROCWR
		/* we may have written instructions; sync I/D caches */
		pmap_procwr(&proc0, curp->area + curp->offset, i);
#endif
		if ((curp->offset + i) < curp->size) {
			lkm_state = LKMS_LOADING;
#ifdef DEBUG
			if (lkmdebug & LKMDB_LOAD)
				printf("LKM: LMLOADBUF (loading @ %ld of %ld, i = %d)\n",
				    curp->offset, curp->size, i);
#endif /* DEBUG */
		}
		curp->offset += i;
		break;

	case LMLOADSYMS:	/* Copy in; stateful, follows LMRESERV*/
		/*
		 * NOTE(review): unlike the other mutating commands, this one
		 * performs no kauth_authorize_system() check -- confirm
		 * whether that omission is intentional.
		 */
		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		loadbufp = (struct lmc_loadbuf *)data;
		i = loadbufp->cnt;
		/* chunk must fit in the remaining reserved symbol space */
		if ((lkm_state != LKMS_LOADING)
		    || i < 0
		    || i > MODIOBUF
		    || i > curp->sym_size - curp->sym_offset) {
			error = ENOMEM;
			break;
		}

		/* copy in buffer full of data*/
		if ((error = copyin(loadbufp->data,
		    (caddr_t)(curp->syms) + curp->sym_offset,
		    i)) != 0)
			break;

		if ((curp->sym_offset + i) < curp->sym_size) {
			lkm_state = LKMS_LOADING;
#ifdef DEBUG
			if (lkmdebug & LKMDB_LOAD)
				printf( "LKM: LMLOADSYMS (loading @ %ld of %ld, i = %d)\n",
				    curp->sym_offset, curp->sym_size, i);
#endif /* DEBUG*/
		}
		curp->sym_offset += i;
		break;

	case LMUNRESRV:		/* discard reserved pages for a module */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		lkmunreserve(0);	/* coerce state to LKM_IDLE */
		if (curp != NULL)
			lkmfree();
#ifdef DEBUG
		if (lkmdebug & LKMDB_INFO)
			printf("LKM: LMUNRESERV\n");
#endif /* DEBUG */
		break;

	case LMREADY:		/* module loaded: call entry */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		if (lkm_state != LKMS_LOADING) {
#ifdef DEBUG
			if (lkmdebug & LKMDB_INFO)
				printf("lkm_state is %02x\n", lkm_state);
#endif /* DEBUG */
			return ENXIO;
		}

		if (curp->size - curp->offset > 0) {
			/* The remainder must be bss, so we clear it */
			memset((caddr_t)curp->area + curp->offset, 0,
			    curp->size - curp->offset);
		}

#ifdef DDB
		/*
		 * Temporarily load the symbol table before the entry
		 * routine is called, so that the symbols are available
		 * for DDB backtrace and breakpoints.
		 */
		if (curp->syms && curp->sym_offset >= curp->sym_size) {
			error = ksyms_addsymtab("/lkmtemp/",
			    (char *)curp->syms, curp->sym_symsize,
			    (char *)curp->syms + curp->sym_symsize,
			    curp->sym_size - curp->sym_symsize);

			if (error)
				goto rdyfail;

#ifdef DEBUG
			if (lkmdebug & LKMDB_INFO)
				printf( "DDB symbols added!\n" );
#endif
		}
#endif /* DDB */

		/* userland loader passed the module entry address in "data" */
		curp->entry = (int (*)(struct lkm_table *, int, int))
		    (*((long *) (data)));

		/* call entry(load)... (assigns "private" portion) */
		error = (*(curp->entry))(curp, LKM_E_LOAD, LKM_VERSION);

		/* swap the temporary symtab for one under the real name */
		if (curp->syms && curp->sym_offset >= curp->sym_size) {
#ifdef DDB
			ksyms_delsymtab("/lkmtemp/");
#endif

			if (!error) {
				error = ksyms_addsymtab(curp->private.lkm_any->lkm_name,
				    (char *)curp->syms, curp->sym_symsize,
				    (char *)curp->syms + curp->sym_symsize,
				    curp->sym_size - curp->sym_symsize);
			}
		}

		if (error) {
#ifdef DDB
rdyfail:
#endif
			/*
			 * Module may refuse loading or may have a
			 * version mismatch...
			 */
			lkm_state = LKMS_UNLOADING;	/* for lkmunreserve */
			lkmunreserve(0);		/* free memory */
			lkmfree();			/* free slot */
#ifdef DEBUG
			if (lkmdebug & LKMDB_INFO)
				printf("lkm entry point failed with error %d\n",
				    error);
#endif /* DEBUG */
			break;
		}
		curp->refcnt++;

#ifdef DEBUG
		if (lkmdebug & LKMDB_INFO)
			printf("LKM: LMREADY\n");
#endif /* DEBUG */
		lkm_state = LKMS_IDLE;
		break;

	case LMUNLOAD:		/* unload a module */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		unloadp = (struct lmc_unload *)data;

		curp = lkmlookup(unloadp->id, unloadp->name, 1, &error);
		if (curp == NULL)
			break;

		/* call entry(unload); the module may veto its own unload */
		if ((*(curp->entry))(curp, LKM_E_UNLOAD, LKM_VERSION)) {
			error = EBUSY;
			break;
		}

		lkm_state = LKMS_UNLOADING;	/* non-idle for lkmunreserve */
		lkmunreserve(1);		/* free memory */
		lkmfree();			/* free slot */
		break;

	case LMSTAT:		/* stat a module by id/name */
		/* allow readers and writers to stat */

		statp = (struct lmc_stat *)data;

		if ((curp = lkmlookup(statp->id, statp->name, 0, &error)) == NULL)
			break;

		if ((error = (*curp->entry)(curp, LKM_E_STAT, LKM_VERSION)))
			break;

		/*
		 * Copy out stat information for this module...
		 */
		statp->id = curp->id;
		statp->offset = curp->private.lkm_any->lkm_offset;
		statp->type = curp->private.lkm_any->lkm_type;
		statp->area = curp->area;
		statp->size = curp->size / 1024;	/* reported in KiB */
		statp->private = (unsigned long)curp->private.lkm_any;
		statp->ver = LKM_VERSION;
		copystr(curp->private.lkm_any->lkm_name,
		    statp->name,
		    MAXLKMNAME - 2,
		    (size_t *)0);

		break;

#ifdef LMFORCE
	case LMFORCE:		/* stateful, optionally follows LMRESERV */
		if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LKM,
		    0, (void *)cmd, NULL, NULL))
			return EPERM;

		if ((flag & FWRITE) == 0) /* only allow this if writing */
			return EPERM;

		if (lkm_state != LKMS_RESERVED) {
			error = EPERM;
			break;
		}

		/* nonzero flag word makes _lkm_checkver() skip most checks */
		curp->forced = (*(u_long *)data != 0);
		break;
#endif /* LMFORCE */

	default:		/* bad ioctl()... */
		error = ENOTTY;
		break;
	}

	return (error);
}
652
653 /*
654 * Acts like "nosys" but can be identified in sysent for dynamic call
655 * number assignment for a limited number of calls.
656 *
657 * Place holder for system call slots reserved for loadable modules.
658 */
659 int
660 sys_lkmnosys(struct lwp *l, void *v, register_t *retval)
661 {
662
663 return (sys_nosys(l, v, retval));
664 }
665
/*
 * A placeholder function for load/unload/stat calls; simply returns zero.
 * Used where people don't want to specify a special function.
 */
int
lkm_nofunc(struct lkm_table *lkmtp, int cmd)
{

	/* Intentionally a no-op: always report success. */
	return 0;
}
676
677 int
678 lkmexists(struct lkm_table *lkmtp)
679 {
680 struct lkm_table *p;
681
682 /* see if name exists... */
683 TAILQ_FOREACH(p, &lkmods, link) {
684 if (strcmp(lkmtp->private.lkm_any->lkm_name,
685 p->private.lkm_any->lkm_name) == 0 && (p->refcnt != 0))
686 return (1); /* already loaded... */
687 }
688
689 return (0); /* module not loaded... */
690 }
691
/*
 * For the loadable system call described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * On load, either a free sysent[] slot (one still pointing at
 * sys_lkmnosys) is found automatically (lkm_offset == -1) or the
 * module's requested slot is range-checked; the old entry is saved in
 * args->lkm_oldent so unload can restore it.
 */
static int
_lkm_syscall(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_syscall *args = lkmtp->private.lkm_syscall;
	int i;
	int error = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);

		if ((i = args->mod.lkm_offset) == -1) {	/* auto */
			/*
			 * Search the table looking for a slot...
			 */
			for (i = 0; i < SYS_MAXSYSCALL; i++)
				if (sysent[i].sy_call == sys_lkmnosys)
					break;		/* found it! */
			/* out of allocable slots? */
			if (i == SYS_MAXSYSCALL) {
				error = ENFILE;
				break;
			}
		} else {				/* assign */
			if (i < 0 || i >= SYS_MAXSYSCALL) {
				error = EINVAL;
				break;
			}
		}

		/* save old */
		memcpy(&args->lkm_oldent, &sysent[i], sizeof(struct sysent));

		/* replace with new */
		memcpy(&sysent[i], args->lkm_sysent, sizeof(struct sysent));

		/* done! */
		args->mod.lkm_offset = i;	/* slot in sysent[] */

		break;

	case LKM_E_UNLOAD:
		/* current slot... */
		i = args->mod.lkm_offset;

		/* replace current slot contents with old contents */
		memcpy(&sysent[i], &args->lkm_oldent, sizeof(struct sysent));

		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (error);
}
754
755 /*
756 * For the loadable virtual file system described by the structure pointed
757 * to by lkmtp, load/unload/stat it depending on the cmd requested.
758 */
759 static int
760 _lkm_vfs(struct lkm_table *lkmtp, int cmd)
761 {
762 struct lkm_vfs *args = lkmtp->private.lkm_vfs;
763 int error = 0;
764
765 switch(cmd) {
766 case LKM_E_LOAD:
767 /* don't load twice! */
768 if (lkmexists(lkmtp))
769 return (EEXIST);
770
771 /* Establish the file system. */
772 if ((error = vfs_attach(args->lkm_vfsops)) != 0)
773 return (error);
774
775 /* done! */
776 break;
777
778 case LKM_E_UNLOAD:
779 /* Disestablish the file system. */
780 if ((error = vfs_detach(args->lkm_vfsops)) != 0)
781 return (error);
782 break;
783
784 case LKM_E_STAT: /* no special handling... */
785 break;
786 }
787
788 return (error);
789 }
790
/*
 * For the loadable device driver described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * Load registers the module's bdevsw/cdevsw entries and records the
 * assigned majors (packed via LKM_MAKEMAJOR) in args->mod.lkm_offset;
 * unload removes them and resets the cached majors to -1.
 */
static int
_lkm_dev(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_dev *args = lkmtp->private.lkm_dev;
	int error;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);

		error = devsw_attach(args->lkm_devname,
		    args->lkm_bdev, &args->lkm_bdevmaj,
		    args->lkm_cdev, &args->lkm_cdevmaj);
		if (error != 0)
			return (error);

		args->mod.lkm_offset =
		    LKM_MAKEMAJOR(args->lkm_bdevmaj, args->lkm_cdevmaj);
		break;

	case LKM_E_UNLOAD:
		devsw_detach(args->lkm_bdev, args->lkm_cdev);
		args->lkm_bdevmaj = -1;
		args->lkm_cdevmaj = -1;
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (0);
}
829
#ifdef STREAMS
/*
 * For the loadable streams module described by the structure pointed to
 * by lkmtp, load/unload/stat it depending on the cmd requested.
 *
 * No streams-specific wiring is implemented yet; this only enforces the
 * "don't load twice" rule.  (Fix: the old unused local "i" is removed.)
 */
static int
_lkm_strmod(struct lkm_table *lkmtp, int cmd)
{
	struct lkm_strmod *args = lkmtp->private.lkm_strmod;
	int error = 0;

	switch(cmd) {
	case LKM_E_LOAD:
		/* don't load twice! */
		if (lkmexists(lkmtp))
			return (EEXIST);
		break;

	case LKM_E_UNLOAD:
		break;

	case LKM_E_STAT:	/* no special handling... */
		break;
	}

	return (error);
}
#endif /* STREAMS */
859
860 /*
861 * For the loadable execution class described by the structure pointed to
862 * by lkmtp, load/unload/stat it depending on the cmd requested.
863 */
864 static int
865 _lkm_exec(struct lkm_table *lkmtp, int cmd)
866 {
867 struct lkm_exec *args = lkmtp->private.lkm_exec;
868 int error = 0;
869
870 switch(cmd) {
871 case LKM_E_LOAD:
872 /* don't load twice! */
873 if (lkmexists(lkmtp))
874 return (EEXIST);
875
876 /* this would also fill in the emulation pointer in
877 * args->lkm_execsw */
878 error = exec_add(args->lkm_execsw, args->lkm_emul);
879 break;
880
881 case LKM_E_UNLOAD:
882 error = exec_remove(args->lkm_execsw);
883 break;
884
885 case LKM_E_STAT: /* no special handling... */
886 break;
887 }
888
889 return (error);
890 }
891
892 /*
893 * For the loadable compat/emulation class described by the structure pointed to
894 * by lkmtp, load/unload/stat it depending on the cmd requested.
895 */
896 static int
897 _lkm_compat(struct lkm_table *lkmtp, int cmd)
898 {
899 struct lkm_compat *args = lkmtp->private.lkm_compat;
900 int error = 0;
901
902 switch(cmd) {
903 case LKM_E_LOAD:
904 /* don't load twice! */
905 if (lkmexists(lkmtp))
906 return (EEXIST);
907
908 error = emul_register(args->lkm_compat, 0);
909 break;
910
911 case LKM_E_UNLOAD:
912 error = emul_unregister(args->lkm_compat->e_name);
913 break;
914
915 case LKM_E_STAT: /* no special handling... */
916 break;
917 }
918
919 return (error);
920 }
921
/*
 * Register a driver module's cfdrivers, cfattach lists and cfdata with
 * autoconfiguration.  Drivers/attachments already present in the kernel
 * (EEXIST) are tolerated: they are squeezed out of the caller's
 * NULL-terminated arrays so a later drvlkm_unload() won't try to detach
 * them.  Any other attach error aborts the load.
 */
static int
drvlkm_load(struct cfdriver **cd, const struct cfattachlkminit *cai,
    struct cfdata *cf)
{
	const struct cfattachlkminit *cfai;
	int i, error, j;

	for (i = 0; cd[i]; i++) {
		error = config_cfdriver_attach(cd[i]);
		if (!error)
			continue;
		if (error != EEXIST) {
			printf("%s: unable to register driver\n",
			    cd[i]->cd_name);
			/* XXX roll back previous attachments */
			goto out;
		}
		printf("driver %s already present\n", cd[i]->cd_name);
		/*
		 * get existing drivers out of the list so we won't try
		 * to detach them
		 */
		for (j = i; cd[j]; j++)
			cd[j] = cd[j + 1];	/* shifts the NULL too */
		i--;	/* continue at same index */
	}

	for (cfai = cai; cfai->cfai_name; cfai++) {
		for (i = 0; cfai->cfai_list[i]; i++) {
			error = config_cfattach_attach(cfai->cfai_name,
			    cfai->cfai_list[i]);
			if (!error)
				continue;
			if (error != EEXIST) {
				printf("%s: unable to register cfattach\n",
				    cfai->cfai_list[i]->ca_name);
				/* XXX roll back previous attachments */
				goto out;
			}
			printf("driver attachment %s for %s already present\n",
			    cfai->cfai_list[i]->ca_name, cfai->cfai_name);
			/*
			 * get existing attachments out of the list so we
			 * won't try to detach them
			 */
			for (j = i; cfai->cfai_list[j]; j++)
				cfai->cfai_list[j] = cfai->cfai_list[j + 1];
			i--;	/* continue at same index */
		}
	}

	/* "error" is always (re)assigned here on the fall-through path */
	error = config_cfdata_attach(cf, 1);
	/* XXX roll back cfdriver / cfattach attachments in error case */

out:
	return (error);
}
979
980 static int
981 drvlkm_unload(struct cfdriver **cd, const struct cfattachlkminit *cai,
982 struct cfdata *cf)
983 {
984 const struct cfattachlkminit *cfai;
985 int i, error;
986
987 error = config_cfdata_detach(cf);
988 if (error)
989 return (error);
990
991 for (cfai = cai; cfai->cfai_name; cfai++) {
992 for (i = 0; cfai->cfai_list[i]; i++) {
993 error = config_cfattach_detach(cfai->cfai_name,
994 cfai->cfai_list[i]);
995 if (error) {
996 printf("%s: unable to deregister cfattach\n",
997 cfai->cfai_list[i]->ca_name);
998 return (error);
999 }
1000 }
1001 }
1002
1003 for (i = 0; cd[i]; i++) {
1004 error = config_cfdriver_detach(cd[i]);
1005 if (error) {
1006 printf("%s: unable to deregister cfdriver\n",
1007 cd[i]->cd_name);
1008 return (error);
1009 }
1010 }
1011
1012 return (0);
1013 }
1014
1015 static int
1016 _lkm_drv(struct lkm_table *lkmtp, int cmd)
1017 {
1018 struct lkm_drv *args = lkmtp->private.lkm_drv;
1019 int error = 0;
1020
1021 switch(cmd) {
1022 case LKM_E_LOAD:
1023 /* don't load twice! */
1024 if (lkmexists(lkmtp))
1025 return (EEXIST);
1026
1027 error = drvlkm_load(args->lkm_cd,
1028 args->lkm_cai,
1029 args->lkm_cf);
1030 break;
1031
1032 case LKM_E_UNLOAD:
1033 error = drvlkm_unload(args->lkm_cd,
1034 args->lkm_cai,
1035 args->lkm_cf);
1036 break;
1037
1038 case LKM_E_STAT: /* no special handling... */
1039 break;
1040 }
1041
1042 return (error);
1043 }
1044
1045 /*
1046 * This code handles the per-module type "wiring-in" of loadable modules
1047 * into existing kernel tables. For "LM_MISC" modules, wiring and unwiring
1048 * is assumed to be done in their entry routines internal to the module
1049 * itself.
1050 */
1051 int
1052 lkmdispatch(struct lkm_table *lkmtp, int cmd)
1053 {
1054 int error = 0; /* default = success */
1055 #ifdef DEBUG
1056 if (lkmdebug & LKMDB_INFO)
1057 printf( "lkmdispatch: %p %d\n", lkmtp, cmd );
1058 #endif
1059
1060 /* If loading, check the LKM is compatible */
1061 if (cmd == LKM_E_LOAD) {
1062 if (_lkm_checkver(lkmtp))
1063 return (EPROGMISMATCH);
1064 }
1065
1066 switch(lkmtp->private.lkm_any->lkm_type) {
1067 case LM_SYSCALL:
1068 error = _lkm_syscall(lkmtp, cmd);
1069 break;
1070
1071 case LM_VFS:
1072 error = _lkm_vfs(lkmtp, cmd);
1073 break;
1074
1075 case LM_DEV:
1076 error = _lkm_dev(lkmtp, cmd);
1077 break;
1078
1079 #ifdef STREAMS
1080 case LM_STRMOD:
1081 {
1082 struct lkm_strmod *args = lkmtp->private.lkm_strmod;
1083 }
1084 break;
1085
1086 #endif /* STREAMS */
1087
1088 case LM_EXEC:
1089 error = _lkm_exec(lkmtp, cmd);
1090 break;
1091
1092 case LM_COMPAT:
1093 error = _lkm_compat(lkmtp, cmd);
1094 break;
1095
1096 case LM_MISC: /* ignore content -- no "misc-specific" procedure */
1097 break;
1098
1099 case LM_DRV:
1100 error = _lkm_drv(lkmtp, cmd);
1101 break;
1102
1103 default:
1104 error = ENXIO; /* unknown type */
1105 break;
1106 }
1107
1108 return (error);
1109 }
1110
/*
 * Check LKM version against current kernel.
 *
 * Returns 0 if the module is believed compatible, otherwise non-zero:
 * 1 for an LKM interface version mismatch, 2 for a kernel version
 * mismatch, 3 for a compile-environment option mismatch.  A forced load
 * (lkmtp->forced) skips everything except the interface version check.
 */
static int
_lkm_checkver(struct lkm_table *lkmtp)
{
	struct lkm_any *mod = lkmtp->private.lkm_any;

	if (mod->lkm_modver != LKM_VERSION) {
		printf("LKM '%s': LKM version mismatch - LKM %d, kernel %d\n",
		    mod->lkm_name, mod->lkm_modver, LKM_VERSION);
		return (1);
	}

	if (lkmtp->forced) {
		printf("LKM '%s': forced load, skipping compatibility checks\n",
		    mod->lkm_name);
		return (0);
	}

	if (mod->lkm_sysver != __NetBSD_Version__) {
		printf("LKM '%s': kernel version mismatch - LKM %d, kernel %d\n",
		    mod->lkm_name, mod->lkm_sysver, __NetBSD_Version__);
		return (2);
	}

	/*
	 * Following might eventually be changed to take into account envdep,
	 * if it's non-NULL.
	 */
	if (strcmp(mod->lkm_envver, _LKM_ENV_VERSION) != 0) {
		const char *kenv = _LKM_ENV_VERSION;
		const char *envver = mod->lkm_envver;

		/* skip the leading comma the option strings carry */
		if (kenv[0] == ',')
			kenv++;
		if (envver[0] == ',')
			envver++;

		printf("LKM '%s': environment compile options mismatch - LKM '%s', kernel '%s'\n",
		    mod->lkm_name, envver, kenv);
		return (3);
	}

	/*
	 * Basic parameters match, LKM is hopefully compatible.
	 * Cross fingers and approve.
	 */
	return (0);
}
Cache object: 9393d415278292a57a1f612119912d4a
|