1 /*-
2 * Copyright (c) 2005
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/unistd.h>
39 #include <sys/types.h>
40
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 #include <sys/lock.h>
44 #include <sys/mutex.h>
45 #include <sys/module.h>
46 #include <sys/conf.h>
47 #include <sys/mbuf.h>
48 #include <sys/bus.h>
49 #include <sys/proc.h>
50 #include <sys/sched.h>
51 #include <sys/smp.h>
52
53 #include <sys/queue.h>
54
55 #ifdef __i386__
56 #include <machine/segments.h>
57 #endif
58
59 #ifdef __amd64__
60 #include <machine/fpu.h>
61 #endif
62
63 #include <dev/usb/usb.h>
64
65 #include <compat/ndis/pe_var.h>
66 #include <compat/ndis/cfg_var.h>
67 #include <compat/ndis/resource_var.h>
68 #include <compat/ndis/ntoskrnl_var.h>
69 #include <compat/ndis/ndis_var.h>
70 #include <compat/ndis/hal_var.h>
71 #include <compat/ndis/usbd_var.h>
72
#ifdef __amd64__
/*
 * Cache of kernel FPU contexts used to bracket calls into x86-64
 * Windows code (which may freely use SSE/FPU state).  Free contexts
 * live on fpu_free_head, loaned-out ones on fpu_busy_head; each list
 * is protected by its own mutex.
 */
struct fpu_cc_ent {
	struct fpu_kern_ctx	*ctx;	/* FPU save area from fpu_kern_alloc_ctx() */
	LIST_ENTRY(fpu_cc_ent)	entries;
};
static LIST_HEAD(fpu_ctx_free, fpu_cc_ent) fpu_free_head =
    LIST_HEAD_INITIALIZER(fpu_free_head);
static LIST_HEAD(fpu_ctx_busy, fpu_cc_ent) fpu_busy_head =
    LIST_HEAD_INITIALIZER(fpu_busy_head);
static struct mtx fpu_free_mtx;
static struct mtx fpu_busy_mtx;
#endif

/* Database of all loaded Windows drivers (plus the fake bus drivers). */
static struct mtx drvdb_mtx;
static STAILQ_HEAD(drvdb, drvdb_ent) drvdb_head;

static driver_object fake_pci_driver; /* serves both PCI and cardbus */
static driver_object fake_pccard_driver;

#ifdef __i386__
static void x86_oldldt(void *);
static void x86_newldt(void *);

/*
 * Minimal fake of the Windows Thread Information Block (TIB).
 * Windows code expects %fs to point at a structure with this layout;
 * one instance is kept per CPU (see my_tids below).
 */
struct tid {
	void	*tid_except_list;	/* 0x00 */
	uint32_t tid_oldfs;		/* 0x04  saved FreeBSD %fs selector */
	uint32_t tid_selector;		/* 0x08  our private GDT selector */
	struct tid *tid_self;		/* 0x0C  Windows expects self-pointer */
	int	tid_cpu;		/* 0x10  CPU this TID was entered on */
};

/* One fake TID per CPU, allocated in windrv_libinit(). */
static struct tid *my_tids;
#endif /* __i386__ */

#define DUMMY_REGISTRY_PATH "\\\\some\\bogus\\path"
108
/*
 * One-time initialization of the Windows driver loader: set up the
 * driver database, the amd64 FPU context cache, the fake PCI/pccard
 * bus drivers, and (on i386) the per-CPU fake TIDs plus the private
 * GDT slot on every CPU.  Returns 0 on success; panics if the i386
 * TID array cannot be allocated.
 */
int
windrv_libinit(void)
{
	STAILQ_INIT(&drvdb_head);
	mtx_init(&drvdb_mtx, "Windows driver DB lock",
	    "Windows internal lock", MTX_DEF);

#ifdef __amd64__
	LIST_INIT(&fpu_free_head);
	LIST_INIT(&fpu_busy_head);
	mtx_init(&fpu_free_mtx, "free fpu context list lock", NULL, MTX_DEF);
	mtx_init(&fpu_busy_mtx, "busy fpu context list lock", NULL, MTX_DEF);
#endif

	/*
	 * PCI and pccard devices don't need to use IRPs to
	 * interact with their bus drivers (usually), so our
	 * emulated PCI and pccard drivers are just stubs.
	 * USB devices, on the other hand, do all their I/O
	 * by exchanging IRPs with the USB bus driver, so
	 * for that we need to provide emulator dispatcher
	 * routines, which are in a separate module.
	 */

	windrv_bus_attach(&fake_pci_driver, "PCI Bus");
	windrv_bus_attach(&fake_pccard_driver, "PCCARD Bus");

#ifdef __i386__

	/*
	 * In order to properly support SMP machines, we have
	 * to modify the GDT on each CPU, since we never know
	 * on which one we'll end up running.
	 */

	my_tids = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(struct tid) * mp_ncpus, 0);
	if (my_tids == NULL)
		panic("failed to allocate thread info blocks");
	/* Install our private GDT entry on every CPU. */
	smp_rendezvous(NULL, x86_newldt, NULL, NULL);
#endif
	return (0);
}
152
/*
 * Tear down everything windrv_libinit() set up: drain the driver
 * database, free the fake bus driver names, restore the i386 GDT on
 * every CPU, and release the amd64 FPU context cache.  Always
 * returns 0.  Assumes no drivers are in use any longer (the busy FPU
 * list must be empty).
 */
int
windrv_libfini(void)
{
	struct drvdb_ent *d;
#ifdef __amd64__
	struct fpu_cc_ent *ent;
#endif

	mtx_lock(&drvdb_mtx);
	while(STAILQ_FIRST(&drvdb_head) != NULL) {
		d = STAILQ_FIRST(&drvdb_head);
		STAILQ_REMOVE_HEAD(&drvdb_head, link);
		free(d, M_DEVBUF);
	}
	mtx_unlock(&drvdb_mtx);

	RtlFreeUnicodeString(&fake_pci_driver.dro_drivername);
	RtlFreeUnicodeString(&fake_pccard_driver.dro_drivername);

	mtx_destroy(&drvdb_mtx);

#ifdef __i386__
	/* Scrub our private GDT slot on every CPU before freeing TIDs. */
	smp_rendezvous(NULL, x86_oldldt, NULL, NULL);
	ExFreePool(my_tids);
#endif
#ifdef __amd64__
	/*
	 * No lock needed here: by the time we are called, no other
	 * consumers of the FPU context cache can still be running.
	 */
	while ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
		LIST_REMOVE(ent, entries);
		fpu_kern_free_ctx(ent->ctx);
		free(ent, M_DEVBUF);
	}
	mtx_destroy(&fpu_free_mtx);

	ent = LIST_FIRST(&fpu_busy_head);
	KASSERT(ent == NULL, ("busy fpu context list is not empty"));
	mtx_destroy(&fpu_busy_mtx);
#endif
	return (0);
}
192
193 /*
194 * Given the address of a driver image, find its corresponding
195 * driver_object.
196 */
197
198 driver_object *
199 windrv_lookup(img, name)
200 vm_offset_t img;
201 char *name;
202 {
203 struct drvdb_ent *d;
204 unicode_string us;
205 ansi_string as;
206
207 bzero((char *)&us, sizeof(us));
208
209 /* Damn unicode. */
210
211 if (name != NULL) {
212 RtlInitAnsiString(&as, name);
213 if (RtlAnsiStringToUnicodeString(&us, &as, TRUE))
214 return (NULL);
215 }
216
217 mtx_lock(&drvdb_mtx);
218 STAILQ_FOREACH(d, &drvdb_head, link) {
219 if (d->windrv_object->dro_driverstart == (void *)img ||
220 (bcmp((char *)d->windrv_object->dro_drivername.us_buf,
221 (char *)us.us_buf, us.us_len) == 0 && us.us_len)) {
222 mtx_unlock(&drvdb_mtx);
223 if (name != NULL)
224 ExFreePool(us.us_buf);
225 return (d->windrv_object);
226 }
227 }
228 mtx_unlock(&drvdb_mtx);
229
230 if (name != NULL)
231 RtlFreeUnicodeString(&us);
232
233 return (NULL);
234 }
235
236 struct drvdb_ent *
237 windrv_match(matchfunc, ctx)
238 matchfuncptr matchfunc;
239 void *ctx;
240 {
241 struct drvdb_ent *d;
242 int match;
243
244 mtx_lock(&drvdb_mtx);
245 STAILQ_FOREACH(d, &drvdb_head, link) {
246 if (d->windrv_devlist == NULL)
247 continue;
248 match = matchfunc(d->windrv_bustype, d->windrv_devlist, ctx);
249 if (match == TRUE) {
250 mtx_unlock(&drvdb_mtx);
251 return (d);
252 }
253 }
254 mtx_unlock(&drvdb_mtx);
255
256 return (NULL);
257 }
258
259 /*
260 * Remove a driver_object from our datatabase and destroy it. Throw
261 * away any custom driver extension info that may have been added.
262 */
263
/*
 * Remove a driver_object from our database and destroy it, forcibly
 * detaching any FreeBSD devices still bound to it first.  Throws away
 * any custom driver extension info that may have been added.
 *
 * mod/len are currently unused; img identifies the driver image.
 * Returns 0 on success, ENOENT if the image is not in the database.
 */
int
windrv_unload(mod, img, len)
	module_t mod;
	vm_offset_t img;
	int len;
{
	struct drvdb_ent *db, *r = NULL;
	driver_object *drv;
	device_object *d, *pdo;
	device_t dev;
	list_entry *e;

	drv = windrv_lookup(img, NULL);

	/*
	 * When we unload a driver image, we need to force a
	 * detach of any devices that might be using it. We
	 * need the PDOs of all attached devices for this.
	 * Getting at them is a little hard. We basically
	 * have to walk the device lists of all our bus
	 * drivers.
	 */

	mtx_lock(&drvdb_mtx);
	STAILQ_FOREACH(db, &drvdb_head, link) {
		/*
		 * Fake bus drivers have no devlist info.
		 * If this driver has devlist info, it's
		 * a loaded Windows driver and has no PDOs,
		 * so skip it.
		 */
		if (db->windrv_devlist != NULL)
			continue;
		pdo = db->windrv_object->dro_devobj;
		while (pdo != NULL) {
			d = pdo->do_attacheddev;
			if (d->do_drvobj != drv) {
				pdo = pdo->do_nextdev;
				continue;
			}
			/*
			 * Advance past this PDO before detaching: the
			 * detach may delete it.  drvdb_mtx must be
			 * dropped around device_detach() since the
			 * detach path can re-enter this module.
			 */
			dev = pdo->do_devext;
			pdo = pdo->do_nextdev;
			mtx_unlock(&drvdb_mtx);
			device_detach(dev);
			mtx_lock(&drvdb_mtx);
		}
	}

	/* Now unhook the database entry for this image, if any. */
	STAILQ_FOREACH(db, &drvdb_head, link) {
		if (db->windrv_object->dro_driverstart == (void *)img) {
			r = db;
			STAILQ_REMOVE(&drvdb_head, db, drvdb_ent, link);
			break;
		}
	}
	mtx_unlock(&drvdb_mtx);

	if (r == NULL)
		return (ENOENT);

	if (drv == NULL)
		return (ENOENT);

	/*
	 * Destroy any custom extensions that may have been added.
	 */
	drv = r->windrv_object;
	while (!IsListEmpty(&drv->dro_driverext->dre_usrext)) {
		e = RemoveHeadList(&drv->dro_driverext->dre_usrext);
		ExFreePool(e);
	}

	/* Free the driver extension */
	free(drv->dro_driverext, M_DEVBUF);

	/* Free the driver name */
	RtlFreeUnicodeString(&drv->dro_drivername);

	/* Free driver object */
	free(drv, M_DEVBUF);

	/* Free our DB handle */
	free(r, M_DEVBUF);

	return (0);
}
350
351 #define WINDRV_LOADED htonl(0x42534F44)
352
353 #ifdef __amd64__
354 static void
355 patch_user_shared_data_address(vm_offset_t img, size_t len)
356 {
357 unsigned long i, n, max_addr, *addr;
358
359 n = len - sizeof(unsigned long);
360 max_addr = KI_USER_SHARED_DATA + sizeof(kuser_shared_data);
361 for (i = 0; i < n; i++) {
362 addr = (unsigned long *)(img + i);
363 if (*addr >= KI_USER_SHARED_DATA && *addr < max_addr) {
364 *addr -= KI_USER_SHARED_DATA;
365 *addr += (unsigned long)&kuser_shared_data;
366 }
367 }
368 }
369 #endif
370
371 /*
372 * Loader routine for actual Windows driver modules, ultimately
373 * calls the driver's DriverEntry() routine.
374 */
375
376 int
377 windrv_load(mod, img, len, bustype, devlist, regvals)
378 module_t mod;
379 vm_offset_t img;
380 int len;
381 interface_type bustype;
382 void *devlist;
383 ndis_cfg *regvals;
384 {
385 image_import_descriptor imp_desc;
386 image_optional_header opt_hdr;
387 driver_entry entry;
388 struct drvdb_ent *new;
389 struct driver_object *drv;
390 int status;
391 uint32_t *ptr;
392 ansi_string as;
393
394 /*
395 * First step: try to relocate and dynalink the executable
396 * driver image.
397 */
398
399 ptr = (uint32_t *)(img + 8);
400 if (*ptr == WINDRV_LOADED)
401 goto skipreloc;
402
403 /* Perform text relocation */
404 if (pe_relocate(img))
405 return (ENOEXEC);
406
407 /* Dynamically link the NDIS.SYS routines -- required. */
408 if (pe_patch_imports(img, "NDIS", ndis_functbl))
409 return (ENOEXEC);
410
411 /* Dynamically link the HAL.dll routines -- optional. */
412 if (pe_get_import_descriptor(img, &imp_desc, "HAL") == 0) {
413 if (pe_patch_imports(img, "HAL", hal_functbl))
414 return (ENOEXEC);
415 }
416
417 /* Dynamically link ntoskrnl.exe -- optional. */
418 if (pe_get_import_descriptor(img, &imp_desc, "ntoskrnl") == 0) {
419 if (pe_patch_imports(img, "ntoskrnl", ntoskrnl_functbl))
420 return (ENOEXEC);
421 }
422
423 #ifdef __amd64__
424 patch_user_shared_data_address(img, len);
425 #endif
426
427 /* Dynamically link USBD.SYS -- optional */
428 if (pe_get_import_descriptor(img, &imp_desc, "USBD") == 0) {
429 if (pe_patch_imports(img, "USBD", usbd_functbl))
430 return (ENOEXEC);
431 }
432
433 *ptr = WINDRV_LOADED;
434
435 skipreloc:
436
437 /* Next step: find the driver entry point. */
438
439 pe_get_optional_header(img, &opt_hdr);
440 entry = (driver_entry)pe_translate_addr(img, opt_hdr.ioh_entryaddr);
441
442 /* Next step: allocate and store a driver object. */
443
444 new = malloc(sizeof(struct drvdb_ent), M_DEVBUF, M_NOWAIT|M_ZERO);
445 if (new == NULL)
446 return (ENOMEM);
447
448 drv = malloc(sizeof(driver_object), M_DEVBUF, M_NOWAIT|M_ZERO);
449 if (drv == NULL) {
450 free (new, M_DEVBUF);
451 return (ENOMEM);
452 }
453
454 /* Allocate a driver extension structure too. */
455
456 drv->dro_driverext = malloc(sizeof(driver_extension),
457 M_DEVBUF, M_NOWAIT|M_ZERO);
458
459 if (drv->dro_driverext == NULL) {
460 free(new, M_DEVBUF);
461 free(drv, M_DEVBUF);
462 return (ENOMEM);
463 }
464
465 InitializeListHead((&drv->dro_driverext->dre_usrext));
466
467 drv->dro_driverstart = (void *)img;
468 drv->dro_driversize = len;
469
470 RtlInitAnsiString(&as, DUMMY_REGISTRY_PATH);
471 if (RtlAnsiStringToUnicodeString(&drv->dro_drivername, &as, TRUE)) {
472 free(new, M_DEVBUF);
473 free(drv, M_DEVBUF);
474 return (ENOMEM);
475 }
476
477 new->windrv_object = drv;
478 new->windrv_regvals = regvals;
479 new->windrv_devlist = devlist;
480 new->windrv_bustype = bustype;
481
482 /* Now call the DriverEntry() function. */
483
484 status = MSCALL2(entry, drv, &drv->dro_drivername);
485
486 if (status != STATUS_SUCCESS) {
487 RtlFreeUnicodeString(&drv->dro_drivername);
488 free(drv, M_DEVBUF);
489 free(new, M_DEVBUF);
490 return (ENODEV);
491 }
492
493 mtx_lock(&drvdb_mtx);
494 STAILQ_INSERT_HEAD(&drvdb_head, new, link);
495 mtx_unlock(&drvdb_mtx);
496
497 return (0);
498 }
499
500 /*
501 * Make a new Physical Device Object for a device that was
502 * detected/plugged in. For us, the PDO is just a way to
503 * get at the device_t.
504 */
505
506 int
507 windrv_create_pdo(drv, bsddev)
508 driver_object *drv;
509 device_t bsddev;
510 {
511 device_object *dev;
512
513 /*
514 * This is a new physical device object, which technically
515 * is the "top of the stack." Consequently, we don't do
516 * an IoAttachDeviceToDeviceStack() here.
517 */
518
519 mtx_lock(&drvdb_mtx);
520 IoCreateDevice(drv, 0, NULL, FILE_DEVICE_UNKNOWN, 0, FALSE, &dev);
521 mtx_unlock(&drvdb_mtx);
522
523 /* Stash pointer to our BSD device handle. */
524
525 dev->do_devext = bsddev;
526
527 return (STATUS_SUCCESS);
528 }
529
530 void
531 windrv_destroy_pdo(drv, bsddev)
532 driver_object *drv;
533 device_t bsddev;
534 {
535 device_object *pdo;
536
537 pdo = windrv_find_pdo(drv, bsddev);
538
539 /* Remove reference to device_t */
540
541 pdo->do_devext = NULL;
542
543 mtx_lock(&drvdb_mtx);
544 IoDeleteDevice(pdo);
545 mtx_unlock(&drvdb_mtx);
546 }
547
548 /*
549 * Given a device_t, find the corresponding PDO in a driver's
550 * device list.
551 */
552
553 device_object *
554 windrv_find_pdo(drv, bsddev)
555 driver_object *drv;
556 device_t bsddev;
557 {
558 device_object *pdo;
559
560 mtx_lock(&drvdb_mtx);
561 pdo = drv->dro_devobj;
562 while (pdo != NULL) {
563 if (pdo->do_devext == bsddev) {
564 mtx_unlock(&drvdb_mtx);
565 return (pdo);
566 }
567 pdo = pdo->do_nextdev;
568 }
569 mtx_unlock(&drvdb_mtx);
570
571 return (NULL);
572 }
573
574 /*
575 * Add an internally emulated driver to the database. We need this
576 * to set up an emulated bus driver so that it can receive IRPs.
577 */
578
579 int
580 windrv_bus_attach(drv, name)
581 driver_object *drv;
582 char *name;
583 {
584 struct drvdb_ent *new;
585 ansi_string as;
586
587 new = malloc(sizeof(struct drvdb_ent), M_DEVBUF, M_NOWAIT|M_ZERO);
588 if (new == NULL)
589 return (ENOMEM);
590
591 RtlInitAnsiString(&as, name);
592 if (RtlAnsiStringToUnicodeString(&drv->dro_drivername, &as, TRUE))
593 {
594 free(new, M_DEVBUF);
595 return (ENOMEM);
596 }
597
598 /*
599 * Set up a fake image pointer to avoid false matches
600 * in windrv_lookup().
601 */
602 drv->dro_driverstart = (void *)0xFFFFFFFF;
603
604 new->windrv_object = drv;
605 new->windrv_devlist = NULL;
606 new->windrv_regvals = NULL;
607
608 mtx_lock(&drvdb_mtx);
609 STAILQ_INSERT_HEAD(&drvdb_head, new, link);
610 mtx_unlock(&drvdb_mtx);
611
612 return (0);
613 }
614
615 #ifdef __amd64__
616
617 extern void x86_64_wrap(void);
618 extern void x86_64_wrap_call(void);
619 extern void x86_64_wrap_end(void);
620
621 int
622 windrv_wrap(func, wrap, argcnt, ftype)
623 funcptr func;
624 funcptr *wrap;
625 int argcnt;
626 int ftype;
627 {
628 funcptr p;
629 vm_offset_t *calladdr;
630 vm_offset_t wrapstart, wrapend, wrapcall;
631
632 wrapstart = (vm_offset_t)&x86_64_wrap;
633 wrapend = (vm_offset_t)&x86_64_wrap_end;
634 wrapcall = (vm_offset_t)&x86_64_wrap_call;
635
636 /* Allocate a new wrapper instance. */
637
638 p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT);
639 if (p == NULL)
640 return (ENOMEM);
641
642 /* Copy over the code. */
643
644 bcopy((char *)wrapstart, p, (wrapend - wrapstart));
645
646 /* Insert the function address into the new wrapper instance. */
647
648 calladdr = (uint64_t *)((char *)p + (wrapcall - wrapstart) + 2);
649 *calladdr = (vm_offset_t)func;
650
651 *wrap = p;
652
653 return (0);
654 }
655
/*
 * Borrow an FPU context entry: take one from the free list if
 * available, otherwise allocate a fresh one.  The entry is moved to
 * (or inserted on) the busy list.  Returns NULL if no memory or no
 * FPU context could be obtained.  Never holds both list locks at
 * once, so there is no lock-ordering issue between them.
 */
static struct fpu_cc_ent *
request_fpu_cc_ent(void)
{
	struct fpu_cc_ent *ent;

	mtx_lock(&fpu_free_mtx);
	if ((ent = LIST_FIRST(&fpu_free_head)) != NULL) {
		LIST_REMOVE(ent, entries);
		mtx_unlock(&fpu_free_mtx);
		mtx_lock(&fpu_busy_mtx);
		LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
		mtx_unlock(&fpu_busy_mtx);
		return (ent);
	}
	mtx_unlock(&fpu_free_mtx);

	/* Free list empty: allocate a new entry and FPU context. */
	if ((ent = malloc(sizeof(struct fpu_cc_ent), M_DEVBUF, M_NOWAIT |
	    M_ZERO)) != NULL) {
		ent->ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
		    FPU_KERN_NOWAIT);
		if (ent->ctx != NULL) {
			mtx_lock(&fpu_busy_mtx);
			LIST_INSERT_HEAD(&fpu_busy_head, ent, entries);
			mtx_unlock(&fpu_busy_mtx);
		} else {
			free(ent, M_DEVBUF);
			ent = NULL;
		}
	}

	return (ent);
}
688
/*
 * Return a borrowed FPU context entry: move it from the busy list
 * back onto the free list for reuse.  Entries are cached rather than
 * freed; windrv_libfini() drains the free list at unload time.
 */
static void
release_fpu_cc_ent(struct fpu_cc_ent *ent)
{
	mtx_lock(&fpu_busy_mtx);
	LIST_REMOVE(ent, entries);
	mtx_unlock(&fpu_busy_mtx);
	mtx_lock(&fpu_free_mtx);
	LIST_INSERT_HEAD(&fpu_free_head, ent, entries);
	mtx_unlock(&fpu_free_mtx);
}
699
/*
 * FPU-safe trampoline for calling a 1-argument Windows function:
 * borrow a kernel FPU context around the actual call so the callee
 * may use SSE/FPU instructions.  NOTE(review): returns ENOMEM when
 * no context is available, which is indistinguishable from the
 * callee itself returning that value.
 */
uint64_t
_x86_64_call1(void *fn, uint64_t a)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call1(fn, a);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
715
/* Same as _x86_64_call1(), but for a 2-argument callee. */
uint64_t
_x86_64_call2(void *fn, uint64_t a, uint64_t b)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call2(fn, a, b);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
731
/* Same as _x86_64_call1(), but for a 3-argument callee. */
uint64_t
_x86_64_call3(void *fn, uint64_t a, uint64_t b, uint64_t c)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call3(fn, a, b, c);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
747
/* Same as _x86_64_call1(), but for a 4-argument callee. */
uint64_t
_x86_64_call4(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call4(fn, a, b, c, d);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
763
/* Same as _x86_64_call1(), but for a 5-argument callee. */
uint64_t
_x86_64_call5(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
    uint64_t e)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call5(fn, a, b, c, d, e);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
780
/* Same as _x86_64_call1(), but for a 6-argument callee. */
uint64_t
_x86_64_call6(void *fn, uint64_t a, uint64_t b, uint64_t c, uint64_t d,
    uint64_t e, uint64_t f)
{
	struct fpu_cc_ent *ent;
	uint64_t ret;

	if ((ent = request_fpu_cc_ent()) == NULL)
		return (ENOMEM);
	fpu_kern_enter(curthread, ent->ctx, FPU_KERN_NORMAL);
	ret = x86_64_call6(fn, a, b, c, d, e, f);
	fpu_kern_leave(curthread, ent->ctx);
	release_fpu_cc_ent(ent);

	return (ret);
}
797 #endif /* __amd64__ */
798
799
800 #ifdef __i386__
801
/* An i386 GDT segment descriptor, laid out bit-for-bit. */
struct x86desc {
	uint16_t	x_lolimit;
	uint16_t	x_base0;
	uint8_t		x_base1;
	uint8_t		x_flags;
	uint8_t		x_hilimit;
	uint8_t		x_base2;
};

/* Pseudo-descriptor as loaded/stored by LGDT/SGDT. */
struct gdt {
	uint16_t	limit;
	void		*base;
} __attribute__((__packed__));

/* Low-level helpers implemented in assembly (see the x86 .S file). */
extern uint16_t x86_getfs(void);
extern void x86_setfs(uint16_t);
extern void *x86_gettid(void);
extern void x86_critical_enter(void);
extern void x86_critical_exit(void);
extern void x86_getldt(struct gdt *, uint16_t *);
extern void x86_setldt(struct gdt *, uint16_t);

#define SEL_LDT	4		/* local descriptor table */
/* Convert a GDT slot number into an %fs selector value (RPL 0, GDT). */
#define SEL_TO_FS(x)	(((x) << 3))

/*
 * FreeBSD 6.0 and later has a special GDT segment reserved
 * specifically for us, so if GNDIS_SEL is defined, use that.
 * If not, use GTGATE_SEL, which is uninitialized and infrequently
 * used.
 */

#ifdef GNDIS_SEL
#define FREEBSD_EMPTYSEL	GNDIS_SEL
#else
#define FREEBSD_EMPTYSEL	GTGATE_SEL	/* slot 7 */
#endif

/*
 * The meanings of various bits in a descriptor vary a little
 * depending on whether the descriptor will be used as a
 * code, data or system descriptor. (And that in turn depends
 * on which segment register selects the descriptor.)
 * We're only trying to create a data segment, so the definitions
 * below are the ones that apply to a data descriptor.
 */

#define SEGFLAGLO_PRESENT	0x80	/* segment is present */
#define SEGFLAGLO_PRIVLVL	0x60	/* privlevel needed for this seg */
#define SEGFLAGLO_CD		0x10	/* 1 = code/data, 0 = system */
#define SEGFLAGLO_MBZ		0x08	/* must be zero */
#define SEGFLAGLO_EXPANDDOWN	0x04	/* limit expands down */
#define SEGFLAGLO_WRITEABLE	0x02	/* segment is writeable */
#define SEGGLAGLO_ACCESSED	0x01	/* segment has been accessed */
					/* NOTE(review): sic, "GLAG" typo;
					   unused, kept for compatibility */

#define SEGFLAGHI_GRAN		0x80	/* granularity, 1 = byte, 0 = page */
#define SEGFLAGHI_BIG		0x40	/* 1 = 32 bit stack, 0 = 16 bit */
859
860 /*
861 * Context switch from UNIX to Windows. Save the existing value
862 * of %fs for this processor, then change it to point to our
863 * fake TID. Note that it is also possible to pin ourselves
864 * to our current CPU, though I'm not sure this is really
865 * necessary. It depends on whether or not an interrupt might
866 * preempt us while Windows code is running and we wind up
867 * scheduled onto another CPU as a result. So far, it doesn't
868 * seem like this is what happens.
869 */
870
/*
 * Context switch from UNIX to Windows. Save the existing value
 * of %fs for this processor, then change it to point to our
 * fake TID. Note that it is also possible to pin ourselves
 * to our current CPU, though I'm not sure this is really
 * necessary. It depends on whether or not an interrupt might
 * preempt us while Windows code is running and we wind up
 * scheduled onto another CPU as a result. So far, it doesn't
 * seem like this is what happens.
 */

void
ctxsw_utow(void)
{
	struct tid *t;

	/* Pick the fake TID belonging to the CPU we're running on. */
	t = &my_tids[curthread->td_oncpu];

	/*
	 * Ugly hack. During system bootstrap (cold == 1), only CPU 0
	 * is running. So if we were loaded at bootstrap, only CPU 0
	 * will have our special GDT entry. This is a problem for SMP
	 * systems, so to deal with this, we check here to make sure
	 * the TID for this processor has been initialized, and if it
	 * hasn't, we need to do it right now or else things will
	 * explode.
	 */

	if (t->tid_self != t)
		x86_newldt(NULL);

	x86_critical_enter();
	t->tid_oldfs = x86_getfs();	/* remember FreeBSD's %fs */
	t->tid_cpu = curthread->td_oncpu;
	sched_pin();			/* stay on this CPU until wtou() */
	x86_setfs(SEL_TO_FS(t->tid_selector));
	x86_critical_exit();

	/* Now entering Windows land, population: you. */
}
900
901 /*
902 * Context switch from Windows back to UNIX. Restore %fs to
903 * its previous value. This always occurs after a call to
904 * ctxsw_utow().
905 */
906
/*
 * Context switch from Windows back to UNIX. Restore %fs to
 * its previous value. This always occurs after a call to
 * ctxsw_utow().
 */

void
ctxsw_wtou(void)
{
	struct tid *t;

	x86_critical_enter();
	t = x86_gettid();		/* current TID, found via %fs */
	x86_setfs(t->tid_oldfs);	/* restore FreeBSD's %fs */
	sched_unpin();			/* matches sched_pin() in utow() */
	x86_critical_exit();

	/* Welcome back to UNIX land, we missed you. */

#ifdef EXTRA_SANITY
	if (t->tid_cpu != curthread->td_oncpu)
		panic("ctxsw GOT MOVED TO OTHER CPU!");
#endif
}
925
926 static int windrv_wrap_stdcall(funcptr, funcptr *, int);
927 static int windrv_wrap_fastcall(funcptr, funcptr *, int);
928 static int windrv_wrap_regparm(funcptr, funcptr *);
929
930 extern void x86_fastcall_wrap(void);
931 extern void x86_fastcall_wrap_call(void);
932 extern void x86_fastcall_wrap_arg(void);
933 extern void x86_fastcall_wrap_end(void);
934
/*
 * Build a thunk for calling a native function with the Windows
 * _fastcall convention (first two args in %ecx/%edx, rest on the
 * stack, callee pops).  Copies the assembly wrapper template and
 * patches in the target address and the number of stack bytes the
 * thunk must clean up.  Returns 0 or ENOMEM.
 */
static int
windrv_wrap_fastcall(func, wrap, argcnt)
	funcptr func;
	funcptr *wrap;
	int8_t argcnt;
{
	funcptr p;
	vm_offset_t *calladdr;
	uint8_t *argaddr;
	vm_offset_t wrapstart, wrapend, wrapcall, wraparg;

	wrapstart = (vm_offset_t)&x86_fastcall_wrap;
	wrapend = (vm_offset_t)&x86_fastcall_wrap_end;
	wrapcall = (vm_offset_t)&x86_fastcall_wrap_call;
	wraparg = (vm_offset_t)&x86_fastcall_wrap_arg;

	/* Allocate a new wrapper instance. */

	p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT);
	if (p == NULL)
		return (ENOMEM);

	/* Copy over the code. */

	bcopy((char *)wrapstart, p, (wrapend - wrapstart));

	/*
	 * Insert the function address into the new wrapper instance.
	 * The +1 skips the one-byte opcode at the patch site in the
	 * template -- NOTE(review): confirm against the assembly source.
	 */

	calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1));
	*calladdr = (vm_offset_t)func;

	/*
	 * The first two fastcall arguments travel in registers, so
	 * only argcnt - 2 of them occupy stack slots (never negative).
	 */
	argcnt -= 2;
	if (argcnt < 1)
		argcnt = 0;

	argaddr = (u_int8_t *)((char *)p + ((wraparg - wrapstart) + 1));
	*argaddr = argcnt * sizeof(uint32_t);

	*wrap = p;

	return (0);
}
977
978 extern void x86_stdcall_wrap(void);
979 extern void x86_stdcall_wrap_call(void);
980 extern void x86_stdcall_wrap_arg(void);
981 extern void x86_stdcall_wrap_end(void);
982
/*
 * Build a thunk for calling a native function with the Windows
 * _stdcall convention (all args on the stack, callee pops argcnt
 * 32-bit slots).  Copies the assembly wrapper template and patches
 * in the target address and stack-cleanup byte count.  Also used for
 * cdecl with argcnt == 0 (caller pops).  Returns 0 or ENOMEM.
 */
static int
windrv_wrap_stdcall(func, wrap, argcnt)
	funcptr func;
	funcptr *wrap;
	uint8_t argcnt;
{
	funcptr p;
	vm_offset_t *calladdr;
	uint8_t *argaddr;
	vm_offset_t wrapstart, wrapend, wrapcall, wraparg;

	wrapstart = (vm_offset_t)&x86_stdcall_wrap;
	wrapend = (vm_offset_t)&x86_stdcall_wrap_end;
	wrapcall = (vm_offset_t)&x86_stdcall_wrap_call;
	wraparg = (vm_offset_t)&x86_stdcall_wrap_arg;

	/* Allocate a new wrapper instance. */

	p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT);
	if (p == NULL)
		return (ENOMEM);

	/* Copy over the code. */

	bcopy((char *)wrapstart, p, (wrapend - wrapstart));

	/*
	 * Insert the function address into the new wrapper instance.
	 * The +1 skips the one-byte opcode at the patch site in the
	 * template -- NOTE(review): confirm against the assembly source.
	 */

	calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1));
	*calladdr = (vm_offset_t)func;

	/* Patch in the number of stack bytes the callee must pop. */
	argaddr = (u_int8_t *)((char *)p + ((wraparg - wrapstart) + 1));
	*argaddr = argcnt * sizeof(uint32_t);

	*wrap = p;

	return (0);
}
1021
1022 extern void x86_regparm_wrap(void);
1023 extern void x86_regparm_wrap_call(void);
1024 extern void x86_regparm_wrap_end(void);
1025
1026 static int
1027 windrv_wrap_regparm(func, wrap)
1028 funcptr func;
1029 funcptr *wrap;
1030 {
1031 funcptr p;
1032 vm_offset_t *calladdr;
1033 vm_offset_t wrapstart, wrapend, wrapcall;
1034
1035 wrapstart = (vm_offset_t)&x86_regparm_wrap;
1036 wrapend = (vm_offset_t)&x86_regparm_wrap_end;
1037 wrapcall = (vm_offset_t)&x86_regparm_wrap_call;
1038
1039 /* Allocate a new wrapper instance. */
1040
1041 p = malloc((wrapend - wrapstart), M_DEVBUF, M_NOWAIT);
1042 if (p == NULL)
1043 return (ENOMEM);
1044
1045 /* Copy over the code. */
1046
1047 bcopy(x86_regparm_wrap, p, (wrapend - wrapstart));
1048
1049 /* Insert the function address into the new wrapper instance. */
1050
1051 calladdr = (vm_offset_t *)((char *)p + ((wrapcall - wrapstart) + 1));
1052 *calladdr = (vm_offset_t)func;
1053
1054 *wrap = p;
1055
1056 return (0);
1057 }
1058
1059 int
1060 windrv_wrap(func, wrap, argcnt, ftype)
1061 funcptr func;
1062 funcptr *wrap;
1063 int argcnt;
1064 int ftype;
1065 {
1066 switch(ftype) {
1067 case WINDRV_WRAP_FASTCALL:
1068 return (windrv_wrap_fastcall(func, wrap, argcnt));
1069 case WINDRV_WRAP_STDCALL:
1070 return (windrv_wrap_stdcall(func, wrap, argcnt));
1071 case WINDRV_WRAP_REGPARM:
1072 return (windrv_wrap_regparm(func, wrap));
1073 case WINDRV_WRAP_CDECL:
1074 return (windrv_wrap_stdcall(func, wrap, 0));
1075 default:
1076 break;
1077 }
1078
1079 return (EINVAL);
1080 }
1081
1082 static void
1083 x86_oldldt(dummy)
1084 void *dummy;
1085 {
1086 struct x86desc *gdt;
1087 struct gdt gtable;
1088 uint16_t ltable;
1089
1090 mtx_lock_spin(&dt_lock);
1091
1092 /* Grab location of existing GDT. */
1093
1094 x86_getldt(>able, <able);
1095
1096 /* Find the slot we updated. */
1097
1098 gdt = gtable.base;
1099 gdt += FREEBSD_EMPTYSEL;
1100
1101 /* Empty it out. */
1102
1103 bzero((char *)gdt, sizeof(struct x86desc));
1104
1105 /* Restore GDT. */
1106
1107 x86_setldt(>able, ltable);
1108
1109 mtx_unlock_spin(&dt_lock);
1110 }
1111
1112 static void
1113 x86_newldt(dummy)
1114 void *dummy;
1115 {
1116 struct gdt gtable;
1117 uint16_t ltable;
1118 struct x86desc *l;
1119 struct thread *t;
1120
1121 t = curthread;
1122
1123 mtx_lock_spin(&dt_lock);
1124
1125 /* Grab location of existing GDT. */
1126
1127 x86_getldt(>able, <able);
1128
1129 /* Get pointer to the GDT table. */
1130
1131 l = gtable.base;
1132
1133 /* Get pointer to empty slot */
1134
1135 l += FREEBSD_EMPTYSEL;
1136
1137 /* Initialize TID for this CPU. */
1138
1139 my_tids[t->td_oncpu].tid_selector = FREEBSD_EMPTYSEL;
1140 my_tids[t->td_oncpu].tid_self = &my_tids[t->td_oncpu];
1141
1142 /* Set up new GDT entry. */
1143
1144 l->x_lolimit = sizeof(struct tid);
1145 l->x_hilimit = SEGFLAGHI_GRAN|SEGFLAGHI_BIG;
1146 l->x_base0 = (vm_offset_t)(&my_tids[t->td_oncpu]) & 0xFFFF;
1147 l->x_base1 = ((vm_offset_t)(&my_tids[t->td_oncpu]) >> 16) & 0xFF;
1148 l->x_base2 = ((vm_offset_t)(&my_tids[t->td_oncpu]) >> 24) & 0xFF;
1149 l->x_flags = SEGFLAGLO_PRESENT|SEGFLAGLO_CD|SEGFLAGLO_WRITEABLE;
1150
1151 /* Update the GDT. */
1152
1153 x86_setldt(>able, ltable);
1154
1155 mtx_unlock_spin(&dt_lock);
1156
1157 /* Whew. */
1158 }
1159
1160 #endif /* __i386__ */
1161
1162 int
1163 windrv_unwrap(func)
1164 funcptr func;
1165 {
1166 free(func, M_DEVBUF);
1167
1168 return (0);
1169 }
Cache object: 871051cc7d3ebcbffb3228dfa709a200
|