/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifdef __FreeBSD__
__FBSDID("$FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.43.2.5 2005/03/31 04:24:36 wpaul Exp $");
#endif
#ifdef __NetBSD__
__KERNEL_RCSID(0, "$NetBSD: subr_ntoskrnl.c,v 1.12 2007/12/05 08:45:30 ad Exp $");
#endif

#ifdef __FreeBSD__
#include <sys/ctype.h>
#endif
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#ifdef __FreeBSD__
#include <sys/mutex.h>
#endif

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#ifdef __FreeBSD__
#include <sys/module.h>
#else
#include <sys/lkm.h>
#endif

#include <sys/atomic.h>
#ifdef __FreeBSD__
#include <machine/clock.h>
#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#endif
#include <sys/bus.h>
#include <machine/stdarg.h>

#ifdef __FreeBSD__
#include <sys/bus.h>
#include <sys/rman.h>
#endif

#ifdef __NetBSD__
#include <uvm/uvm.h>
#include <uvm/uvm_param.h>
#include <uvm/uvm_pmap.h>
#include <sys/pool.h>
#include <sys/reboot.h> /* for AB_VERBOSE */
#else
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#endif

#include <compat/ndis/pe_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ndis_var.h>
#ifdef __NetBSD__
#include <compat/ndis/nbcompat.h>
#endif

#define __regparm __attribute__((regparm(3)))

#ifdef __NetBSD__
/* Turn on DbgPrint() from the Windows driver */
#define boothowto AB_VERBOSE
#endif

__stdcall static uint8_t RtlEqualUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static void RtlCopyUnicodeString(ndis_unicode_string *,
	ndis_unicode_string *);
__stdcall static ndis_status RtlUnicodeStringToAnsiString(ndis_ansi_string *,
	ndis_unicode_string *, uint8_t);
__stdcall static ndis_status RtlAnsiStringToUnicodeString(ndis_unicode_string *,
	ndis_ansi_string *, uint8_t);
__stdcall static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
__stdcall static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
__stdcall static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
__stdcall static irp *IoAllocateIrp(uint8_t, uint8_t);
__stdcall static void IoReuseIrp(irp *, uint32_t);
__stdcall static void IoFreeIrp(irp *);
__stdcall static void IoInitializeIrp(irp *, uint16_t, uint8_t);
__stdcall static irp *IoMakeAssociatedIrp(irp *, uint8_t);
__stdcall static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_wakeup(void *);
static void ntoskrnl_timercall(void *);
static void ntoskrnl_run_dpc(void *);
__stdcall static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
__stdcall static uint16_t READ_REGISTER_USHORT(uint16_t *);
__stdcall static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
__stdcall static uint32_t READ_REGISTER_ULONG(uint32_t *);
__stdcall static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
__stdcall static uint8_t READ_REGISTER_UCHAR(uint8_t *);
__stdcall static int64_t _allmul(int64_t, int64_t);
__stdcall static int64_t _alldiv(int64_t, int64_t);
__stdcall static int64_t _allrem(int64_t, int64_t);
__regparm static int64_t _allshr(int64_t, uint8_t);
__regparm static int64_t _allshl(int64_t, uint8_t);
__stdcall static uint64_t _aullmul(uint64_t, uint64_t);
__stdcall static uint64_t _aulldiv(uint64_t, uint64_t);
__stdcall static uint64_t _aullrem(uint64_t, uint64_t);
__regparm static uint64_t _aullshr(uint64_t, uint8_t);
__regparm static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
__stdcall static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeletePagedLookasideList(paged_lookaside_list *);
__stdcall static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
__stdcall static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
__fastcall static slist_entry
	*InterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry));
__fastcall static slist_entry *InterlockedPopEntrySList(REGARGS1(slist_header
	*head));
__fastcall static slist_entry
	*ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
	slist_entry *entry), kspin_lock *lock);
__fastcall static slist_entry
	*ExInterlockedPopEntrySList(REGARGS2(slist_header *head,
	kspin_lock *lock));
__stdcall static uint16_t
	ExQueryDepthSList(slist_header *);
__fastcall static uint32_t
	InterlockedIncrement(REGARGS1(volatile uint32_t *addend));
__fastcall static uint32_t
	InterlockedDecrement(REGARGS1(volatile uint32_t *addend));
__fastcall static void
	ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t));
__stdcall static uint32_t MmSizeOfMdl(void *, size_t);
__stdcall static void MmBuildMdlForNonPagedPool(mdl *);
__stdcall static void *MmMapLockedPages(mdl *, uint8_t);
__stdcall static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
__stdcall static void MmUnmapLockedPages(void *, mdl *);
__stdcall static size_t RtlCompareMemory(const void *, const void *, size_t);
__stdcall static void RtlInitAnsiString(ndis_ansi_string *, char *);
__stdcall static void RtlInitUnicodeString(ndis_unicode_string *,
	uint16_t *);
__stdcall static void RtlFreeUnicodeString(ndis_unicode_string *);
__stdcall static void RtlFreeAnsiString(ndis_ansi_string *);
__stdcall static ndis_status RtlUnicodeStringToInteger(ndis_unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
__stdcall static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
__stdcall static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
__stdcall static ndis_status PsTerminateSystemThread(ndis_status);
__stdcall static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
__stdcall static void KeInitializeMutex(kmutant *, uint32_t);
__stdcall static uint32_t KeReleaseMutex(kmutant *, uint8_t);
__stdcall static uint32_t KeReadStateMutex(kmutant *);
__stdcall static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
__fastcall static void ObfDereferenceObject(REGARGS1(void *object));
__stdcall static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
__stdcall static void DbgBreakPoint(void);
__stdcall static void dummy(void);

#ifdef __FreeBSD__
static struct mtx ntoskrnl_dispatchlock;
#else /* __NetBSD__ */
static struct simplelock ntoskrnl_dispatchlock;
#define DISPATCH_LOCK() do {s = splnet(); simple_lock(&ntoskrnl_dispatchlock);} while(0)
#define DISPATCH_UNLOCK() do {simple_unlock(&ntoskrnl_dispatchlock); splx(s);} while(0)
#endif

static kspin_lock ntoskrnl_global;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
#ifdef __FreeBSD__
static uma_zone_t mdl_zone;
#else
static struct pool mdl_pool;
#endif

int
ntoskrnl_libinit()
{
	image_patch_table *patch;
#ifdef __FreeBSD__
	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF);
#else /* __NetBSD__ */
	simple_lock_init(&ntoskrnl_dispatchlock);
#endif
	KeInitializeSpinLock(&ntoskrnl_global);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	TAILQ_INIT(&ntoskrnl_reflist);

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap);
		patch++;
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
	 */

#ifdef __FreeBSD__
	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
#else
	pool_init(&mdl_pool, MDL_ZONE_SIZE, 0, 0, 0, "winmdl", NULL,
	    IPL_VM);
#endif

	return(0);
}

int
ntoskrnl_libfini()
{
	image_patch_table *patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

#ifdef __FreeBSD__
	uma_zdestroy(mdl_zone);
	mtx_destroy(&ntoskrnl_dispatchlock);
#else
	pool_destroy(&mdl_pool);
	/* XXX destroy lock */
#endif

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
	void *buf;
	int ch;
	size_t size;
{
	return(memset(buf, ch, size));
}

__stdcall static uint8_t
RtlEqualUnicodeString(str1, str2, caseinsensitive)
	ndis_unicode_string *str1;
	ndis_unicode_string *str2;
	uint8_t caseinsensitive;
{
	int i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

__stdcall static void
RtlCopyUnicodeString(dest, src)
	ndis_unicode_string *dest;
	ndis_unicode_string *src;
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

__stdcall static ndis_status
RtlUnicodeStringToAnsiString(dest, src, allocate)
	ndis_ansi_string *dest;
	ndis_unicode_string *src;
	uint8_t allocate;
{
	char *astr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_unicode_to_ascii(src->us_buf, src->us_len, &astr))
			return(NDIS_STATUS_FAILURE);
		dest->nas_buf = astr;
		dest->nas_len = dest->nas_maxlen = strlen(astr);
	} else {
		dest->nas_len = src->us_len / 2; /* XXX */
		if (dest->nas_maxlen < dest->nas_len)
			dest->nas_len = dest->nas_maxlen;
		ndis_unicode_to_ascii(src->us_buf, dest->nas_len * 2,
		    &dest->nas_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall static ndis_status
RtlAnsiStringToUnicodeString(dest, src, allocate)
	ndis_unicode_string *dest;
	ndis_ansi_string *src;
	uint8_t allocate;
{
	uint16_t *ustr = NULL;

	if (dest == NULL || src == NULL)
		return(NDIS_STATUS_FAILURE);

	if (allocate == TRUE) {
		if (ndis_ascii_to_unicode(src->nas_buf, &ustr))
			return(NDIS_STATUS_FAILURE);
		dest->us_buf = ustr;
		dest->us_len = dest->us_maxlen = strlen(src->nas_buf) * 2;
	} else {
		dest->us_len = src->nas_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
		ndis_ascii_to_unicode(src->nas_buf, &dest->us_buf);
	}
	return (NDIS_STATUS_SUCCESS);
}

__stdcall void *
ExAllocatePoolWithTag(
	uint32_t pooltype,
	size_t len,
	uint32_t tag)
{
	void *buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT);
	if (buf == NULL)
		return(NULL);
	return(buf);
}

__stdcall void
ExFreePool(buf)
	void *buf;
{
	free(buf, M_DEVBUF);
	return;
}

__stdcall uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
	driver_object *drv;
	void *clid;
	uint32_t extlen;
	void **ext;
{
	custom_extension *ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	INSERT_LIST_TAIL((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}
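
#if 0
/*
 * Illustrative sketch only (not compiled): the client extension is
 * carved out of the same allocation as its bookkeeping header, so
 * the caller gets back a pointer just past the custom_extension:
 *
 *	[ custom_extension | extlen bytes for the caller ... ]
 *	^ce                 ^(void *)(ce + 1) == *ext
 *
 * The function name and the 64-byte size below are hypothetical.
 */
static void
example_extension_usage(driver_object *drv, void *clid)
{
	void *ext;

	if (IoAllocateDriverObjectExtension(drv, clid,
	    64, &ext) == STATUS_SUCCESS)
		bzero(ext, 64);	/* 64 private bytes for this client */
}
#endif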

__stdcall void *
IoGetDriverObjectExtension(drv, clid)
	driver_object *drv;
	void *clid;
{
	list_entry *e;
	custom_extension *ce;

	printf("in IoGetDriverObjectExtension\n");

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid) {
			printf("found\n");
			return((void *)(ce + 1));
		}
		e = e->nle_flink;
	}
	printf("not found\n");
	return(NULL);
}

__stdcall uint32_t
IoCreateDevice(
	driver_object *drv,
	uint32_t devextlen,
	unicode_string *devname,
	uint32_t devtype,
	uint32_t devchars,
	uint8_t exclusive,
	device_object **newdev)
{
	device_object *dev;

#ifdef NDIS_LKM
	printf("In IoCreateDevice: drv = %x, devextlen = %x\n", drv, devextlen);
#endif

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
#ifdef NDIS_LKM
	printf("dev = %x\n", dev);
#endif
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * The vpb (volume parameter block) is used for disk/tape
	 * devices, but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAddDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}

__stdcall void
IoDeleteDevice(dev)
	device_object *dev;
{
	device_object *prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

__stdcall device_object *
IoGetAttachedDevice(dev)
	device_object *dev;
{
	device_object *d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

__stdcall static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
	uint32_t func;
	device_object *dobj;
	void *buf;
	uint32_t len;
	uint64_t *off;
	nt_kevent *event;
	io_status_block *status;
{
	irp *ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

__stdcall static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
	uint32_t func;
	device_object *dobj;
	void *buf;
	uint32_t len;
	uint64_t *off;
	io_status_block *status;
{
	irp *ip;
	io_stack_location *sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

__stdcall static irp *
IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
    isinternal, event, status)
	uint32_t iocode;
	device_object *dobj;
	void *ibuf;
	uint32_t ilen;
	void *obuf;
	uint32_t olen;
	uint8_t isinternal;
	nt_kevent *event;
	io_status_block *status;
{
	irp *ip;
	io_stack_location *sl;
	uint32_t buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch(IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

__stdcall static irp *
IoAllocateIrp(
	uint8_t stsize,
	uint8_t chargequota)
{
	irp *i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

__stdcall static irp *
IoMakeAssociatedIrp(ip, stsize)
	irp *ip;
	uint8_t stsize;
{
	irp *associrp;
#ifdef __NetBSD__
	int s;
#endif

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(associrp);
}

__stdcall static void
IoFreeIrp(ip)
	irp *ip;
{
	ExFreePool(ip);
	return;
}

__stdcall static void
IoInitializeIrp(io, psize, ssize)
	irp *io;
	uint16_t psize;
	uint8_t ssize;
{
	bzero((char *)io, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	INIT_LIST_HEAD(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

__stdcall static void
IoReuseIrp(ip, status)
	irp *ip;
	uint32_t status;
{
	uint8_t allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

__stdcall void
IoAcquireCancelSpinLock(irql)
	uint8_t *irql;
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall void
IoReleaseCancelSpinLock(irql)
	uint8_t irql;
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

__stdcall uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return(TRUE);
}
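
#if 0
/*
 * Illustrative sketch only (not compiled): how a driver-supplied
 * cancel routine pairs with IoCancelIrp() above. IoCancelIrp()
 * invokes the routine with the cancel spinlock held, so the routine
 * must release it before completing the IRP. The function name is
 * hypothetical.
 */
__stdcall static void
example_cancel_routine(device_object *dobj, irp *ip)
{
	/* Drop the cancel spinlock acquired by IoCancelIrp(). */
	IoReleaseCancelSpinLock(ip->irp_cancelirql);

	/* Then complete the cancelled request. */
	IoCompleteRequest(ip, IO_NO_INCREMENT);
}
#endif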

__fastcall uint32_t
IofCallDriver(REGARGS2(device_object *dobj, irp *ip))
{
	driver_object *drvobj;
	io_stack_location *sl;
	uint32_t status;
	driver_dispatch disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

__fastcall void
IofCompleteRequest(REGARGS2(irp *ip, uint8_t prioboost))
{
	uint32_t i;
	uint32_t status;
	device_object *dobj;
	io_stack_location *sl;
	completion_func cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;

		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t masterirpcnt;
		irp *masterirp;
		mdl *m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt = FASTCALL1(InterlockedDecrement,
		    &masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}

	return;
}

__stdcall device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object *src;
	device_object *dst;
{
	device_object *attached;
#ifdef __NetBSD__
	int s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(attached);
}

__stdcall void
IoDetachDevice(topdev)
	device_object *topdev;
{
	device_object *tail;
#ifdef __NetBSD__
	int s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_wakeup(arg)
	void *arg;
{
	nt_dispatch_header *obj;
	wait_block *w;
	list_entry *e;
#ifdef __FreeBSD__
	struct thread *td;
#endif

	obj = arg;

	obj->dh_sigstate = TRUE;
	e = obj->dh_waitlisthead.nle_flink;
	while (e != &obj->dh_waitlisthead) {
		w = (wait_block *)e;
		/* TODO: is this correct? */
#ifdef __FreeBSD__
		td = w->wb_kthread;
		ndis_thresume(td->td_proc);
#else
		ndis_thresume(curproc);
#endif
		/*
		 * For synchronization objects, only wake up
		 * the first waiter.
		 */
		if (obj->dh_type == EVENT_TYPE_SYNC)
			break;
		e = e->nle_flink;
	}

	return;
}

static void
ntoskrnl_time(tval)
	uint64_t *tval;
{
	struct timespec ts;
#ifdef __NetBSD__
	struct timeval tv;
	microtime(&tv);
	TIMEVAL_TO_TIMESPEC(&tv,&ts);
#else
	nanotime(&ts);
#endif

	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600ULL * 10000000; /* 100ns units since 1601 */

	return;
}
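
#if 0
/*
 * Illustrative sketch only (not compiled): Windows timestamps count
 * 100ns intervals since January 1, 1601, while a BSD timespec counts
 * from the Unix epoch (January 1, 1970). ntoskrnl_time() therefore
 * scales seconds by 10,000,000 (100ns units per second) and adds the
 * 11,644,473,600-second gap between the two epochs, also scaled.
 * A hypothetical reverse conversion would look like this:
 */
static void
example_time_to_timespec(uint64_t tval, struct timespec *ts)
{
	tval -= (uint64_t)11644473600ULL * 10000000; /* rebase to 1970 */
	ts->tv_sec = tval / 10000000;		/* 100ns units -> seconds */
	ts->tv_nsec = (tval % 10000000) * 100;	/* remainder -> nanoseconds */
}
#endif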

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */

__stdcall uint32_t
KeWaitForSingleObject(
	nt_dispatch_header *obj,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime)
{
#ifdef __FreeBSD__
	struct thread *td = curthread;
#endif
	kmutant *km;
	wait_block w;
	struct timeval tv;
	int error = 0;
	uint64_t curtime;
#ifdef __NetBSD__
	int s;
#endif

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	/*
	 * See if the object is a mutex. If so, and we already own
	 * it, then just increment the acquisition count and return.
	 *
	 * For any other kind of object, see if it's already in the
	 * signalled state, and if it is, just return. If the object
	 * is marked as a synchronization event, reset the state to
	 * unsignalled.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
		    km->km_ownerthread == curthread->td_proc) {
#else
		    km->km_ownerthread == curproc) {
#endif
			obj->dh_sigstate = FALSE;
			km->km_acquirecnt++;
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif

#ifdef __FreeBSD__
			mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
			DISPATCH_UNLOCK();
#endif
			return (STATUS_SUCCESS);
		}
	} else if (obj->dh_sigstate == TRUE) {
		if (obj->dh_type == EVENT_TYPE_SYNC)
			obj->dh_sigstate = FALSE;

#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return (STATUS_SUCCESS);
	}

	w.wb_object = obj;
#ifdef __FreeBSD__
	w.wb_kthread = td;
#endif

	INSERT_LIST_TAIL((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
	 */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

#ifdef __FreeBSD__
	error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#else
	error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
	    duetime == NULL ? 0 : tvtohz(&tv));
#endif

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		REMOVE_LIST_ENTRY((&w.wb_waitlist));
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_TIMEOUT);
	}

	/*
	 * Mutexes are always synchronization objects, which means
	 * if several threads are waiting to acquire it, only one will
	 * be woken up. If that one is us, and the mutex is up for grabs,
	 * grab it.
	 */

	if (obj->dh_size == OTYPE_MUTEX) {
		km = (kmutant *)obj;
		if (km->km_ownerthread == NULL) {
#ifdef __FreeBSD__
			km->km_ownerthread = curthread->td_proc;
#else
			km->km_ownerthread = curproc;
#endif
			km->km_acquirecnt++;
		}
	}

	if (obj->dh_type == EVENT_TYPE_SYNC)
		obj->dh_sigstate = FALSE;
	REMOVE_LIST_ENTRY((&w.wb_waitlist));

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(STATUS_SUCCESS);
}
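
#if 0
/*
 * Illustrative sketch only (not compiled): the typical driver-side
 * pairing of KeInitializeEvent()/KeSetEvent() with
 * KeWaitForSingleObject(). A negative duetime is relative, so one
 * second is expressed as -10,000,000 100ns units. The function name
 * is hypothetical; a real driver would embed the event in its softc.
 */
static void
example_event_wait(nt_kevent *ev)
{
	int64_t duetime;
	uint32_t rval;

	KeInitializeEvent(ev, EVENT_TYPE_SYNC, FALSE);
	duetime = -10000000;	/* relative timeout: 1 second */
	rval = KeWaitForSingleObject((nt_dispatch_header *)ev,
	    0, 0, TRUE, &duetime);
	if (rval == STATUS_TIMEOUT)
		printf("example: wait timed out\n");
}
#endif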

__stdcall static uint32_t
KeWaitForMultipleObjects(
	uint32_t cnt,
	nt_dispatch_header *obj[],
	uint32_t wtype,
	uint32_t reason,
	uint32_t mode,
	uint8_t alertable,
	int64_t *duetime,
	wait_block *wb_array)
{
#ifdef __FreeBSD__
	struct thread *td = curthread;
#endif
	kmutant *km;
	wait_block _wb_array[THREAD_WAIT_OBJECTS];
	wait_block *w;
	struct timeval tv;
	int i, wcnt = 0, widx = 0, error = 0;
	uint64_t curtime;
	struct timespec t1, t2;
#ifdef __NetBSD__
	struct timeval tv1,tv2;
	int s;
#endif

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (wb_array == NULL)
		w = &_wb_array[0];
	else
		w = wb_array;

	/* First pass: see if we can satisfy any waits immediately. */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_size == OTYPE_MUTEX) {
			km = (kmutant *)obj[i];
			if (km->km_ownerthread == NULL ||
#ifdef __FreeBSD__
			    km->km_ownerthread == curthread->td_proc) {
#else
			    km->km_ownerthread == curproc) {
#endif
				obj[i]->dh_sigstate = FALSE;
				km->km_acquirecnt++;
#ifdef __FreeBSD__
				km->km_ownerthread = curthread->td_proc;
#else
				km->km_ownerthread = curproc;
#endif
				if (wtype == WAITTYPE_ANY) {
#ifdef __FreeBSD__
					mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
					DISPATCH_UNLOCK();
#endif
					return (STATUS_WAIT_0 + i);
				}
			}
		} else if (obj[i]->dh_sigstate == TRUE) {
			if (obj[i]->dh_type == EVENT_TYPE_SYNC)
				obj[i]->dh_sigstate = FALSE;
			if (wtype == WAITTYPE_ANY) {
#ifdef __FreeBSD__
				mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
				DISPATCH_UNLOCK();
#endif
				return (STATUS_WAIT_0 + i);
			}
		}
	}

	/*
	 * Second pass: set up wait for anything we can't
	 * satisfy immediately.
	 */

	for (i = 0; i < cnt; i++) {
		if (obj[i]->dh_sigstate == TRUE)
			continue;
		INSERT_LIST_TAIL((&obj[i]->dh_waitlisthead),
		    (&w[i].wb_waitlist));
#ifdef __FreeBSD__
		w[i].wb_kthread = td;
#endif
		w[i].wb_object = obj[i];
		wcnt++;
	}

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
#ifdef __FreeBSD__
		nanotime(&t1);
#else
		microtime(&tv1);
		TIMEVAL_TO_TIMESPEC(&tv1,&t1);
#endif

#ifdef __FreeBSD__
		error = ndis_thsuspend(td->td_proc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));
#else
		error = ndis_thsuspend(curproc, &ntoskrnl_dispatchlock,
		    duetime == NULL ? 0 : tvtohz(&tv));
#endif
#ifdef __FreeBSD__
		nanotime(&t2);
#else
		microtime(&tv2);
		TIMEVAL_TO_TIMESPEC(&tv2,&t2);
#endif

		for (i = 0; i < cnt; i++) {
			if (obj[i]->dh_size == OTYPE_MUTEX) {
				km = (kmutant *)obj[i];
				if (km->km_ownerthread == NULL) {
					km->km_ownerthread =
#ifdef __FreeBSD__
					    curthread->td_proc;
#else
					    curproc;
#endif
					km->km_acquirecnt++;
				}
			}
			if (obj[i]->dh_sigstate == TRUE) {
				widx = i;
				if (obj[i]->dh_type == EVENT_TYPE_SYNC)
					obj[i]->dh_sigstate = FALSE;
				REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
				wcnt--;
			}
		}

		if (error || wtype == WAITTYPE_ANY)
			break;

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}

	if (wcnt) {
		for (i = 0; i < cnt; i++)
			REMOVE_LIST_ENTRY((&w[i].wb_waitlist));
	}

	if (error == EWOULDBLOCK) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_TIMEOUT);
	}

	if (wtype == WAITTYPE_ANY && wcnt) {
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_WAIT_0 + widx);
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(STATUS_SUCCESS);
}

__stdcall static void
WRITE_REGISTER_USHORT(reg, val)
	uint16_t *reg;
	uint16_t val;
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint16_t
READ_REGISTER_USHORT(reg)
	uint16_t *reg;
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_ULONG(reg, val)
	uint32_t *reg;
	uint32_t val;
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static uint32_t
READ_REGISTER_ULONG(reg)
	uint32_t *reg;
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static uint8_t
READ_REGISTER_UCHAR(reg)
	uint8_t *reg;
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

__stdcall static void
WRITE_REGISTER_UCHAR(reg, val)
	uint8_t *reg;
	uint8_t val;
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

__stdcall static int64_t
_allmul(a, b)
	int64_t a;
	int64_t b;
{
	return (a * b);
}

__stdcall static int64_t
_alldiv(a, b)
	int64_t a;
	int64_t b;
{
	return (a / b);
}

__stdcall static int64_t
_allrem(a, b)
	int64_t a;
	int64_t b;
{
	return (a % b);
}

__stdcall static uint64_t
_aullmul(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a * b);
}

__stdcall static uint64_t
_aulldiv(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a / b);
}

__stdcall static uint64_t
_aullrem(a, b)
	uint64_t a;
	uint64_t b;
{
	return (a % b);
}

__regparm static int64_t
_allshl(a, b)
	int64_t a;
	uint8_t b;
{
	return (a << b);
}

__regparm static uint64_t
_aullshl(a, b)
	uint64_t a;
	uint8_t b;
{
	return (a << b);
}

__regparm static int64_t
_allshr(a, b)
	int64_t a;
	uint8_t b;
{
	return (a >> b);
}

__regparm static uint64_t
_aullshr(a, b)
	uint64_t a;
	uint8_t b;
{
	return (a >> b);
}

static slist_entry *
ntoskrnl_pushsl(head, entry)
	slist_header *head;
	slist_entry *entry;
{
	slist_entry *oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(head)
	slist_header *head;
{
	slist_entry *first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}

/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a pretty nasty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
 */

static funcptr
ntoskrnl_findwrap(func)
	funcptr func;
{
	image_patch_table *patch;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		if ((funcptr)patch->ipt_func == func)
			return((funcptr)patch->ipt_wrap);
		patch++;
	}

	return(NULL);
}

__stdcall static void
ExInitializePagedLookasideList(
	paged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	bzero((char *)lookaside, sizeof(paged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeletePagedLookasideList(lookaside)
	paged_lookaside_list *lookaside;
{
	void *buf;
	__stdcall void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

__stdcall static void
ExInitializeNPagedLookasideList(
	npaged_lookaside_list *lookaside,
	lookaside_alloc_func *allocfunc,
	lookaside_free_func *freefunc,
	uint32_t flags,
	size_t size,
	uint32_t tag,
	uint16_t depth)
{
	bzero((char *)lookaside, sizeof(npaged_lookaside_list));

	if (size < sizeof(slist_entry))
		lookaside->nll_l.gl_size = sizeof(slist_entry);
	else
		lookaside->nll_l.gl_size = size;
	lookaside->nll_l.gl_tag = tag;
	if (allocfunc == NULL)
		lookaside->nll_l.gl_allocfunc =
		    ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
	else
		lookaside->nll_l.gl_allocfunc = allocfunc;

	if (freefunc == NULL)
		lookaside->nll_l.gl_freefunc =
		    ntoskrnl_findwrap((funcptr)ExFreePool);
	else
		lookaside->nll_l.gl_freefunc = freefunc;

#ifdef __i386__
	KeInitializeSpinLock(&lookaside->nll_obsoletelock);
#endif

	lookaside->nll_l.gl_type = NonPagedPool;
	lookaside->nll_l.gl_depth = depth;
	lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;

	return;
}

__stdcall static void
ExDeleteNPagedLookasideList(lookaside)
	npaged_lookaside_list *lookaside;
{
	void *buf;
	__stdcall void (*freefunc)(void *);

	freefunc = lookaside->nll_l.gl_freefunc;
	while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
		MSCALL1(freefunc, buf);

	return;
}

/*
 * Note: the interlocked slist push and pop routines are
 * declared to be _fastcall in Windows. gcc 3.4 is supposed
 * to have support for this calling convention, however we
 * don't have that version available yet, so we kludge things
 * up using __regparm__(3) and some argument shuffling.
 */

__fastcall static slist_entry *
InterlockedPushEntrySList(REGARGS2(slist_header *head, slist_entry *entry))
{
	slist_entry *oldhead;

	oldhead = (slist_entry *)FASTCALL3(ExInterlockedPushEntrySList,
	    head, entry, &ntoskrnl_global);

	return(oldhead);
}

__fastcall static slist_entry *
InterlockedPopEntrySList(REGARGS1(slist_header *head))
{
	slist_entry *first;

	first = (slist_entry *)FASTCALL2(ExInterlockedPopEntrySList,
	    head, &ntoskrnl_global);

	return(first);
}

__fastcall static slist_entry *
ExInterlockedPushEntrySList(REGARGS2(slist_header *head,
    slist_entry *entry), kspin_lock *lock)
{
	slist_entry *oldhead;
	uint8_t irql;

	KeAcquireSpinLock(lock, &irql);
	oldhead = ntoskrnl_pushsl(head, entry);
	KeReleaseSpinLock(lock, irql);

	return(oldhead);
}

__fastcall static slist_entry *
ExInterlockedPopEntrySList(REGARGS2(slist_header *head, kspin_lock *lock))
{
	slist_entry *first;
	uint8_t irql;

	KeAcquireSpinLock(lock, &irql);
	first = ntoskrnl_popsl(head);
	KeReleaseSpinLock(lock, irql);

	return(first);
}

__stdcall static uint16_t
ExQueryDepthSList(head)
	slist_header *head;
{
	uint16_t depth;
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	depth = head->slh_list.slh_depth;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(depth);
}
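
#if 0
/*
 * Illustrative sketch only (not compiled): the interlocked SList
 * routines implement a lock-protected LIFO whose entries embed an
 * slist_entry as their first member. The structure and function
 * names here are hypothetical; FASTCALL1()/FASTCALL2() are used
 * because the routines are _fastcall on x86 (see the note above).
 */
struct example_node {
	slist_entry link;	/* must come first */
	int payload;
};

static void
example_slist_usage(slist_header *head, struct example_node *n)
{
	struct example_node *popped;

	FASTCALL2(InterlockedPushEntrySList, head, &n->link);
	popped = (struct example_node *)
	    FASTCALL1(InterlockedPopEntrySList, head);
	if (popped != NULL)
		printf("example: popped payload %d\n", popped->payload);
}
#endif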

/* TODO: Make sure that LOCKDEBUG isn't defined, otherwise a "struct
 * TODO: simplelock" will be more than 4 bytes. I'm using a kspin_lock
 * TODO: as a simplelock, and the kspin_lock is 4 bytes, so this is OK
 * TODO: as long as LOCKDEBUG isn't defined.
 */

/*
 * The KeInitializeSpinLock(), KefAcquireSpinLockAtDpcLevel()
 * and KefReleaseSpinLockFromDpcLevel() routines appear to be
 * analogous to splnet()/splx() in their use. We can't create a new
 * mutex lock here because there is no complementary KeFreeSpinLock()
 * function. Instead, we grab a mutex from the mutex pool.
 */
__stdcall void
KeInitializeSpinLock(lock)
	kspin_lock *lock;
{
#ifdef __FreeBSD__
	*lock = 0;
#else /* __NetBSD__ */
	simple_lock_init((struct simplelock *)lock);
#endif

	return;
}

#ifdef __i386__
__fastcall void
KefAcquireSpinLockAtDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
		/* sit and spin */;
#else /* __NetBSD__ */
	simple_lock((struct simplelock *)lock);
#endif

	return;
}

__fastcall void
KefReleaseSpinLockFromDpcLevel(REGARGS1(kspin_lock *lock))
{
#ifdef __FreeBSD__
	atomic_store_rel_int((volatile u_int *)lock, 0);
#else /* __NetBSD__ */
	simple_unlock((struct simplelock *)lock);
#endif
	return;
}

__stdcall uint8_t
KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
{
	uint8_t oldirql;

	if (KeGetCurrentIrql() > DISPATCH_LEVEL)
		panic("IRQL_NOT_LESS_THAN_OR_EQUAL");

	oldirql = KeRaiseIrql(DISPATCH_LEVEL);
	KeAcquireSpinLockAtDpcLevel(lock);

	return(oldirql);
}
#else
__stdcall void
KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
{
	while (atomic_swap_uint((volatile u_int *)lock, 1) == 1)
		/* sit and spin */;

	return;
}

__stdcall void
KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
{
	*(volatile u_int *)lock = 0;

	return;
}
#endif /* __i386__ */
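
#if 0
/*
 * Illustrative sketch only (not compiled): the canonical Windows
 * spinlock pattern. KeAcquireSpinLock() raises the IRQL to
 * DISPATCH_LEVEL and hands back the previous level, which the
 * caller must pass to KeReleaseSpinLock(). The function and
 * counter names are hypothetical.
 */
static void
example_spinlock_usage(kspin_lock *lock, uint32_t *counter)
{
	uint8_t irql;

	KeAcquireSpinLock(lock, &irql);
	(*counter)++;		/* the critical section */
	KeReleaseSpinLock(lock, irql);
}
#endif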

__fastcall uintptr_t
InterlockedExchange(REGARGS2(volatile uint32_t *dst, uintptr_t val))
{
	uint8_t irql;
	uintptr_t r;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	r = *dst;
	*dst = val;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return(r);
}

__fastcall static uint32_t
InterlockedIncrement(REGARGS1(volatile uint32_t *addend))
{
	atomic_inc_32(addend);
	return(*addend);
}

__fastcall static uint32_t
InterlockedDecrement(REGARGS1(volatile uint32_t *addend))
{
	atomic_dec_32(addend);
	return(*addend);
}

__fastcall static void
ExInterlockedAddLargeStatistic(REGARGS2(uint64_t *addend, uint32_t inc))
{
	uint8_t irql;

	KeAcquireSpinLock(&ntoskrnl_global, &irql);
	*addend += inc;
	KeReleaseSpinLock(&ntoskrnl_global, irql);

	return;
}

__stdcall mdl *
IoAllocateMdl(
	void *vaddr,
	uint32_t len,
	uint8_t secondarybuf,
	uint8_t chargequota,
	irp *iopkt)
{
	mdl *m;
	int zone = 0;

	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else {
#ifdef __FreeBSD__
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
#else
		m = pool_get(&mdl_pool, PR_WAITOK);
#endif
		zone++;
	}

	if (m == NULL)
		return (NULL);

	MmInitializeMdl(m, vaddr, len);

	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL UMA zone, tag it so we can release it to
	 * the right place later.
	 */
	if (zone)
		m->mdl_flags = MDL_ZONE_ALLOCED;

	if (iopkt != NULL) {
		if (secondarybuf == TRUE) {
			mdl		*last;
			last = iopkt->irp_mdl;
			while (last->mdl_next != NULL)
				last = last->mdl_next;
			last->mdl_next = m;
		} else {
			if (iopkt->irp_mdl != NULL)
				panic("leaking an MDL in IoAllocateMdl()");
			iopkt->irp_mdl = m;
		}
	}

	return (m);
}

__stdcall void
IoFreeMdl(m)
	mdl			*m;
{
	if (m == NULL)
		return;

	if (m->mdl_flags & MDL_ZONE_ALLOCED)
#ifdef __FreeBSD__
		uma_zfree(mdl_zone, m);
#else
		pool_put(&mdl_pool, m);
#endif
	else
		ExFreePool(m);

	return;
}

__stdcall static uint32_t
MmSizeOfMdl(vaddr, len)
	void			*vaddr;
	size_t			len;
{
	uint32_t		l;

	l = sizeof(struct mdl) +
	    (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));

	return(l);
}

/*
 * The Microsoft documentation says this routine fills in the
 * page array of an MDL with the _physical_ page addresses that
 * comprise the buffer, but we don't really want to do that here.
 * Instead, we just fill in the page array with the kernel virtual
 * addresses of the buffers.
 */
__stdcall static void
MmBuildMdlForNonPagedPool(m)
	mdl			*m;
{
	vm_offset_t		*mdl_pages;
	int			pagecnt, i;

	pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);

	if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
		panic("not enough pages in MDL to describe buffer");

	mdl_pages = MmGetMdlPfnArray(m);

	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);

	m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
	m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);

	return;
}

__stdcall static void *
MmMapLockedPages(
	mdl			*buf,
	uint8_t			accessmode)
{
	buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
	return(MmGetMdlVirtualAddress(buf));
}

__stdcall static void *
MmMapLockedPagesSpecifyCache(
	mdl			*buf,
	uint8_t			accessmode,
	uint32_t		cachetype,
	void			*vaddr,
	uint32_t		bugcheck,
	uint32_t		prio)
{
	return(MmMapLockedPages(buf, accessmode));
}

__stdcall static void
MmUnmapLockedPages(
	void			*vaddr,
	mdl			*buf)
{
	buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
	return;
}
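
/*
 * A sketch of the expected MDL life cycle, assuming a buffer that
 * lives in non-paged (i.e. wired kernel) memory. Illustrative only;
 * the names below are hypothetical.
 */
#ifdef notdef
static void
example_mdl_usage(void *buf, uint32_t len)
{
	mdl			*m;
	void			*va;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m == NULL)
		return;
	MmBuildMdlForNonPagedPool(m);
	va = MmMapLockedPages(m, 0);	/* access mode is ignored here */
	/* ... use va, which in this emulation is just buf again ... */
	MmUnmapLockedPages(va, m);
	IoFreeMdl(m);
}
#endif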

__stdcall static size_t
RtlCompareMemory(s1, s2, len)
	const void		*s1;
	const void		*s2;
	size_t			len;
{
	size_t			i, total = 0;
	uint8_t			*m1, *m2;

	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);

	for (i = 0; i < len; i++) {
		if (m1[i] == m2[i])
			total++;
	}
	return(total);
}

__stdcall static void
RtlInitAnsiString(dst, src)
	ndis_ansi_string	*dst;
	char			*src;
{
	ndis_ansi_string	*a;

	a = dst;
	if (a == NULL)
		return;
	if (src == NULL) {
		a->nas_len = a->nas_maxlen = 0;
		a->nas_buf = NULL;
	} else {
		a->nas_buf = src;
		a->nas_len = a->nas_maxlen = strlen(src);
	}

	return;
}

__stdcall static void
RtlInitUnicodeString(dst, src)
	ndis_unicode_string	*dst;
	uint16_t		*src;
{
	ndis_unicode_string	*u;
	int			i;

	u = dst;
	if (u == NULL)
		return;
	if (src == NULL) {
		u->us_len = u->us_maxlen = 0;
		u->us_buf = NULL;
	} else {
		i = 0;
		while (src[i] != 0)
			i++;
		u->us_buf = src;
		u->us_len = u->us_maxlen = i * 2;
	}

	return;
}

__stdcall ndis_status
RtlUnicodeStringToInteger(ustr, base, val)
	ndis_unicode_string	*ustr;
	uint32_t		base;
	uint32_t		*val;
{
	uint16_t		*uchr;
	int			len, neg = 0;
	char			abuf[64];
	char			*astr;

	uchr = ustr->us_buf;
	len = ustr->us_len;
	bzero(abuf, sizeof(abuf));

	if ((char)((*uchr) & 0xFF) == '-') {
		neg = 1;
		uchr++;
		len -= 2;
	} else if ((char)((*uchr) & 0xFF) == '+') {
		neg = 0;
		uchr++;
		len -= 2;
	}

	if (base == 0) {
		if ((char)((*uchr) & 0xFF) == 'b') {
			base = 2;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'o') {
			base = 8;
			uchr++;
			len -= 2;
		} else if ((char)((*uchr) & 0xFF) == 'x') {
			base = 16;
			uchr++;
			len -= 2;
		} else
			base = 10;
	}

	astr = abuf;
	if (neg) {
		strcpy(astr, "-");
		astr++;
	}

	ndis_unicode_to_ascii(uchr, len, &astr);
	*val = strtoul(abuf, NULL, base);

	return(NDIS_STATUS_SUCCESS);
}
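
/*
 * For example (illustrative only): with base == 0, a unicode string
 * holding "x20" selects hexadecimal and stores 32 in *val, "o20"
 * selects octal and yields 16, and "b101" selects binary and yields
 * 5. The "len -= 2" adjustments above reflect that us_len counts
 * bytes and each unicode character occupies two of them.
 */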

__stdcall static void
RtlFreeUnicodeString(ustr)
	ndis_unicode_string	*ustr;
{
	if (ustr->us_buf == NULL)
		return;
	free(ustr->us_buf, M_DEVBUF);
	ustr->us_buf = NULL;
	return;
}

__stdcall static void
RtlFreeAnsiString(astr)
	ndis_ansi_string	*astr;
{
	if (astr->nas_buf == NULL)
		return;
	free(astr->nas_buf, M_DEVBUF);
	astr->nas_buf = NULL;
	return;
}

static int
atoi(str)
	const char		*str;
{
#ifdef __FreeBSD__
	return (int)strtol(str, (char **)NULL, 10);
#else
	int			n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

static long
atol(str)
	const char		*str;
{
#ifdef __FreeBSD__
	return strtol(str, (char **)NULL, 10);
#else
	long			n;

	for (n = 0; *str && *str >= '0' && *str <= '9'; str++)
		n = n * 10 + *str - '0';
	return n;
#endif
}

/*
 * stolen from ./netipsec/key.c
 */

#ifdef __NetBSD__
void srandom(int);
void srandom(int arg) { return; }
#endif

static int
rand(void)
{
	struct timeval		tv;

	microtime(&tv);
	srandom(tv.tv_usec);
	return((int)random());
}

static void
srand(seed)
	unsigned int		seed;
{
	srandom(seed);
	return;
}

__stdcall static uint8_t
IoIsWdmVersionAvailable(major, minor)
	uint8_t			major;
	uint8_t			minor;
{
	if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
		return(TRUE);
	return(FALSE);
}

__stdcall static ndis_status
IoGetDeviceProperty(
	device_object		*devobj,
	uint32_t		regprop,
	uint32_t		buflen,
	void			*prop,
	uint32_t		*reslen)
{
	driver_object		*drv;
	uint16_t		**name;

	drv = devobj->do_drvobj;

	switch (regprop) {
	case DEVPROP_DRIVER_KEYNAME:
		name = prop;
		*name = drv->dro_drivername.us_buf;
		*reslen = drv->dro_drivername.us_len;
		break;
	default:
		return(STATUS_INVALID_PARAMETER_2);
		break;
	}

	return(STATUS_SUCCESS);
}

__stdcall static void
KeInitializeMutex(
	kmutant			*kmutex,
	uint32_t		level)
{
	INIT_LIST_HEAD((&kmutex->km_header.dh_waitlisthead));
	kmutex->km_abandoned = FALSE;
	kmutex->km_apcdisable = 1;
	kmutex->km_header.dh_sigstate = TRUE;
	kmutex->km_header.dh_type = EVENT_TYPE_SYNC;
	kmutex->km_header.dh_size = OTYPE_MUTEX;
	kmutex->km_acquirecnt = 0;
	kmutex->km_ownerthread = NULL;
	return;
}

__stdcall static uint32_t
KeReleaseMutex(
	kmutant			*kmutex,
	uint8_t			kwait)
{
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

#ifdef __FreeBSD__
	if (kmutex->km_ownerthread != curthread->td_proc) {
#else
	if (kmutex->km_ownerthread != curproc) {
#endif
#ifdef __FreeBSD__
		mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
		DISPATCH_UNLOCK();
#endif
		return(STATUS_MUTANT_NOT_OWNED);
	}
	kmutex->km_acquirecnt--;
	if (kmutex->km_acquirecnt == 0) {
		kmutex->km_ownerthread = NULL;
		ntoskrnl_wakeup(&kmutex->km_header);
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(kmutex->km_acquirecnt);
}

__stdcall static uint32_t
KeReadStateMutex(kmutex)
	kmutant			*kmutex;
{
	return(kmutex->km_header.dh_sigstate);
}
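
/*
 * A usage sketch, assuming the KeWaitForSingleObject() signature used
 * elsewhere in this file: acquisition goes through the generic
 * dispatcher wait path, and KeReleaseMutex() wakes waiters once the
 * acquisition count drops to zero. Illustrative only; the names
 * below are hypothetical.
 */
#ifdef notdef
static kmutant		example_mtx;

static void
example_mutex_usage(void)
{
	KeInitializeMutex(&example_mtx, 0);
	KeWaitForSingleObject((nt_dispatch_header *)&example_mtx,
	    0, 0, FALSE, NULL);
	/* ... critical section ... */
	KeReleaseMutex(&example_mtx, FALSE);
}
#endif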

__stdcall void
KeInitializeEvent(kevent, type, state)
	nt_kevent		*kevent;
	uint32_t		type;
	uint8_t			state;
{
	INIT_LIST_HEAD((&kevent->k_header.dh_waitlisthead));
	kevent->k_header.dh_sigstate = state;
	kevent->k_header.dh_type = type;
	kevent->k_header.dh_size = OTYPE_EVENT;
	return;
}

__stdcall uint32_t
KeResetEvent(kevent)
	nt_kevent		*kevent;
{
	uint32_t		prevstate;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	prevstate = kevent->k_header.dh_sigstate;
	kevent->k_header.dh_sigstate = FALSE;

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(prevstate);
}

__stdcall uint32_t
KeSetEvent(
	nt_kevent		*kevent,
	uint32_t		increment,
	uint8_t			kwait)
{
	uint32_t		prevstate;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	prevstate = kevent->k_header.dh_sigstate;
	ntoskrnl_wakeup(&kevent->k_header);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(prevstate);
}

__stdcall void
KeClearEvent(kevent)
	nt_kevent		*kevent;
{
	kevent->k_header.dh_sigstate = FALSE;
	return;
}

__stdcall uint32_t
KeReadStateEvent(kevent)
	nt_kevent		*kevent;
{
	return(kevent->k_header.dh_sigstate);
}
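
/*
 * A sketch of the producer/consumer pattern these routines support,
 * assuming KeInitializeEvent(&example_ev, EVENT_TYPE_NOTIFY, FALSE)
 * was done once at setup time. Illustrative only; the names below
 * are hypothetical.
 */
#ifdef notdef
static nt_kevent	example_ev;

static void
example_event_producer(void)
{
	KeSetEvent(&example_ev, 0, FALSE);	/* wakes any waiters */
}

static void
example_event_consumer(void)
{
	/* Blocks until the event is signalled. */
	KeWaitForSingleObject((nt_dispatch_header *)&example_ev,
	    0, 0, FALSE, NULL);
}
#endif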

__stdcall static ndis_status
ObReferenceObjectByHandle(
	ndis_handle		handle,
	uint32_t		reqaccess,
	void			*otype,
	uint8_t			accessmode,
	void			**object,
	void			**handleinfo)
{
	nt_objref		*nr;

	nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (nr == NULL)
		return(NDIS_STATUS_FAILURE);

	INIT_LIST_HEAD((&nr->no_dh.dh_waitlisthead));
	nr->no_obj = handle;
	nr->no_dh.dh_size = OTYPE_THREAD;
	TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
	*object = nr;

	return(NDIS_STATUS_SUCCESS);
}

__fastcall static void
ObfDereferenceObject(REGARGS1(void *object))
{
	nt_objref		*nr;

	nr = object;
	TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
	free(nr, M_DEVBUF);

	return;
}

__stdcall static uint32_t
ZwClose(ndis_handle handle)
{
	return(STATUS_SUCCESS);
}

/*
 * This is here just in case the thread returns without calling
 * PsTerminateSystemThread().
 */
static void
ntoskrnl_thrfunc(arg)
	void			*arg;
{
	thread_context		*thrctx;
	__stdcall uint32_t (*tfunc)(void *);
	void			*tctx;
	uint32_t		rval;

	thrctx = arg;
	tfunc = thrctx->tc_thrfunc;
	tctx = thrctx->tc_thrctx;
	free(thrctx, M_TEMP);

	rval = MSCALL1(tfunc, tctx);

	PsTerminateSystemThread(rval);
	return; /* notreached */
}

__stdcall static ndis_status
PsCreateSystemThread(
	ndis_handle		*handle,
	uint32_t		reqaccess,
	void			*objattrs,
	ndis_handle		phandle,
	void			*clientid,
	void			*thrfunc,
	void			*thrctx)
{
	int			error;
	char			tname[128];
	thread_context		*tc;
	struct proc		*p;

	tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
	if (tc == NULL)
		return(NDIS_STATUS_FAILURE);

	tc->tc_thrctx = thrctx;
	tc->tc_thrfunc = thrfunc;

	sprintf(tname, "windows kthread %d", ntoskrnl_kth);
#ifdef __FreeBSD__
	error = kthread_create(ntoskrnl_thrfunc, tc, &p,
	    RFHIGHPID, NDIS_KSTACK_PAGES, tname);
#else
	/* TODO: Provide a larger stack for these threads (NDIS_KSTACK_PAGES) */
	error = ndis_kthread_create(ntoskrnl_thrfunc, tc, &p, NULL, 0, tname);
#endif
	*handle = p;

	ntoskrnl_kth++;

	return(error);
}

/*
 * In Windows, the exit of a thread is an event that you're allowed
 * to wait on, assuming you've obtained a reference to the thread using
 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 * simulate this behavior is to register each thread we create in a
 * reference list, and if someone holds a reference to us, we poke
 * them.
 */
__stdcall static ndis_status
PsTerminateSystemThread(ndis_status status)
{
	struct nt_objref	*nr;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
#ifdef __FreeBSD__
		if (nr->no_obj != curthread->td_proc)
#else
		if (nr->no_obj != curproc)
#endif
			continue;
		ntoskrnl_wakeup(&nr->no_dh);
		break;
	}

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	ntoskrnl_kth--;

#ifdef __FreeBSD__
#if __FreeBSD_version < 502113
	mtx_lock(&Giant);
#endif
#endif /* __FreeBSD__ */
	kthread_exit(0);
	return(0); /* notreached */
}
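
/*
 * A sketch of the create/wait/exit sequence described above, in the
 * order a Windows driver would issue it. Illustrative only; the
 * names are hypothetical, and a real driver reaches these routines
 * through its own calling conventions rather than direct C calls.
 */
#ifdef notdef
static uint32_t example_thread_func(void *);

static void
example_thread_usage(void)
{
	ndis_handle		th;
	void			*obj, *hinfo;

	PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
	    example_thread_func, NULL);
	ObReferenceObjectByHandle(th, 0, NULL, 0, &obj, &hinfo);

	/* Blocks until the thread calls PsTerminateSystemThread(). */
	KeWaitForSingleObject(obj, 0, 0, FALSE, NULL);
	ObfDereferenceObject(obj);
}
#endif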

static uint32_t
DbgPrint(char *fmt, ...)
{
	va_list			ap;

	if (bootverbose) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}

	return(STATUS_SUCCESS);
}

__stdcall static void
DbgBreakPoint(void)
{
#if defined(__FreeBSD__) && __FreeBSD_version < 502113
	Debugger("DbgBreakPoint(): breakpoint");
#elif defined(__FreeBSD__) && __FreeBSD_version >= 502113
	kdb_enter("DbgBreakPoint(): breakpoint");
#else /* NetBSD case */
	; /* TODO: find a way to drop into the debugger without panicking */
#endif
}

static void
ntoskrnl_timercall(arg)
	void			*arg;
{
	ktimer			*timer;
	struct timeval		tv;
#ifdef __NetBSD__
	int			s;
#endif

#ifdef __FreeBSD__
	mtx_unlock(&Giant);
#endif

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	timer = arg;

	timer->k_header.dh_inserted = FALSE;

	/*
	 * If this is a periodic timer, re-arm it
	 * so it will fire again. We do this before
	 * running any deferred procedure calls because
	 * it's possible the DPC might cancel the timer,
	 * in which case it would be wrong for us to
	 * re-arm it afterwards.
	 */

	if (timer->k_period) {
		tv.tv_sec = 0;
		tv.tv_usec = timer->k_period * 1000;
		timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
		timer->k_handle = timeout(ntoskrnl_timercall,
		    timer, tvtohz(&tv));
#else /* __NetBSD__ */
		callout_reset(timer->k_handle, tvtohz(&tv),
		    ntoskrnl_timercall, timer);
#endif /* __NetBSD__ */
	}

	if (timer->k_dpc != NULL)
		KeInsertQueueDpc(timer->k_dpc, NULL, NULL);

	ntoskrnl_wakeup(&timer->k_header);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

#ifdef __FreeBSD__
	mtx_lock(&Giant);
#endif

	return;
}

__stdcall void
KeInitializeTimer(timer)
	ktimer			*timer;
{
	if (timer == NULL)
		return;

	KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);

	return;
}

__stdcall void
KeInitializeTimerEx(timer, type)
	ktimer			*timer;
	uint32_t		type;
{
	if (timer == NULL)
		return;

	INIT_LIST_HEAD((&timer->k_header.dh_waitlisthead));
	timer->k_header.dh_sigstate = FALSE;
	timer->k_header.dh_inserted = FALSE;
	timer->k_header.dh_type = type;
	timer->k_header.dh_size = OTYPE_TIMER;
#ifdef __FreeBSD__
	callout_handle_init(&timer->k_handle);
#else
	callout_init(timer->k_handle, 0);
#endif

	return;
}

/*
 * This is a wrapper for Windows deferred procedure calls that
 * have been placed on an NDIS thread work queue. We need it
 * since the DPC could be a _stdcall function. Also, as far as
 * I can tell, deferred procedure calls must run at DISPATCH_LEVEL.
 */
static void
ntoskrnl_run_dpc(arg)
	void			*arg;
{
	__stdcall kdpc_func	dpcfunc;
	kdpc			*dpc;
	uint8_t			irql;

	dpc = arg;
	dpcfunc = dpc->k_deferedfunc;
	irql = KeRaiseIrql(DISPATCH_LEVEL);
	MSCALL4(dpcfunc, dpc, dpc->k_deferredctx,
	    dpc->k_sysarg1, dpc->k_sysarg2);
	KeLowerIrql(irql);

	return;
}

__stdcall void
KeInitializeDpc(dpc, dpcfunc, dpcctx)
	kdpc			*dpc;
	void			*dpcfunc;
	void			*dpcctx;
{

	if (dpc == NULL)
		return;

	dpc->k_deferedfunc = dpcfunc;
	dpc->k_deferredctx = dpcctx;

	return;
}

__stdcall uint8_t
KeInsertQueueDpc(dpc, sysarg1, sysarg2)
	kdpc			*dpc;
	void			*sysarg1;
	void			*sysarg2;
{
	dpc->k_sysarg1 = sysarg1;
	dpc->k_sysarg2 = sysarg2;

	if (ndis_sched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}

__stdcall uint8_t
KeRemoveQueueDpc(dpc)
	kdpc			*dpc;
{
	if (ndis_unsched(ntoskrnl_run_dpc, dpc, NDIS_SWI))
		return(FALSE);

	return(TRUE);
}
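
/*
 * A usage sketch (illustrative only): example_dpc_func is a
 * hypothetical _stdcall DPC routine with the (dpc, context, sysarg1,
 * sysarg2) shape that ntoskrnl_run_dpc() invokes above.
 */
#ifdef notdef
static void example_dpc_func(kdpc *, void *, void *, void *);
static kdpc		example_dpc;

static void
example_dpc_usage(void *ctx)
{
	KeInitializeDpc(&example_dpc, example_dpc_func, ctx);

	/* FALSE means the DPC could not be scheduled. */
	if (KeInsertQueueDpc(&example_dpc, NULL, NULL) == FALSE)
		printf("example DPC not queued\n");
}
#endif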

__stdcall uint8_t
KeSetTimerEx(timer, duetime, period, dpc)
	ktimer			*timer;
	int64_t			duetime;
	uint32_t		period;
	kdpc			*dpc;
{
	struct timeval		tv;
	uint64_t		curtime;
	uint8_t			pending;
#ifdef __NetBSD__
	int			s;
#endif

	if (timer == NULL)
		return(FALSE);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		timer->k_header.dh_inserted = FALSE;
		pending = TRUE;
	} else
		pending = FALSE;

	timer->k_duetime = duetime;
	timer->k_period = period;
	timer->k_header.dh_sigstate = FALSE;
	timer->k_dpc = dpc;

	if (duetime < 0) {
		tv.tv_sec = - (duetime) / 10000000;
		tv.tv_usec = (- (duetime) / 10) -
		    (tv.tv_sec * 1000000);
	} else {
		ntoskrnl_time(&curtime);
		if (duetime < curtime)
			tv.tv_sec = tv.tv_usec = 0;
		else {
			tv.tv_sec = ((duetime) - curtime) / 10000000;
			tv.tv_usec = ((duetime) - curtime) / 10 -
			    (tv.tv_sec * 1000000);
		}
	}

	timer->k_header.dh_inserted = TRUE;
#ifdef __FreeBSD__
	timer->k_handle = timeout(ntoskrnl_timercall, timer, tvtohz(&tv));
#else
	callout_reset(timer->k_handle, tvtohz(&tv), ntoskrnl_timercall, timer);
#endif

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(pending);
}

__stdcall uint8_t
KeSetTimer(timer, duetime, dpc)
	ktimer			*timer;
	int64_t			duetime;
	kdpc			*dpc;
{
	return (KeSetTimerEx(timer, duetime, 0, dpc));
}

__stdcall uint8_t
KeCancelTimer(timer)
	ktimer			*timer;
{
	uint8_t			pending;
#ifdef __NetBSD__
	int			s;
#endif

	if (timer == NULL)
		return(FALSE);

#ifdef __NetBSD__
	DISPATCH_LOCK();
#else
	mtx_lock(&ntoskrnl_dispatchlock);
#endif

	if (timer->k_header.dh_inserted == TRUE) {
#ifdef __FreeBSD__
		untimeout(ntoskrnl_timercall, timer, timer->k_handle);
#else /* __NetBSD__ */
		callout_stop(timer->k_handle);
#endif
		pending = TRUE;
	} else
		pending = KeRemoveQueueDpc(timer->k_dpc);

#ifdef __FreeBSD__
	mtx_unlock(&ntoskrnl_dispatchlock);
#else /* __NetBSD__ */
	DISPATCH_UNLOCK();
#endif

	return(pending);
}
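
/*
 * Due times follow the Windows convention: a negative value is a
 * relative interval in 100-nanosecond units, while a positive value
 * is an absolute time (compared against ntoskrnl_time() above). A
 * sketch of a 500ms one-shot, illustrative only with hypothetical
 * names:
 */
#ifdef notdef
static ktimer		example_timer;

static void
example_timer_usage(kdpc *dpc)
{
	KeInitializeTimer(&example_timer);

	/* -5000000 * 100ns == 500ms from now */
	KeSetTimer(&example_timer, -5000000LL, dpc);

	/* ... */

	if (KeCancelTimer(&example_timer) == TRUE)
		printf("timer was still pending\n");
}
#endif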

__stdcall uint8_t
KeReadStateTimer(timer)
	ktimer			*timer;
{
	return(timer->k_header.dh_sigstate);
}

__stdcall static void
dummy()
{
	printf("ntoskrnl dummy called...\n");
	return;
}

image_patch_table ntoskrnl_functbl[] = {
	IMPORT_FUNC(RtlCompareMemory),
	IMPORT_FUNC(RtlEqualUnicodeString),
	IMPORT_FUNC(RtlCopyUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToAnsiString),
	IMPORT_FUNC(RtlAnsiStringToUnicodeString),
	IMPORT_FUNC(RtlInitAnsiString),
	IMPORT_FUNC_MAP(RtlInitString, RtlInitAnsiString),
	IMPORT_FUNC(RtlInitUnicodeString),
	IMPORT_FUNC(RtlFreeAnsiString),
	IMPORT_FUNC(RtlFreeUnicodeString),
	IMPORT_FUNC(RtlUnicodeStringToInteger),
	IMPORT_FUNC(sprintf),
	IMPORT_FUNC(vsprintf),
	IMPORT_FUNC_MAP(_snprintf, snprintf),
	IMPORT_FUNC_MAP(_vsnprintf, vsnprintf),
	IMPORT_FUNC(DbgPrint),
	IMPORT_FUNC(DbgBreakPoint),
	IMPORT_FUNC(strncmp),
	IMPORT_FUNC(strcmp),
	IMPORT_FUNC(strncpy),
	IMPORT_FUNC(strcpy),
	IMPORT_FUNC(strlen),
	IMPORT_FUNC(memcpy),
	IMPORT_FUNC_MAP(memmove, ntoskrnl_memset),	/* XXX no real memmove here */
	IMPORT_FUNC_MAP(memset, ntoskrnl_memset),
	IMPORT_FUNC(IoAllocateDriverObjectExtension),
	IMPORT_FUNC(IoGetDriverObjectExtension),
	IMPORT_FUNC(IofCallDriver),
	IMPORT_FUNC(IofCompleteRequest),
	IMPORT_FUNC(IoAcquireCancelSpinLock),
	IMPORT_FUNC(IoReleaseCancelSpinLock),
	IMPORT_FUNC(IoCancelIrp),
	IMPORT_FUNC(IoCreateDevice),
	IMPORT_FUNC(IoDeleteDevice),
	IMPORT_FUNC(IoGetAttachedDevice),
	IMPORT_FUNC(IoAttachDeviceToDeviceStack),
	IMPORT_FUNC(IoDetachDevice),
	IMPORT_FUNC(IoBuildSynchronousFsdRequest),
	IMPORT_FUNC(IoBuildAsynchronousFsdRequest),
	IMPORT_FUNC(IoBuildDeviceIoControlRequest),
	IMPORT_FUNC(IoAllocateIrp),
	IMPORT_FUNC(IoReuseIrp),
	IMPORT_FUNC(IoMakeAssociatedIrp),
	IMPORT_FUNC(IoFreeIrp),
	IMPORT_FUNC(IoInitializeIrp),
	IMPORT_FUNC(KeWaitForSingleObject),
	IMPORT_FUNC(KeWaitForMultipleObjects),
	IMPORT_FUNC(_allmul),
	IMPORT_FUNC(_alldiv),
	IMPORT_FUNC(_allrem),
	IMPORT_FUNC(_allshr),
	IMPORT_FUNC(_allshl),
	IMPORT_FUNC(_aullmul),
	IMPORT_FUNC(_aulldiv),
	IMPORT_FUNC(_aullrem),
	IMPORT_FUNC(_aullshr),
	IMPORT_FUNC(_aullshl),
	IMPORT_FUNC(atoi),
	IMPORT_FUNC(atol),
	IMPORT_FUNC(rand),
	IMPORT_FUNC(srand),
	IMPORT_FUNC(WRITE_REGISTER_USHORT),
	IMPORT_FUNC(READ_REGISTER_USHORT),
	IMPORT_FUNC(WRITE_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_ULONG),
	IMPORT_FUNC(READ_REGISTER_UCHAR),
	IMPORT_FUNC(WRITE_REGISTER_UCHAR),
	IMPORT_FUNC(ExInitializePagedLookasideList),
	IMPORT_FUNC(ExDeletePagedLookasideList),
	IMPORT_FUNC(ExInitializeNPagedLookasideList),
	IMPORT_FUNC(ExDeleteNPagedLookasideList),
	IMPORT_FUNC(InterlockedPopEntrySList),
	IMPORT_FUNC(InterlockedPushEntrySList),
	IMPORT_FUNC(ExQueryDepthSList),
	IMPORT_FUNC_MAP(ExpInterlockedPopEntrySList, InterlockedPopEntrySList),
	IMPORT_FUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList),
	IMPORT_FUNC(ExInterlockedPopEntrySList),
	IMPORT_FUNC(ExInterlockedPushEntrySList),
	IMPORT_FUNC(ExAllocatePoolWithTag),
	IMPORT_FUNC(ExFreePool),
#ifdef __i386__
	IMPORT_FUNC(KefAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KefReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC(KeAcquireSpinLockRaiseToDpc),
#else
	/*
	 * For AMD64, we can get away with just mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
	 * because the calling conventions end up being the same.
	 * On i386, we have to be careful because KfAcquireSpinLock()
	 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
	 */
	IMPORT_FUNC(KeAcquireSpinLockAtDpcLevel),
	IMPORT_FUNC(KeReleaseSpinLockFromDpcLevel),
	IMPORT_FUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock),
#endif
	IMPORT_FUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock),
	IMPORT_FUNC(InterlockedIncrement),
	IMPORT_FUNC(InterlockedDecrement),
	IMPORT_FUNC(ExInterlockedAddLargeStatistic),
	IMPORT_FUNC(IoAllocateMdl),
	IMPORT_FUNC(IoFreeMdl),
	IMPORT_FUNC(MmSizeOfMdl),
	IMPORT_FUNC(MmMapLockedPages),
	IMPORT_FUNC(MmMapLockedPagesSpecifyCache),
	IMPORT_FUNC(MmUnmapLockedPages),
	IMPORT_FUNC(MmBuildMdlForNonPagedPool),
	IMPORT_FUNC(KeInitializeSpinLock),
	IMPORT_FUNC(IoIsWdmVersionAvailable),
	IMPORT_FUNC(IoGetDeviceProperty),
	IMPORT_FUNC(KeInitializeMutex),
	IMPORT_FUNC(KeReleaseMutex),
	IMPORT_FUNC(KeReadStateMutex),
	IMPORT_FUNC(KeInitializeEvent),
	IMPORT_FUNC(KeSetEvent),
	IMPORT_FUNC(KeResetEvent),
	IMPORT_FUNC(KeClearEvent),
	IMPORT_FUNC(KeReadStateEvent),
	IMPORT_FUNC(KeInitializeTimer),
	IMPORT_FUNC(KeInitializeTimerEx),
	IMPORT_FUNC(KeSetTimer),
	IMPORT_FUNC(KeSetTimerEx),
	IMPORT_FUNC(KeCancelTimer),
	IMPORT_FUNC(KeReadStateTimer),
	IMPORT_FUNC(KeInitializeDpc),
	IMPORT_FUNC(KeInsertQueueDpc),
	IMPORT_FUNC(KeRemoveQueueDpc),
	IMPORT_FUNC(ObReferenceObjectByHandle),
	IMPORT_FUNC(ObfDereferenceObject),
	IMPORT_FUNC(ZwClose),
	IMPORT_FUNC(PsCreateSystemThread),
	IMPORT_FUNC(PsTerminateSystemThread),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet. The PE import list patching routine will
	 * use it for any function that doesn't have an explicit match
	 * in this table.
	 */

	{ NULL, (FUNC)dummy, NULL },

	/* End of list. */

	{ NULL, NULL, NULL }
};