/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.0/sys/compat/ndis/subr_ntoskrnl.c 189942 2009-03-18 01:57:54Z weongyo $");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
    sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
	list_entry		kq_disp;
	struct thread		*kq_td;
	int			kq_cpu;
	int			kq_exit;
	int			kq_running;
	kspin_lock		kq_lock;
	nt_kevent		kq_proc;
	nt_kevent		kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
	struct cv		we_cv;
	struct thread		*we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS	256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
	struct callout		ce_callout;
	list_entry		ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
	unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
	unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
	*ExInterlockedPushEntrySList(slist_header *,
	slist_entry *, kspin_lock *);
static slist_entry
	*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
	uint64_t, uint64_t, uint64_t, uint32_t);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void KeQuerySystemTime(uint64_t *);
static uint32_t KeTickCount(void);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceObjectPointer(unicode_string *,
	uint32_t, void *, device_object *);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
	uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static char *ntoskrnl_strncat(char *, char *, size_t);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
static int32_t KeSetPriorityThread(struct thread *, int32_t);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
	image_patch_table	*patch;
	int			error;
	struct proc		*p;
	kdpc_queue		*kq;
	callout_entry		*e;
	int			i;
	char			name[64];

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
	mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	KeInitializeSpinLock(&ntoskrnl_intlock);
	TAILQ_INIT(&ntoskrnl_reflist);

	InitializeListHead(&ntoskrnl_calllist);
	InitializeListHead(&ntoskrnl_intlist);
	mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

	kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
	    sizeof(kdpc_queue) * mp_ncpus, 0);
#else
	    sizeof(kdpc_queue), 0);
#endif

	if (kq_queues == NULL)
		return(ENOMEM);

	wq_queues = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

	if (wq_queues == NULL)
		return(ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
	bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
	bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

	/*
	 * Launch the DPC threads.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		kq->kq_cpu = i;
		sprintf(name, "Windows DPC %d", i);
		error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch DPC thread");
	}

	/*
	 * Launch the workitem threads.
	 */

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		sprintf(name, "Windows Workitem %d", i);
		error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch workitem thread");
	}

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap,
		    patch->ipt_argcnt, patch->ipt_ftype);
		patch++;
	}

	for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
		e = ExAllocatePoolWithTag(NonPagedPool,
		    sizeof(callout_entry), 0);
		if (e == NULL)
			panic("failed to allocate timeouts");
		mtx_lock_spin(&ntoskrnl_calllock);
		InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
		mtx_unlock_spin(&ntoskrnl_calllock);
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
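	 * (Illustratively, with 4KB pages a 16-page MDL covers a
	 * buffer of up to 64KB, which is ample for the packet and
	 * control buffers NDIS drivers typically describe.)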
	 */

	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	return(0);
}

int
ntoskrnl_libfini()
{
	image_patch_table	*patch;
	callout_entry		*e;
	list_entry		*l;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	/* Stop the workitem queues. */
	ntoskrnl_destroy_workitem_threads();
	/* Stop the DPC queues. */
	ntoskrnl_destroy_dpc_threads();

	ExFreePool(kq_queues);
	ExFreePool(wq_queues);

	uma_zdestroy(mdl_zone);
	uma_zdestroy(iw_zone);

	mtx_lock_spin(&ntoskrnl_calllock);
	while (!IsListEmpty(&ntoskrnl_calllist)) {
		l = RemoveHeadList(&ntoskrnl_calllist);
		e = CONTAINING_RECORD(l, callout_entry, ce_list);
		mtx_unlock_spin(&ntoskrnl_calllock);
		ExFreePool(e);
		mtx_lock_spin(&ntoskrnl_calllock);
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	mtx_destroy(&ntoskrnl_dispatchlock);
	mtx_destroy(&ntoskrnl_interlock);
	mtx_destroy(&ntoskrnl_calllock);

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
	void			*buf;
	int			ch;
	size_t			size;
{
	return(memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
	void			*dst;
	void			*src;
	size_t			size;
{
	bcopy(src, dst, size);
	return(dst);
}

static void *
ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
{
	if (len != 0) {
		unsigned char *p = buf;

		do {
			if (*p++ == ch)
				return (p - 1);
		} while (--len != 0);
	}
	return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
	char			*s, *find;
{
	char c, sc;
	size_t len;

	if ((c = *find++) != 0) {
		len = strlen(find);
		do {
			do {
				if ((sc = *s++) == 0)
					return (NULL);
			} while (sc != c);
		} while (strncmp(s, find, len) != 0);
		s--;
	}
	return ((char *)s);
}

/* Taken from libc */
static char *
ntoskrnl_strncat(dst, src, n)
	char			*dst;
	char			*src;
	size_t			n;
{
	if (n != 0) {
		char *d = dst;
		const char *s = src;

		while (*d != 0)
			d++;
		do {
			if ((*d = *s++) == 0)
				break;
			d++;
		} while (--n != 0);
		*d = 0;
	}
	return (dst);
}

static int
ntoskrnl_toupper(c)
	int			c;
{
	return(toupper(c));
}

static int
ntoskrnl_tolower(c)
	int			c;
{
	return(tolower(c));
}

static uint8_t
RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
	uint8_t caseinsensitive)
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

static void
RtlCopyUnicodeString(dest, src)
	unicode_string		*dest;
	unicode_string		*src;
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
	char			*ascii;
	uint16_t		*unicode;
	int			len;
{
	int			i;
	uint16_t		*ustr;

	ustr = unicode;
	for (i = 0; i < len; i++) {
		*ustr = (uint16_t)ascii[i];
		ustr++;
	}

	return;
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
	uint16_t		*unicode;
	char			*ascii;
	int			len;
{
	int			i;
	uint8_t			*astr;

	astr = ascii;
	for (i = 0; i < len / 2; i++) {
		*astr = (uint8_t)unicode[i];
		astr++;
	}

	return;
}

uint32_t
RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	dest->as_len = src->us_len / 2;
	if (dest->as_maxlen < dest->as_len)
		dest->as_len = dest->as_maxlen;

	if (allocate == TRUE) {
		dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
		    (src->us_len / 2) + 1, 0);
		if (dest->as_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->as_len = dest->as_maxlen = src->us_len / 2;
	} else {
		dest->as_len = src->us_len / 2; /* XXX */
		if (dest->as_maxlen < dest->as_len)
			dest->as_len = dest->as_maxlen;
	}

	ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
	    dest->as_len * 2);

	return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
	uint8_t allocate)
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	if (allocate == TRUE) {
		dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
		    src->as_len * 2, 0);
		if (dest->us_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
	} else {
		dest->us_len = src->as_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
	}

	ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
	    dest->us_len / 2);

	return (STATUS_SUCCESS);
}
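
/*
 * Illustrative sketch of a caller (hypothetical code, not part of
 * this file), converting a C string by filling in the ansi_string
 * fields by hand:
 *
 *	ansi_string as;
 *	unicode_string us;
 *
 *	as.as_buf = "ndis";
 *	as.as_len = as.as_maxlen = 4;
 *	if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) ==
 *	    STATUS_SUCCESS) {
 *		... use us.us_buf and us.us_len ...
 *		ExFreePool(us.us_buf);
 *	}
 */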

void *
ExAllocatePoolWithTag(pooltype, len, tag)
	uint32_t		pooltype;
	size_t			len;
	uint32_t		tag;
{
	void			*buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buf == NULL)
		return(NULL);

	return(buf);
}

void
ExFreePool(buf)
	void			*buf;
{
	free(buf, M_DEVBUF);
	return;
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
	driver_object		*drv;
	void			*clid;
	uint32_t		extlen;
	void			**ext;
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
	driver_object		*drv;
	void			*clid;
{
	list_entry		*e;
	custom_extension	*ce;

	/*
	 * Sanity check. Our dummy bus drivers don't have
	 * any driver extensions.
	 */

	if (drv->dro_driverext == NULL)
		return(NULL);

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid)
			return((void *)(ce + 1));
		e = e->nle_flink;
	}

	return(NULL);
}

uint32_t
IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
	uint32_t devtype, uint32_t devchars, uint8_t exclusive,
	device_object **newdev)
{
	device_object		*dev;

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that (see the illustrative sketch after this
	 * function).
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}
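
/*
 * Illustrative sketch (hypothetical driver AddDevice code, not part
 * of this file): create a function device object and attach it to
 * the stack above the physical device object "pdo":
 *
 *	device_object *fdo;
 *
 *	if (IoCreateDevice(drv, sizeof(struct mydev_ctx), NULL,
 *	    FILE_DEVICE_UNKNOWN, 0, FALSE, &fdo) == STATUS_SUCCESS)
 *		IoAttachDeviceToDeviceStack(fdo, pdo);
 *
 * "struct mydev_ctx" and FILE_DEVICE_UNKNOWN are stand-ins here;
 * only the call sequence matters.
 */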

void
IoDeleteDevice(dev)
	device_object		*dev;
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

device_object *
IoGetAttachedDevice(dev)
	device_object		*dev;
{
	device_object		*d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	nt_kevent		*event;
	io_status_block		*status;
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	io_status_block		*status;
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

static irp *
IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
	uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
	nt_kevent *event, io_status_block *status)
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch (IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

static irp *
IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

static irp *
IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
{
	irp			*associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(associrp);
}

static void
IoFreeIrp(ip)
	irp			*ip;
{
	ExFreePool(ip);
	return;
}

static void
IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
{
	bzero((char *)io, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	InitializeListHead(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

static void
IoReuseIrp(ip, status)
	irp			*ip;
	uint32_t		status;
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

void
IoAcquireCancelSpinLock(uint8_t *irql)
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

void
IoReleaseCancelSpinLock(uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;
	uint8_t			cancelirql;

	IoAcquireCancelSpinLock(&cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(cancelirql);
		return(FALSE);
	}
	ip->irp_cancelirql = cancelirql;
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return (uint8_t)IoSetCancelValue(ip, TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
	device_object		*dobj;
	irp			*ip;
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

void
IofCompleteRequest(irp *ip, uint8_t prioboost)
{
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
	    ("incorrect IRP(%p) status (STATUS_PENDING)", ip));

	sl = IoGetCurrentIrpStackLocation(ip);
	IoSkipCurrentIrpStackLocation(ip);

	do {
		if (sl->isl_ctl & SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;

		if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		else
			dobj = NULL;

		if (sl->isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		} else {
			if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
			    (ip->irp_pendingreturned == TRUE))
				IoMarkIrpPending(ip);
		}

		/* move to the next. */
		IoSkipCurrentIrpStackLocation(ip);
		sl++;
	} while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));

	if (ip->irp_usriostat != NULL)
		*ip->irp_usriostat = ip->irp_iostat;
	if (ip->irp_usrevent != NULL)
		KeSetEvent(ip->irp_usrevent, prioboost, FALSE);

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt =
		    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & IRP_PAGING_IO) {
		if (ip->irp_mdl != NULL)
			IoFreeMdl(ip->irp_mdl);
		IoFreeIrp(ip);
	}

	return;
}

void
ntoskrnl_intr(arg)
	void			*arg;
{
	kinterrupt		*iobj;
	uint8_t			irql;
	uint8_t			claimed;
	list_entry		*l;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	l = ntoskrnl_intlist.nle_flink;
	while (l != &ntoskrnl_intlist) {
		iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
		claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
		if (claimed == TRUE)
			break;
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return;
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	return(irql);
}

void
KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
{
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
	return;
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
	kinterrupt		*iobj;
	void			*syncfunc;
	void			*syncctx;
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	MSCALL1(syncfunc, syncctx);
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return(TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke the registered ISRs until one of them claims the
 * interrupt. This effectively makes all interrupts shared, but
 * it's the only way to duplicate the semantics of
 * IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
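
/*
 * To make the dispatch scheme concrete, a sketch of the hookup (the
 * real registration lives in the NDIS attach code, not in this
 * file; the softc field names are illustrative):
 *
 *	bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    NULL, ntoskrnl_intr, NULL, &sc->irq_handle);
 *
 * After that, every kinterrupt created by IoConnectInterrupt() below
 * is multiplexed through ntoskrnl_intr()'s dispatch list.
 */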

uint32_t
IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
	kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
	uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
{
	uint8_t			curirql;

	*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
	if (*iobj == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	(*iobj)->ki_svcfunc = svcfunc;
	(*iobj)->ki_svcctx = svcctx;

	if (lock == NULL) {
		KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
		(*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
	} else
		(*iobj)->ki_lock = lock;

	KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
	InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

	return(STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;

	if (iobj == NULL)
		return;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	RemoveEntryList((&iobj->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	ExFreePool(iobj);

	return;
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object		*src;
	device_object		*dst;
{
	device_object		*attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(attached);
}

void
IoDetachDevice(topdev)
	device_object		*topdev;
{
	device_object		*tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return;
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */
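
/*
 * For example: a free mutant has dh_sigstate == 1. The first
 * acquisition drops it to 0 (owned), and each recursive acquisition
 * by the owning thread drops it further (-1, -2, ...), which is why
 * "owned by us and <= 0" still counts as signalled.
 */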

static int
ntoskrnl_is_signalled(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return(TRUE);
		return(FALSE);
	}

	if (obj->dh_sigstate > 0)
		return(TRUE);
	return(FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	switch (obj->dh_type) {
	case DISP_TYPE_MUTANT:
		km = (struct kmutant *)obj;
		obj->dh_sigstate--;
		/*
		 * If sigstate reaches 0, the mutex is now
		 * non-signalled (the new thread owns it).
		 */
		if (obj->dh_sigstate == 0) {
			km->km_ownerthread = td;
			if (km->km_abandoned == TRUE)
				km->km_abandoned = FALSE;
		}
		break;
	/* Synchronization objects get reset to unsignalled. */
	case DISP_TYPE_SYNCHRONIZATION_EVENT:
	case DISP_TYPE_SYNCHRONIZATION_TIMER:
		obj->dh_sigstate = 0;
		break;
	case DISP_TYPE_SEMAPHORE:
		obj->dh_sigstate--;
		break;
	default:
		break;
	}

	return;
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
	wait_block		*wb;
{
	wait_block		*cur;
	struct thread		*td;

	cur = wb;
	td = wb->wb_kthread;

	do {
		ntoskrnl_satisfy_wait(cur->wb_object, td);
		cur->wb_awakened = TRUE;
		cur = cur->wb_next;
	} while (cur != wb);

	return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
	nt_dispatch_header	*obj;
	uint32_t		increment;
{
	wait_block		*w, *next;
	list_entry		*e;
	struct thread		*td;
	wb_ext			*we;
	int			satisfied;

	/*
	 * Once an object has been signalled, we walk its list of
	 * wait blocks. If a wait block can be awakened, then satisfy
	 * waits as necessary and wake the thread.
	 *
	 * The rules work like this:
	 *
	 * If a wait block is marked as WAITTYPE_ANY, then
	 * we can satisfy the wait conditions on the current
	 * object and wake the thread right away. Satisfying
	 * the wait also has the effect of breaking us out
	 * of the search loop.
	 *
	 * If the object is marked as WAITTYPE_ALL, then the
	 * wait block will be part of a circularly linked
	 * list of wait blocks belonging to a waiting thread
	 * that's sleeping in KeWaitForMultipleObjects(). In
	 * order to wake the thread, all the objects in the
	 * wait list must be in the signalled state. If they
	 * are, we then satisfy all of them and wake the
	 * thread.
	 *
	 */

	e = obj->dh_waitlisthead.nle_flink;

	while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
		w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		satisfied = FALSE;
		if (w->wb_waittype == WAITTYPE_ANY) {
			/*
			 * Thread can be awakened if
			 * any wait is satisfied.
			 */
			ntoskrnl_satisfy_wait(obj, td);
			satisfied = TRUE;
			w->wb_awakened = TRUE;
		} else {
			/*
			 * Thread can only be woken up
			 * if all waits are satisfied.
			 * If the thread is waiting on multiple
			 * objects, they should all be linked
			 * through the wb_next pointers in the
			 * wait blocks.
			 */
			satisfied = TRUE;
			next = w->wb_next;
			while (next != w) {
				if (ntoskrnl_is_signalled(next->wb_object,
				    td) == FALSE) {
					satisfied = FALSE;
					break;
				}
				next = next->wb_next;
			}
			if (satisfied == TRUE)
				ntoskrnl_satisfy_multiple_waits(w);
		}

		if (satisfied == TRUE)
			cv_broadcastpri(&we->we_cv,
			    (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
			    w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);

		e = e->nle_flink;
	}

	return;
}

/*
 * Return the number of 100 nanosecond intervals since
 * January 1, 1601. (?!?!)
 */
void
ntoskrnl_time(tval)
	uint64_t		*tval;
{
	struct timespec		ts;

	nanotime(&ts);
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */

	return;
}
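
/*
 * As a sanity check on the magic number above: there are 89 leap
 * days in the 369 years between 1601 and 1970, so the offset is
 * (369 * 365 + 89) * 86400 = 11644473600 seconds, and converting
 * back is simply:
 *
 *	unix_sec = wintime / 10000000 - 11644473600;
 */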

static void
KeQuerySystemTime(current_time)
	uint64_t		*current_time;
{
	ntoskrnl_time(current_time);
}

static uint32_t
KeTickCount(void)
{
	struct timeval		tv;

	getmicrouptime(&tv);
	return tvtohz(&tv);
}

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
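
/*
 * Illustrative sketch of a caller (hypothetical driver code): wait
 * at most one second for an event, using the negative/relative
 * duetime convention described in the function body below:
 *
 *	nt_kevent ev;
 *	int64_t duetime = -10000000;	(one second, in 100ns units)
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	...
 *	if (KeWaitForSingleObject(&ev, 0, 0, FALSE,
 *	    &duetime) == STATUS_TIMEOUT)
 *		... nobody set the event in time ...
 */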

uint32_t
KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
	uint8_t alertable, int64_t *duetime)
{
	wait_block		w;
	struct thread		*td = curthread;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
	wb_ext			we;
	nt_dispatch_header	*obj;

	obj = arg;

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFS");
	we.we_td = td;

	/*
	 * Check to see if this object is already signalled,
	 * and just return without waiting if it is.
	 */
	if (ntoskrnl_is_signalled(obj, td) == TRUE) {
		/* Sanity check the signal state value. */
		if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		} else {
			/*
			 * There's a limit to how many times we can
			 * recursively acquire a mutant. If we hit
			 * the limit, something is very wrong.
			 */
			if (obj->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}
		}
	}

	bzero((char *)&w, sizeof(wait_block));
	w.wb_object = obj;
	w.wb_ext = &we;
	w.wb_waittype = WAITTYPE_ANY;
	w.wb_next = &w;
	w.wb_waitkey = 0;
	w.wb_awakened = FALSE;
	w.wb_oldpri = td->td_priority;

	InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * the duetime is an absolute time, and we need to convert it
	 * to a relative offset from the current time in order to use
	 * it. If it's negative, the duetime is already a relative
	 * offset, and we just have to convert the units.
	 */
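	/*
	 * For example, a relative 500ms timeout arrives as
	 * *duetime == -5000000 (100ns units), which the conversion
	 * below turns into tv = { 0, 500000 }.
	 */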

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	if (duetime == NULL)
		cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
	else
		error = cv_timedwait(&we.we_cv,
		    &ntoskrnl_dispatchlock, tvtohz(&tv));

	RemoveEntryList(&w.wb_waitlist);

	cv_destroy(&we.we_cv);

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
/*
	return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
	    mode, alertable, duetime, &w));
*/
}

static uint32_t
KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
	uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
	wait_block *wb_array)
{
	struct thread		*td = curthread;
	wait_block		*whead, *w;
	wait_block		_wb_array[MAX_WAIT_OBJECTS];
	nt_dispatch_header	*cur;
	struct timeval		tv;
	int			i, wcnt = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
	uint32_t		status = STATUS_SUCCESS;
	wb_ext			we;

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFM");
	we.we_td = td;

	if (wb_array == NULL)
		whead = _wb_array;
	else
		whead = wb_array;

	bzero((char *)whead, sizeof(wait_block) * cnt);

	/* First pass: see if we can satisfy any waits immediately. */

	wcnt = 0;
	w = whead;

	for (i = 0; i < cnt; i++) {
		InsertTailList((&obj[i]->dh_waitlisthead),
		    (&w->wb_waitlist));
		w->wb_ext = &we;
		w->wb_object = obj[i];
		w->wb_waittype = wtype;
		w->wb_waitkey = i;
		w->wb_awakened = FALSE;
		w->wb_oldpri = td->td_priority;
		w->wb_next = w + 1;
		w++;
		wcnt++;
		if (ntoskrnl_is_signalled(obj[i], td)) {
			/*
			 * There's a limit to how many times
			 * we can recursively acquire a mutant.
			 * If we hit the limit, something
			 * is very wrong.
			 */
			if (obj[i]->dh_sigstate == INT32_MIN &&
			    obj[i]->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}

			/*
			 * If this is a WAITTYPE_ANY wait, then
			 * satisfy the waited object and exit
			 * right now.
			 */

			if (wtype == WAITTYPE_ANY) {
				ntoskrnl_satisfy_wait(obj[i], td);
				status = STATUS_WAIT_0 + i;
				goto wait_done;
			} else {
				w--;
				wcnt--;
				w->wb_object = NULL;
				RemoveEntryList(&w->wb_waitlist);
			}
		}
	}

	/*
	 * If this is a WAITTYPE_ALL wait and all objects are
	 * already signalled, satisfy the waits and exit now.
	 */

	if (wtype == WAITTYPE_ALL && wcnt == 0) {
		for (i = 0; i < cnt; i++)
			ntoskrnl_satisfy_wait(obj[i], td);
		status = STATUS_SUCCESS;
		goto wait_done;
	}

	/*
	 * Create a circular waitblock list. The waitcount
	 * must always be non-zero when we get here.
	 */

	(w - 1)->wb_next = whead;

	/* Wait on any objects that aren't yet signalled. */

	/* Calculate timeout, if any. */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
		nanotime(&t1);

		if (duetime == NULL)
			cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
		else
			error = cv_timedwait(&we.we_cv,
			    &ntoskrnl_dispatchlock, tvtohz(&tv));

		/* Wait with timeout expired. */

		if (error) {
			status = STATUS_TIMEOUT;
			goto wait_done;
		}

		nanotime(&t2);

		/* See what's been signalled. */

		w = whead;
		do {
			cur = w->wb_object;
			if (ntoskrnl_is_signalled(cur, td) == TRUE ||
			    w->wb_awakened == TRUE) {
				/* Sanity check the signal state value. */
				if (cur->dh_sigstate == INT32_MIN &&
				    cur->dh_type == DISP_TYPE_MUTANT) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					panic("mutant limit exceeded");
				}
				wcnt--;
				if (wtype == WAITTYPE_ANY) {
					status = STATUS_WAIT_0 +
					    w->wb_waitkey;
					goto wait_done;
				}
			}
			w = w->wb_next;
		} while (w != whead);

		/*
		 * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
		 * someone, we can bail.
		 */

		if (wcnt == 0) {
			status = STATUS_SUCCESS;
			goto wait_done;
		}

		/*
		 * If this is a WAITTYPE_ALL wait, and there are still
		 * objects that haven't been signalled, deduct the
		 * time that's elapsed so far from the timeout and
		 * wait again (or continue waiting indefinitely if
		 * there's no timeout).
		 */
1943
1944 if (duetime != NULL) {
1945 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1946 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1947 }
1948 }
1949
1950
1951 wait_done:
1952
1953 cv_destroy(&we.we_cv);
1954
1955 for (i = 0; i < cnt; i++) {
1956 if (whead[i].wb_object != NULL)
1957 RemoveEntryList(&whead[i].wb_waitlist);
1958
1959 }
1960 mtx_unlock(&ntoskrnl_dispatchlock);
1961
1962 return(status);
1963 }
1964
1965 static void
1966 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1967 {
1968 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1969 return;
1970 }
1971
1972 static uint16_t
1973 READ_REGISTER_USHORT(reg)
1974 uint16_t *reg;
1975 {
1976 return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1977 }
1978
1979 static void
1980 WRITE_REGISTER_ULONG(reg, val)
1981 uint32_t *reg;
1982 uint32_t val;
1983 {
1984 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1985 return;
1986 }
1987
1988 static uint32_t
1989 READ_REGISTER_ULONG(reg)
1990 uint32_t *reg;
1991 {
1992 return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1993 }
1994
1995 static uint8_t
1996 READ_REGISTER_UCHAR(uint8_t *reg)
1997 {
1998 return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1999 }
2000
2001 static void
2002 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
2003 {
2004 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
2005 return;
2006 }
2007
2008 static int64_t
2009 _allmul(a, b)
2010 int64_t a;
2011 int64_t b;
2012 {
2013 return (a * b);
2014 }
2015
2016 static int64_t
2017 _alldiv(a, b)
2018 int64_t a;
2019 int64_t b;
2020 {
2021 return (a / b);
2022 }
2023
2024 static int64_t
2025 _allrem(a, b)
2026 int64_t a;
2027 int64_t b;
2028 {
2029 return (a % b);
2030 }
2031
2032 static uint64_t
2033 _aullmul(a, b)
2034 uint64_t a;
2035 uint64_t b;
2036 {
2037 return (a * b);
2038 }
2039
2040 static uint64_t
2041 _aulldiv(a, b)
2042 uint64_t a;
2043 uint64_t b;
2044 {
2045 return (a / b);
2046 }
2047
2048 static uint64_t
2049 _aullrem(a, b)
2050 uint64_t a;
2051 uint64_t b;
2052 {
2053 return (a % b);
2054 }
2055
2056 static int64_t
2057 _allshl(int64_t a, uint8_t b)
2058 {
2059 return (a << b);
2060 }
2061
2062 static uint64_t
2063 _aullshl(uint64_t a, uint8_t b)
2064 {
2065 return (a << b);
2066 }
2067
2068 static int64_t
2069 _allshr(int64_t a, uint8_t b)
2070 {
2071 return (a >> b);
2072 }
2073
2074 static uint64_t
2075 _aullshr(uint64_t a, uint8_t b)
2076 {
2077 return (a >> b);
2078 }
2079
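/*
 * Interlocked singly-linked (SList) support. The slist_header
 * records the list depth and a sequence number along with the
 * head pointer. On real Windows these are updated with lock-free
 * compare-exchange operations; here the push/pop helpers are
 * plain code, and the Interlocked*EntrySList() wrappers below
 * provide atomicity by taking ntoskrnl_interlock around them.
 */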
2080 static slist_entry *
2081 ntoskrnl_pushsl(head, entry)
2082 slist_header *head;
2083 slist_entry *entry;
2084 {
2085 slist_entry *oldhead;
2086
2087 oldhead = head->slh_list.slh_next;
2088 entry->sl_next = head->slh_list.slh_next;
2089 head->slh_list.slh_next = entry;
2090 head->slh_list.slh_depth++;
2091 head->slh_list.slh_seq++;
2092
2093 return(oldhead);
2094 }
2095
2096 static slist_entry *
2097 ntoskrnl_popsl(head)
2098 slist_header *head;
2099 {
2100 slist_entry *first;
2101
2102 first = head->slh_list.slh_next;
2103 if (first != NULL) {
2104 head->slh_list.slh_next = first->sl_next;
2105 head->slh_list.slh_depth--;
2106 head->slh_list.slh_seq++;
2107 }
2108
2109 return(first);
2110 }
2111
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() to the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly would result in a calling convention
 * mismatch and an ugly crash. On x86, this is effectively a
 * no-op, since ipt_func and ipt_wrap are the same.
 */
2122
2123 static funcptr
2124 ntoskrnl_findwrap(func)
2125 funcptr func;
2126 {
2127 image_patch_table *patch;
2128
2129 patch = ntoskrnl_functbl;
2130 while (patch->ipt_func != NULL) {
2131 if ((funcptr)patch->ipt_func == func)
2132 return((funcptr)patch->ipt_wrap);
2133 patch++;
2134 }
2135
2136 return(NULL);
2137 }
2138
2139 static void
2140 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2141 lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2142 uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2143 {
2144 bzero((char *)lookaside, sizeof(paged_lookaside_list));
2145
2146 if (size < sizeof(slist_entry))
2147 lookaside->nll_l.gl_size = sizeof(slist_entry);
2148 else
2149 lookaside->nll_l.gl_size = size;
2150 lookaside->nll_l.gl_tag = tag;
2151 if (allocfunc == NULL)
2152 lookaside->nll_l.gl_allocfunc =
2153 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2154 else
2155 lookaside->nll_l.gl_allocfunc = allocfunc;
2156
2157 if (freefunc == NULL)
2158 lookaside->nll_l.gl_freefunc =
2159 ntoskrnl_findwrap((funcptr)ExFreePool);
2160 else
2161 lookaside->nll_l.gl_freefunc = freefunc;
2162
2163 #ifdef __i386__
2164 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2165 #endif
2166
2167 lookaside->nll_l.gl_type = NonPagedPool;
2168 lookaside->nll_l.gl_depth = depth;
2169 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2170
2171 return;
2172 }
2173
2174 static void
2175 ExDeletePagedLookasideList(lookaside)
2176 paged_lookaside_list *lookaside;
2177 {
2178 void *buf;
2179 void (*freefunc)(void *);
2180
2181 freefunc = lookaside->nll_l.gl_freefunc;
2182 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2183 MSCALL1(freefunc, buf);
2184
2185 return;
2186 }
2187
2188 static void
2189 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2190 lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2191 uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2192 {
2193 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2194
2195 if (size < sizeof(slist_entry))
2196 lookaside->nll_l.gl_size = sizeof(slist_entry);
2197 else
2198 lookaside->nll_l.gl_size = size;
2199 lookaside->nll_l.gl_tag = tag;
2200 if (allocfunc == NULL)
2201 lookaside->nll_l.gl_allocfunc =
2202 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2203 else
2204 lookaside->nll_l.gl_allocfunc = allocfunc;
2205
2206 if (freefunc == NULL)
2207 lookaside->nll_l.gl_freefunc =
2208 ntoskrnl_findwrap((funcptr)ExFreePool);
2209 else
2210 lookaside->nll_l.gl_freefunc = freefunc;
2211
2212 #ifdef __i386__
2213 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2214 #endif
2215
2216 lookaside->nll_l.gl_type = NonPagedPool;
2217 lookaside->nll_l.gl_depth = depth;
2218 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2219
2220 return;
2221 }
2222
2223 static void
2224 ExDeleteNPagedLookasideList(lookaside)
2225 npaged_lookaside_list *lookaside;
2226 {
2227 void *buf;
2228 void (*freefunc)(void *);
2229
2230 freefunc = lookaside->nll_l.gl_freefunc;
2231 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2232 MSCALL1(freefunc, buf);
2233
2234 return;
2235 }
2236
2237 slist_entry *
2238 InterlockedPushEntrySList(head, entry)
2239 slist_header *head;
2240 slist_entry *entry;
2241 {
2242 slist_entry *oldhead;
2243
2244 mtx_lock_spin(&ntoskrnl_interlock);
2245 oldhead = ntoskrnl_pushsl(head, entry);
2246 mtx_unlock_spin(&ntoskrnl_interlock);
2247
2248 return(oldhead);
2249 }
2250
2251 slist_entry *
2252 InterlockedPopEntrySList(head)
2253 slist_header *head;
2254 {
2255 slist_entry *first;
2256
2257 mtx_lock_spin(&ntoskrnl_interlock);
2258 first = ntoskrnl_popsl(head);
2259 mtx_unlock_spin(&ntoskrnl_interlock);
2260
2261 return(first);
2262 }
2263
2264 static slist_entry *
2265 ExInterlockedPushEntrySList(head, entry, lock)
2266 slist_header *head;
2267 slist_entry *entry;
2268 kspin_lock *lock;
2269 {
2270 return(InterlockedPushEntrySList(head, entry));
2271 }
2272
2273 static slist_entry *
2274 ExInterlockedPopEntrySList(head, lock)
2275 slist_header *head;
2276 kspin_lock *lock;
2277 {
2278 return(InterlockedPopEntrySList(head));
2279 }
2280
2281 uint16_t
2282 ExQueryDepthSList(head)
2283 slist_header *head;
2284 {
2285 uint16_t depth;
2286
2287 mtx_lock_spin(&ntoskrnl_interlock);
2288 depth = head->slh_list.slh_depth;
2289 mtx_unlock_spin(&ntoskrnl_interlock);
2290
2291 return(depth);
2292 }
2293
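/*
 * Windows spinlocks. A kspin_lock is a single machine word that
 * we flip between 0 (free) and 1 (held) with atomic_cmpset. On
 * real Windows, acquiring a spinlock also raises the IRQL to
 * DISPATCH_LEVEL; KeAcquireSpinLockRaiseToDpc() below handles
 * both steps. A typical driver-side pattern (sketch):
 *
 *	uint8_t			irql;
 *
 *	KeAcquireSpinLock(&lock, &irql);
 *	... critical section ...
 *	KeReleaseSpinLock(&lock, irql);
 */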
2294 void
2295 KeInitializeSpinLock(lock)
2296 kspin_lock *lock;
2297 {
2298 *lock = 0;
2299
2300 return;
2301 }
2302
2303 #ifdef __i386__
2304 void
2305 KefAcquireSpinLockAtDpcLevel(lock)
2306 kspin_lock *lock;
2307 {
2308 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2309 int i = 0;
2310 #endif
2311
2312 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2313 /* sit and spin */;
2314 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2315 i++;
2316 if (i > 200000000)
2317 panic("DEADLOCK!");
2318 #endif
2319 }
2320
2321 return;
2322 }
2323
2324 void
2325 KefReleaseSpinLockFromDpcLevel(lock)
2326 kspin_lock *lock;
2327 {
2328 atomic_store_rel_int((volatile u_int *)lock, 0);
2329
2330 return;
2331 }
2332
2333 uint8_t
2334 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2335 {
2336 uint8_t oldirql;
2337
2338 if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2339 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2340
2341 KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2342 KeAcquireSpinLockAtDpcLevel(lock);
2343
2344 return(oldirql);
2345 }
2346 #else
2347 void
2348 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2349 {
2350 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2351 /* sit and spin */;
2352
2353 return;
2354 }
2355
2356 void
2357 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2358 {
2359 atomic_store_rel_int((volatile u_int *)lock, 0);
2360
2361 return;
2362 }
2363 #endif /* __i386__ */
2364
2365 uintptr_t
2366 InterlockedExchange(dst, val)
2367 volatile uint32_t *dst;
2368 uintptr_t val;
2369 {
2370 uintptr_t r;
2371
2372 mtx_lock_spin(&ntoskrnl_interlock);
2373 r = *dst;
2374 *dst = val;
2375 mtx_unlock_spin(&ntoskrnl_interlock);
2376
2377 return(r);
2378 }
2379
static uint32_t
InterlockedIncrement(volatile uint32_t *addend)
{
	/*
	 * Use a 32-bit fetch-and-add here: the previous code did a
	 * 64-bit atomic_add_long() (which overruns a 32-bit operand
	 * on amd64) and then re-read *addend non-atomically, so the
	 * return value could be stale on SMP.
	 */
	return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
}

static uint32_t
InterlockedDecrement(volatile uint32_t *addend)
{
	return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
}
2395
static void
ExInterlockedAddLargeStatistic(uint64_t *addend, uint32_t inc)
{
	mtx_lock_spin(&ntoskrnl_interlock);
	*addend += inc;
	mtx_unlock_spin(&ntoskrnl_interlock);
}
2407
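/*
 * MDL (memory descriptor list) management. An MDL is a header
 * followed by an array of page addresses describing a buffer.
 * Small descriptors come from a preallocated UMA zone (mdl_zone);
 * anything bigger than MDL_ZONE_SIZE falls back to
 * ExAllocatePoolWithTag(), and IoFreeMdl() uses the
 * MDL_ZONE_ALLOCED flag to release each kind to the right place.
 */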
2408 mdl *
2409 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2410 uint8_t chargequota, irp *iopkt)
2411 {
2412 mdl *m;
2413 int zone = 0;
2414
2415 if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2416 m = ExAllocatePoolWithTag(NonPagedPool,
2417 MmSizeOfMdl(vaddr, len), 0);
2418 else {
2419 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2420 zone++;
2421 }
2422
2423 if (m == NULL)
2424 return (NULL);
2425
2426 MmInitializeMdl(m, vaddr, len);
2427
2428 /*
	 * MmInitializeMdl() clears the flags field, so we
2430 * have to set this here. If the MDL came from the
2431 * MDL UMA zone, tag it so we can release it to
2432 * the right place later.
2433 */
2434 if (zone)
2435 m->mdl_flags = MDL_ZONE_ALLOCED;
2436
2437 if (iopkt != NULL) {
2438 if (secondarybuf == TRUE) {
2439 mdl *last;
2440 last = iopkt->irp_mdl;
2441 while (last->mdl_next != NULL)
2442 last = last->mdl_next;
2443 last->mdl_next = m;
2444 } else {
2445 if (iopkt->irp_mdl != NULL)
2446 panic("leaking an MDL in IoAllocateMdl()");
2447 iopkt->irp_mdl = m;
2448 }
2449 }
2450
2451 return (m);
2452 }
2453
2454 void
2455 IoFreeMdl(m)
2456 mdl *m;
2457 {
2458 if (m == NULL)
2459 return;
2460
2461 if (m->mdl_flags & MDL_ZONE_ALLOCED)
2462 uma_zfree(mdl_zone, m);
2463 else
2464 ExFreePool(m);
2465
2466 return;
2467 }
2468
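/*
 * Note that the two contiguous memory allocators below don't
 * actually enforce the caller's addressing constraints
 * (lowest/highest/boundary); the memory simply comes from the
 * non-paged pool, which appears to be good enough for the
 * drivers we need to support.
 */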
2469 static void *
2470 MmAllocateContiguousMemory(size, highest)
2471 uint32_t size;
2472 uint64_t highest;
2473 {
2474 void *addr;
2475 size_t pagelength = roundup(size, PAGE_SIZE);
2476
2477 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2478
2479 return(addr);
2480 }
2481
2482 static void *
2483 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2484 boundary, cachetype)
2485 uint32_t size;
2486 uint64_t lowest;
2487 uint64_t highest;
2488 uint64_t boundary;
2489 uint32_t cachetype;
2490 {
2491 void *addr;
2492 size_t pagelength = roundup(size, PAGE_SIZE);
2493
2494 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2495
2496 return(addr);
2497 }
2498
2499 static void
2500 MmFreeContiguousMemory(base)
2501 void *base;
2502 {
2503 ExFreePool(base);
2504 }
2505
2506 static void
2507 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2508 void *base;
2509 uint32_t size;
2510 uint32_t cachetype;
2511 {
2512 ExFreePool(base);
2513 }
2514
2515 static uint32_t
2516 MmSizeOfMdl(vaddr, len)
2517 void *vaddr;
2518 size_t len;
2519 {
2520 uint32_t l;
2521
2522 l = sizeof(struct mdl) +
2523 (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2524
2525 return(l);
2526 }
2527
2528 /*
2529 * The Microsoft documentation says this routine fills in the
2530 * page array of an MDL with the _physical_ page addresses that
2531 * comprise the buffer, but we don't really want to do that here.
2532 * Instead, we just fill in the page array with the kernel virtual
2533 * addresses of the buffers.
2534 */
2535 void
2536 MmBuildMdlForNonPagedPool(m)
2537 mdl *m;
2538 {
2539 vm_offset_t *mdl_pages;
2540 int pagecnt, i;
2541
2542 pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2543
2544 if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2545 panic("not enough pages in MDL to describe buffer");
2546
2547 mdl_pages = MmGetMdlPfnArray(m);
2548
	/* Fill in each slot of the page array, one entry per page. */
	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2551
2552 m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2553 m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2554
2555 return;
2556 }
2557
2558 static void *
2559 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2560 {
2561 buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2562 return(MmGetMdlVirtualAddress(buf));
2563 }
2564
2565 static void *
2566 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2567 void *vaddr, uint32_t bugcheck, uint32_t prio)
2568 {
2569 return(MmMapLockedPages(buf, accessmode));
2570 }
2571
2572 static void
2573 MmUnmapLockedPages(vaddr, buf)
2574 void *vaddr;
2575 mdl *buf;
2576 {
2577 buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2578 return;
2579 }
2580
2581 /*
2582 * This function has a problem in that it will break if you
2583 * compile this module without PAE and try to use it on a PAE
2584 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
2586 * You'd think the virtual memory subsystem would help us out
2587 * here, but it doesn't.
2588 */
2589
2590 uint8_t
2591 MmIsAddressValid(vaddr)
2592 void *vaddr;
2593 {
2594 if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2595 return(TRUE);
2596
2597 return(FALSE);
2598 }
2599
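/*
 * Since we have no device_t of our own to allocate resources
 * with, we map I/O space by searching the device tree, starting
 * from the nexus, for a device whose SYS_RES_MEMORY resource
 * range contains the requested physical address, and then return
 * the corresponding kernel virtual address (activating the
 * resource first if necessary).
 */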
2600 void *
2601 MmMapIoSpace(paddr, len, cachetype)
2602 uint64_t paddr;
2603 uint32_t len;
2604 uint32_t cachetype;
2605 {
2606 devclass_t nexus_class;
2607 device_t *nexus_devs, devp;
2608 int nexus_count = 0;
2609 device_t matching_dev = NULL;
2610 struct resource *res;
2611 int i;
2612 vm_offset_t v;
2613
2614 /* There will always be at least one nexus. */
2615
2616 nexus_class = devclass_find("nexus");
2617 devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2618
2619 for (i = 0; i < nexus_count; i++) {
2620 devp = nexus_devs[i];
2621 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2622 if (matching_dev)
2623 break;
2624 }
2625
2626 free(nexus_devs, M_TEMP);
2627
2628 if (matching_dev == NULL)
2629 return(NULL);
2630
2631 v = (vm_offset_t)rman_get_virtual(res);
2632 if (paddr > rman_get_start(res))
2633 v += paddr - rman_get_start(res);
2634
2635 return((void *)v);
2636 }
2637
2638 void
2639 MmUnmapIoSpace(vaddr, len)
2640 void *vaddr;
2641 size_t len;
2642 {
2643 return;
2644 }
2645
2646
2647 static device_t
2648 ntoskrnl_finddev(dev, paddr, res)
2649 device_t dev;
2650 uint64_t paddr;
2651 struct resource **res;
2652 {
2653 device_t *children = NULL;
2654 device_t matching_dev;
2655 int childcnt;
2656 struct resource *r;
2657 struct resource_list *rl;
2658 struct resource_list_entry *rle;
2659 uint32_t flags;
2660 int i;
2661
2662 /* We only want devices that have been successfully probed. */
2663
2664 if (device_is_alive(dev) == FALSE)
2665 return(NULL);
2666
2667 rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2668 if (rl != NULL) {
2669 #if __FreeBSD_version < 600022
2670 SLIST_FOREACH(rle, rl, link) {
2671 #else
2672 STAILQ_FOREACH(rle, rl, link) {
2673 #endif
2674 r = rle->res;
2675
2676 if (r == NULL)
2677 continue;
2678
2679 flags = rman_get_flags(r);
2680
2681 if (rle->type == SYS_RES_MEMORY &&
2682 paddr >= rman_get_start(r) &&
2683 paddr <= rman_get_end(r)) {
2684 if (!(flags & RF_ACTIVE))
2685 bus_activate_resource(dev,
2686 SYS_RES_MEMORY, 0, r);
2687 *res = r;
2688 return(dev);
2689 }
2690 }
2691 }
2692
2693 /*
2694 * If this device has children, do another
2695 * level of recursion to inspect them.
2696 */
2697
2698 device_get_children(dev, &children, &childcnt);
2699
2700 for (i = 0; i < childcnt; i++) {
2701 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2702 if (matching_dev != NULL) {
2703 free(children, M_TEMP);
2704 return(matching_dev);
2705 }
2706 }
2707
2708
2709 /* Won't somebody please think of the children! */
2710
2711 if (children != NULL)
2712 free(children, M_TEMP);
2713
2714 return(NULL);
2715 }
2716
/*
 * Workitems are unlike DPCs in that they run at PASSIVE_LEVEL in
 * the context of a schedulable system worker thread, rather than
 * at DISPATCH_LEVEL. We approximate that here with a small set of
 * dedicated kernel threads.
 */
2722 static void
2723 ntoskrnl_workitem_thread(arg)
2724 void *arg;
2725 {
2726 kdpc_queue *kq;
2727 list_entry *l;
2728 io_workitem *iw;
2729 uint8_t irql;
2730
2731 kq = arg;
2732
2733 InitializeListHead(&kq->kq_disp);
2734 kq->kq_td = curthread;
2735 kq->kq_exit = 0;
2736 KeInitializeSpinLock(&kq->kq_lock);
2737 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2738
2739 while (1) {
2740 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2741
2742 KeAcquireSpinLock(&kq->kq_lock, &irql);
2743
2744 if (kq->kq_exit) {
2745 kq->kq_exit = 0;
2746 KeReleaseSpinLock(&kq->kq_lock, irql);
2747 break;
2748 }
2749
2750 while (!IsListEmpty(&kq->kq_disp)) {
2751 l = RemoveHeadList(&kq->kq_disp);
2752 iw = CONTAINING_RECORD(l,
2753 io_workitem, iw_listentry);
2754 InitializeListHead((&iw->iw_listentry));
2755 if (iw->iw_func == NULL)
2756 continue;
2757 KeReleaseSpinLock(&kq->kq_lock, irql);
2758 MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2759 KeAcquireSpinLock(&kq->kq_lock, &irql);
2760 }
2761
2762 KeReleaseSpinLock(&kq->kq_lock, irql);
2763 }
2764
2765 #if __FreeBSD_version < 502113
2766 mtx_lock(&Giant);
2767 #endif
2768 kproc_exit(0);
2769 return; /* notreached */
2770 }
2771
2772 static void
2773 ntoskrnl_destroy_workitem_threads(void)
2774 {
2775 kdpc_queue *kq;
2776 int i;
2777
2778 for (i = 0; i < WORKITEM_THREADS; i++) {
2779 kq = wq_queues + i;
2780 kq->kq_exit = 1;
2781 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2782 while (kq->kq_exit)
2783 tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
2784 }
2785
2786 return;
2787 }
2788
2789 io_workitem *
2790 IoAllocateWorkItem(dobj)
2791 device_object *dobj;
2792 {
2793 io_workitem *iw;
2794
2795 iw = uma_zalloc(iw_zone, M_NOWAIT);
2796 if (iw == NULL)
2797 return(NULL);
2798
2799 InitializeListHead(&iw->iw_listentry);
2800 iw->iw_dobj = dobj;
2801
2802 mtx_lock(&ntoskrnl_dispatchlock);
2803 iw->iw_idx = wq_idx;
2804 WORKIDX_INC(wq_idx);
2805 mtx_unlock(&ntoskrnl_dispatchlock);
2806
2807 return(iw);
2808 }
2809
2810 void
2811 IoFreeWorkItem(iw)
2812 io_workitem *iw;
2813 {
2814 uma_zfree(iw_zone, iw);
2815 return;
2816 }
2817
2818 void
2819 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2820 io_workitem *iw;
2821 io_workitem_func iw_func;
2822 uint32_t qtype;
2823 void *ctx;
2824 {
2825 kdpc_queue *kq;
2826 list_entry *l;
2827 io_workitem *cur;
2828 uint8_t irql;
2829
2830 kq = wq_queues + iw->iw_idx;
2831
2832 KeAcquireSpinLock(&kq->kq_lock, &irql);
2833
2834 /*
2835 * Traverse the list and make sure this workitem hasn't
2836 * already been inserted. Queuing the same workitem
2837 * twice will hose the list but good.
2838 */
2839
2840 l = kq->kq_disp.nle_flink;
2841 while (l != &kq->kq_disp) {
2842 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2843 if (cur == iw) {
2844 /* Already queued -- do nothing. */
2845 KeReleaseSpinLock(&kq->kq_lock, irql);
2846 return;
2847 }
2848 l = l->nle_flink;
2849 }
2850
2851 iw->iw_func = iw_func;
2852 iw->iw_ctx = ctx;
2853
2854 InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2855 KeReleaseSpinLock(&kq->kq_lock, irql);
2856
2857 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2858
2859 return;
2860 }
2861
2862 static void
2863 ntoskrnl_workitem(dobj, arg)
2864 device_object *dobj;
2865 void *arg;
2866 {
2867 io_workitem *iw;
2868 work_queue_item *w;
2869 work_item_func f;
2870
2871 iw = arg;
2872 w = (work_queue_item *)dobj;
2873 f = (work_item_func)w->wqi_func;
2874 uma_zfree(iw_zone, iw);
2875 MSCALL2(f, w, w->wqi_ctx);
2876
2877 return;
2878 }
2879
2880 /*
2881 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2882 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2883 * problem with ExQueueWorkItem() is that it can't guard against
2884 * the condition where a driver submits a job to the work queue and
2885 * is then unloaded before the job is able to run. IoQueueWorkItem()
2886 * acquires a reference to the device's device_object via the
2887 * object manager and retains it until after the job has completed,
2888 * which prevents the driver from being unloaded before the job
2889 * runs. (We don't currently support this behavior, though hopefully
2890 * that will change once the object manager API is fleshed out a bit.)
2891 *
2892 * Having said all that, the ExQueueWorkItem() API remains, because
2893 * there are still other parts of Windows that use it, including
2894 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2895 * We fake up the ExQueueWorkItem() API on top of our implementation
2896 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2897 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2898 * queue item (provided by the caller) in to IoAllocateWorkItem()
2899 * instead of the device_object. We need to save this pointer so
2900 * we can apply a sanity check: as with the DPC queue and other
2901 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
2903 */
2904
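/*
 * A driver-side usage sketch (my_callback and my_context are
 * hypothetical names; note that our implementation ignores the
 * qtype argument):
 *
 *	static work_queue_item	wqi;
 *
 *	wqi.wqi_func = my_callback;
 *	wqi.wqi_ctx = my_context;
 *	ExQueueWorkItem(&wqi, 0);
 */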
2905 void
2906 ExQueueWorkItem(w, qtype)
2907 work_queue_item *w;
2908 uint32_t qtype;
2909 {
2910 io_workitem *iw;
2911 io_workitem_func iwf;
2912 kdpc_queue *kq;
2913 list_entry *l;
2914 io_workitem *cur;
2915 uint8_t irql;
2916
2917
2918 /*
2919 * We need to do a special sanity test to make sure
2920 * the ExQueueWorkItem() API isn't used to queue
2921 * the same workitem twice. Rather than checking the
2922 * io_workitem pointer itself, we test the attached
2923 * device object, which is really a pointer to the
2924 * legacy work queue item structure.
2925 */
2926
2927 kq = wq_queues + WORKITEM_LEGACY_THREAD;
2928 KeAcquireSpinLock(&kq->kq_lock, &irql);
2929 l = kq->kq_disp.nle_flink;
2930 while (l != &kq->kq_disp) {
2931 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2932 if (cur->iw_dobj == (device_object *)w) {
2933 /* Already queued -- do nothing. */
2934 KeReleaseSpinLock(&kq->kq_lock, irql);
2935 return;
2936 }
2937 l = l->nle_flink;
2938 }
2939 KeReleaseSpinLock(&kq->kq_lock, irql);
2940
2941 iw = IoAllocateWorkItem((device_object *)w);
2942 if (iw == NULL)
2943 return;
2944
2945 iw->iw_idx = WORKITEM_LEGACY_THREAD;
2946 iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2947 IoQueueWorkItem(iw, iwf, qtype, iw);
2948
2949 return;
2950 }
2951
2952 static void
2953 RtlZeroMemory(dst, len)
2954 void *dst;
2955 size_t len;
2956 {
2957 bzero(dst, len);
2958 return;
2959 }
2960
2961 static void
2962 RtlCopyMemory(dst, src, len)
2963 void *dst;
2964 const void *src;
2965 size_t len;
2966 {
2967 bcopy(src, dst, len);
2968 return;
2969 }
2970
/*
 * Per the Windows API, this returns the number of bytes that
 * match before the first difference, not the total number of
 * matching bytes, so stop comparing at the first mismatch.
 */
static size_t
RtlCompareMemory(const void *s1, const void *s2, size_t len)
{
	size_t			i;
	uint8_t			*m1, *m2;

	m1 = __DECONST(uint8_t *, s1);
	m2 = __DECONST(uint8_t *, s2);

	for (i = 0; i < len && m1[i] == m2[i]; i++)
		;

	return(i);
}
2989
2990 void
2991 RtlInitAnsiString(dst, src)
2992 ansi_string *dst;
2993 char *src;
2994 {
2995 ansi_string *a;
2996
2997 a = dst;
2998 if (a == NULL)
2999 return;
3000 if (src == NULL) {
3001 a->as_len = a->as_maxlen = 0;
3002 a->as_buf = NULL;
3003 } else {
3004 a->as_buf = src;
3005 a->as_len = a->as_maxlen = strlen(src);
3006 }
3007
3008 return;
3009 }
3010
3011 void
3012 RtlInitUnicodeString(dst, src)
3013 unicode_string *dst;
3014 uint16_t *src;
3015 {
3016 unicode_string *u;
3017 int i;
3018
3019 u = dst;
3020 if (u == NULL)
3021 return;
3022 if (src == NULL) {
3023 u->us_len = u->us_maxlen = 0;
3024 u->us_buf = NULL;
3025 } else {
3026 i = 0;
3027 while(src[i] != 0)
3028 i++;
3029 u->us_buf = src;
3030 u->us_len = u->us_maxlen = i * 2;
3031 }
3032
3033 return;
3034 }
3035
3036 ndis_status
3037 RtlUnicodeStringToInteger(ustr, base, val)
3038 unicode_string *ustr;
3039 uint32_t base;
3040 uint32_t *val;
3041 {
3042 uint16_t *uchr;
3043 int len, neg = 0;
3044 char abuf[64];
3045 char *astr;
3046
3047 uchr = ustr->us_buf;
3048 len = ustr->us_len;
3049 bzero(abuf, sizeof(abuf));
3050
3051 if ((char)((*uchr) & 0xFF) == '-') {
3052 neg = 1;
3053 uchr++;
3054 len -= 2;
3055 } else if ((char)((*uchr) & 0xFF) == '+') {
3056 neg = 0;
3057 uchr++;
3058 len -= 2;
3059 }
3060
3061 if (base == 0) {
3062 if ((char)((*uchr) & 0xFF) == 'b') {
3063 base = 2;
3064 uchr++;
3065 len -= 2;
3066 } else if ((char)((*uchr) & 0xFF) == 'o') {
3067 base = 8;
3068 uchr++;
3069 len -= 2;
3070 } else if ((char)((*uchr) & 0xFF) == 'x') {
3071 base = 16;
3072 uchr++;
3073 len -= 2;
3074 } else
3075 base = 10;
3076 }
3077
3078 astr = abuf;
3079 if (neg) {
3080 strcpy(astr, "-");
3081 astr++;
3082 }
3083
	/*
	 * Clamp the length so the converted string can't overrun
	 * abuf[]; len counts bytes of UTF-16, two per character.
	 */
	if (len > (int)(sizeof(abuf) - 2) * 2)
		len = (int)(sizeof(abuf) - 2) * 2;

	ntoskrnl_unicode_to_ascii(uchr, astr, len);
	*val = strtoul(abuf, NULL, base);
3086
3087 return(STATUS_SUCCESS);
3088 }
3089
3090 void
3091 RtlFreeUnicodeString(ustr)
3092 unicode_string *ustr;
3093 {
3094 if (ustr->us_buf == NULL)
3095 return;
3096 ExFreePool(ustr->us_buf);
3097 ustr->us_buf = NULL;
3098 return;
3099 }
3100
3101 void
3102 RtlFreeAnsiString(astr)
3103 ansi_string *astr;
3104 {
3105 if (astr->as_buf == NULL)
3106 return;
3107 ExFreePool(astr->as_buf);
3108 astr->as_buf = NULL;
3109 return;
3110 }
3111
3112 static int
3113 atoi(str)
3114 const char *str;
3115 {
3116 return (int)strtol(str, (char **)NULL, 10);
3117 }
3118
3119 static long
3120 atol(str)
3121 const char *str;
3122 {
3123 return strtol(str, (char **)NULL, 10);
3124 }
3125
3126 static int
3127 rand(void)
3128 {
3129 struct timeval tv;
3130
3131 microtime(&tv);
3132 srandom(tv.tv_usec);
3133 return((int)random());
3134 }
3135
3136 static void
3137 srand(seed)
3138 unsigned int seed;
3139 {
3140 srandom(seed);
3141 return;
3142 }
3143
3144 static uint8_t
3145 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3146 {
3147 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3148 return(TRUE);
3149 return(FALSE);
3150 }
3151
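/*
 * Stub: we don't implement the NT object namespace, so just
 * pretend the lookup succeeded.
 */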
3152 static ndis_status
3153 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
3154 unicode_string *name;
3155 uint32_t reqaccess;
3156 void *fileobj;
3157 device_object *devobj;
3158 {
3159 return(STATUS_SUCCESS);
3160 }
3161
3162 static ndis_status
3163 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3164 device_object *devobj;
3165 uint32_t regprop;
3166 uint32_t buflen;
3167 void *prop;
3168 uint32_t *reslen;
3169 {
3170 driver_object *drv;
3171 uint16_t **name;
3172
3173 drv = devobj->do_drvobj;
3174
3175 switch (regprop) {
3176 case DEVPROP_DRIVER_KEYNAME:
3177 name = prop;
3178 *name = drv->dro_drivername.us_buf;
3179 *reslen = drv->dro_drivername.us_len;
3180 break;
3181 default:
3182 return(STATUS_INVALID_PARAMETER_2);
3183 break;
3184 }
3185
3186 return(STATUS_SUCCESS);
3187 }
3188
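/*
 * Windows kernel mutexes ("mutants"). The signal state starts at
 * 1 (free); acquiring the mutex decrements it, so 0 means owned
 * and negative values track recursive acquisitions by the owning
 * thread. KeReleaseMutex() increments the state and only wakes
 * waiters once it returns to 1. (The INT32_MIN check in the wait
 * code above guards against recursion counter underflow.)
 */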
3189 static void
3190 KeInitializeMutex(kmutex, level)
3191 kmutant *kmutex;
3192 uint32_t level;
3193 {
3194 InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3195 kmutex->km_abandoned = FALSE;
3196 kmutex->km_apcdisable = 1;
3197 kmutex->km_header.dh_sigstate = 1;
3198 kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3199 kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3200 kmutex->km_ownerthread = NULL;
3201 return;
3202 }
3203
3204 static uint32_t
3205 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3206 {
3207 uint32_t prevstate;
3208
3209 mtx_lock(&ntoskrnl_dispatchlock);
3210 prevstate = kmutex->km_header.dh_sigstate;
3211 if (kmutex->km_ownerthread != curthread) {
3212 mtx_unlock(&ntoskrnl_dispatchlock);
3213 return(STATUS_MUTANT_NOT_OWNED);
3214 }
3215
3216 kmutex->km_header.dh_sigstate++;
3217 kmutex->km_abandoned = FALSE;
3218
3219 if (kmutex->km_header.dh_sigstate == 1) {
3220 kmutex->km_ownerthread = NULL;
3221 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3222 }
3223
3224 mtx_unlock(&ntoskrnl_dispatchlock);
3225
3226 return(prevstate);
3227 }
3228
3229 static uint32_t
3230 KeReadStateMutex(kmutex)
3231 kmutant *kmutex;
3232 {
3233 return(kmutex->km_header.dh_sigstate);
3234 }
3235
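/*
 * Windows events. Notification events wake all waiters and stay
 * signalled until explicitly reset; synchronization events wake
 * one waiter and clear themselves automatically (see KeSetEvent()
 * below). A typical driver-side pattern (sketch):
 *
 *	nt_kevent		ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);
 */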
3236 void
3237 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3238 {
3239 InitializeListHead((&kevent->k_header.dh_waitlisthead));
3240 kevent->k_header.dh_sigstate = state;
3241 if (type == EVENT_TYPE_NOTIFY)
3242 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3243 else
3244 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3245 kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3246 return;
3247 }
3248
3249 uint32_t
3250 KeResetEvent(kevent)
3251 nt_kevent *kevent;
3252 {
3253 uint32_t prevstate;
3254
3255 mtx_lock(&ntoskrnl_dispatchlock);
3256 prevstate = kevent->k_header.dh_sigstate;
3257 kevent->k_header.dh_sigstate = FALSE;
3258 mtx_unlock(&ntoskrnl_dispatchlock);
3259
3260 return(prevstate);
3261 }
3262
3263 uint32_t
3264 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3265 {
3266 uint32_t prevstate;
3267 wait_block *w;
3268 nt_dispatch_header *dh;
3269 struct thread *td;
3270 wb_ext *we;
3271
3272 mtx_lock(&ntoskrnl_dispatchlock);
3273 prevstate = kevent->k_header.dh_sigstate;
3274 dh = &kevent->k_header;
3275
3276 if (IsListEmpty(&dh->dh_waitlisthead))
3277 /*
3278 * If there's nobody in the waitlist, just set
3279 * the state to signalled.
3280 */
3281 dh->dh_sigstate = 1;
3282 else {
3283 /*
3284 * Get the first waiter. If this is a synchronization
3285 * event, just wake up that one thread (don't bother
3286 * setting the state to signalled since we're supposed
3287 * to automatically clear synchronization events anyway).
3288 *
		 * If it's a notification event, or the first
3290 * waiter is doing a WAITTYPE_ALL wait, go through
3291 * the full wait satisfaction process.
3292 */
3293 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3294 wait_block, wb_waitlist);
3295 we = w->wb_ext;
3296 td = we->we_td;
3297 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3298 w->wb_waittype == WAITTYPE_ALL) {
3299 if (prevstate == 0) {
3300 dh->dh_sigstate = 1;
3301 ntoskrnl_waittest(dh, increment);
3302 }
3303 } else {
3304 w->wb_awakened |= TRUE;
3305 cv_broadcastpri(&we->we_cv,
3306 (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3307 w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3308 }
3309 }
3310
3311 mtx_unlock(&ntoskrnl_dispatchlock);
3312
3313 return(prevstate);
3314 }
3315
3316 void
3317 KeClearEvent(kevent)
3318 nt_kevent *kevent;
3319 {
3320 kevent->k_header.dh_sigstate = FALSE;
3321 return;
3322 }
3323
3324 uint32_t
3325 KeReadStateEvent(kevent)
3326 nt_kevent *kevent;
3327 {
3328 return(kevent->k_header.dh_sigstate);
3329 }
3330
3331 /*
3332 * The object manager in Windows is responsible for managing
3333 * references and access to various types of objects, including
3334 * device_objects, events, threads, timers and so on. However,
3335 * there's a difference in the way objects are handled in user
3336 * mode versus kernel mode.
3337 *
3338 * In user mode (i.e. Win32 applications), all objects are
3339 * managed by the object manager. For example, when you create
3340 * a timer or event object, you actually end up with an
3341 * object_header (for the object manager's bookkeeping
3342 * purposes) and an object body (which contains the actual object
3343 * structure, e.g. ktimer, kevent, etc...). This allows Windows
3344 * to manage resource quotas and to enforce access restrictions
3345 * on basically every kind of system object handled by the kernel.
3346 *
3347 * However, in kernel mode, you only end up using the object
3348 * manager some of the time. For example, in a driver, you create
3349 * a timer object by simply allocating the memory for a ktimer
3350 * structure and initializing it with KeInitializeTimer(). Hence,
3351 * the timer has no object_header and no reference counting or
3352 * security/resource checks are done on it. The assumption in
3353 * this case is that if you're running in kernel mode, you know
3354 * what you're doing, and you're already at an elevated privilege
3355 * anyway.
3356 *
3357 * There are some exceptions to this. The two most important ones
3358 * for our purposes are device_objects and threads. We need to use
3359 * the object manager to do reference counting on device_objects,
3360 * and for threads, you can only get a pointer to a thread's
3361 * dispatch header by using ObReferenceObjectByHandle() on the
3362 * handle returned by PsCreateSystemThread().
3363 */
3364
3365 static ndis_status
3366 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3367 uint8_t accessmode, void **object, void **handleinfo)
3368 {
3369 nt_objref *nr;
3370
3371 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3372 if (nr == NULL)
3373 return(STATUS_INSUFFICIENT_RESOURCES);
3374
3375 InitializeListHead((&nr->no_dh.dh_waitlisthead));
3376 nr->no_obj = handle;
3377 nr->no_dh.dh_type = DISP_TYPE_THREAD;
3378 nr->no_dh.dh_sigstate = 0;
3379 nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3380 sizeof(uint32_t));
3381 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3382 *object = nr;
3383
3384 return(STATUS_SUCCESS);
3385 }
3386
3387 static void
3388 ObfDereferenceObject(object)
3389 void *object;
3390 {
3391 nt_objref *nr;
3392
3393 nr = object;
3394 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3395 free(nr, M_DEVBUF);
3396
3397 return;
3398 }
3399
3400 static uint32_t
3401 ZwClose(handle)
3402 ndis_handle handle;
3403 {
3404 return(STATUS_SUCCESS);
3405 }
3406
3407 static uint32_t
3408 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3409 uint32_t traceclass;
3410 void *traceinfo;
3411 uint32_t infolen;
3412 uint32_t reqlen;
3413 void *buf;
3414 {
3415 return(STATUS_NOT_FOUND);
3416 }
3417
3418 static uint32_t
3419 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3420 void *guid, uint16_t messagenum, ...)
3421 {
3422 return(STATUS_SUCCESS);
3423 }
3424
3425 static uint32_t
3426 IoWMIRegistrationControl(dobj, action)
3427 device_object *dobj;
3428 uint32_t action;
3429 {
3430 return(STATUS_SUCCESS);
3431 }
3432
3433 /*
3434 * This is here just in case the thread returns without calling
3435 * PsTerminateSystemThread().
3436 */
3437 static void
3438 ntoskrnl_thrfunc(arg)
3439 void *arg;
3440 {
3441 thread_context *thrctx;
3442 uint32_t (*tfunc)(void *);
3443 void *tctx;
3444 uint32_t rval;
3445
3446 thrctx = arg;
3447 tfunc = thrctx->tc_thrfunc;
3448 tctx = thrctx->tc_thrctx;
3449 free(thrctx, M_TEMP);
3450
3451 rval = MSCALL1(tfunc, tctx);
3452
3453 PsTerminateSystemThread(rval);
3454 return; /* notreached */
3455 }
3456
3457 static ndis_status
3458 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3459 clientid, thrfunc, thrctx)
3460 ndis_handle *handle;
3461 uint32_t reqaccess;
3462 void *objattrs;
3463 ndis_handle phandle;
3464 void *clientid;
3465 void *thrfunc;
3466 void *thrctx;
3467 {
3468 int error;
3469 char tname[128];
3470 thread_context *tc;
3471 struct proc *p;
3472
3473 tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3474 if (tc == NULL)
3475 return(STATUS_INSUFFICIENT_RESOURCES);
3476
3477 tc->tc_thrctx = thrctx;
3478 tc->tc_thrfunc = thrfunc;
3479
3480 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
3481 error = kproc_create(ntoskrnl_thrfunc, tc, &p,
3482 RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3483
3484 if (error) {
3485 free(tc, M_TEMP);
3486 return(STATUS_INSUFFICIENT_RESOURCES);
3487 }
3488
3489 *handle = p;
3490 ntoskrnl_kth++;
3491
3492 return(STATUS_SUCCESS);
3493 }
3494
3495 /*
3496 * In Windows, the exit of a thread is an event that you're allowed
3497 * to wait on, assuming you've obtained a reference to the thread using
3498 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3499 * simulate this behavior is to register each thread we create in a
3500 * reference list, and if someone holds a reference to us, we poke
3501 * them.
3502 */
3503 static ndis_status
3504 PsTerminateSystemThread(status)
3505 ndis_status status;
3506 {
3507 struct nt_objref *nr;
3508
3509 mtx_lock(&ntoskrnl_dispatchlock);
3510 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3511 if (nr->no_obj != curthread->td_proc)
3512 continue;
3513 nr->no_dh.dh_sigstate = 1;
3514 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3515 break;
3516 }
3517 mtx_unlock(&ntoskrnl_dispatchlock);
3518
3519 ntoskrnl_kth--;
3520
3521 #if __FreeBSD_version < 502113
3522 mtx_lock(&Giant);
3523 #endif
3524 kproc_exit(0);
3525 return(0); /* notreached */
3526 }
3527
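/*
 * Route Windows debug output to the console, but only when the
 * system was booted verbose, since some drivers are chatty.
 */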
3528 static uint32_t
3529 DbgPrint(char *fmt, ...)
3530 {
3531 va_list ap;
3532
3533 if (bootverbose) {
3534 va_start(ap, fmt);
3535 vprintf(fmt, ap);
3536 }
3537
3538 return(STATUS_SUCCESS);
3539 }
3540
3541 static void
3542 DbgBreakPoint(void)
3543 {
3544
3545 #if __FreeBSD_version < 502113
3546 Debugger("DbgBreakPoint(): breakpoint");
3547 #else
3548 kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
3549 #endif
3550 }
3551
3552 static void
3553 KeBugCheckEx(code, param1, param2, param3, param4)
3554 uint32_t code;
3555 u_long param1;
3556 u_long param2;
3557 u_long param3;
3558 u_long param4;
3559 {
3560 panic("KeBugCheckEx: STOP 0x%X", code);
3561 }
3562
3563 static void
3564 ntoskrnl_timercall(arg)
3565 void *arg;
3566 {
3567 ktimer *timer;
3568 struct timeval tv;
3569 kdpc *dpc;
3570
3571 mtx_lock(&ntoskrnl_dispatchlock);
3572
3573 timer = arg;
3574
3575 #ifdef NTOSKRNL_DEBUG_TIMERS
3576 ntoskrnl_timer_fires++;
3577 #endif
3578 ntoskrnl_remove_timer(timer);
3579
3580 /*
3581 * This should never happen, but complain
3582 * if it does.
3583 */
3584
3585 if (timer->k_header.dh_inserted == FALSE) {
3586 mtx_unlock(&ntoskrnl_dispatchlock);
3587 printf("NTOS: timer %p fired even though "
3588 "it was canceled\n", timer);
3589 return;
3590 }
3591
3592 /* Mark the timer as no longer being on the timer queue. */
3593
3594 timer->k_header.dh_inserted = FALSE;
3595
3596 /* Now signal the object and satisfy any waits on it. */
3597
3598 timer->k_header.dh_sigstate = 1;
3599 ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3600
3601 /*
3602 * If this is a periodic timer, re-arm it
3603 * so it will fire again. We do this before
3604 * calling any deferred procedure calls because
3605 * it's possible the DPC might cancel the timer,
3606 * in which case it would be wrong for us to
3607 * re-arm it again afterwards.
3608 */
3609
3610 if (timer->k_period) {
3611 tv.tv_sec = 0;
3612 tv.tv_usec = timer->k_period * 1000;
3613 timer->k_header.dh_inserted = TRUE;
3614 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3615 #ifdef NTOSKRNL_DEBUG_TIMERS
3616 ntoskrnl_timer_reloads++;
3617 #endif
3618 }
3619
3620 dpc = timer->k_dpc;
3621
3622 mtx_unlock(&ntoskrnl_dispatchlock);
3623
3624 /* If there's a DPC associated with the timer, queue it up. */
3625
3626 if (dpc != NULL)
3627 KeInsertQueueDpc(dpc, NULL, NULL);
3628
3629 return;
3630 }
3631
3632 #ifdef NTOSKRNL_DEBUG_TIMERS
3633 static int
3634 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3635 {
3636 int ret;
3637
3638 ret = 0;
3639 ntoskrnl_show_timers();
3640 return (sysctl_handle_int(oidp, &ret, 0, req));
3641 }
3642
3643 static void
3644 ntoskrnl_show_timers()
3645 {
3646 int i = 0;
3647 list_entry *l;
3648
3649 mtx_lock_spin(&ntoskrnl_calllock);
3650 l = ntoskrnl_calllist.nle_flink;
3651 while(l != &ntoskrnl_calllist) {
3652 i++;
3653 l = l->nle_flink;
3654 }
3655 mtx_unlock_spin(&ntoskrnl_calllock);
3656
3657 printf("\n");
3658 printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3659 printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3660 printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3661 printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3662 printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3663 printf("\n");
3664
3665 return;
3666 }
3667 #endif
3668
3669 /*
3670 * Must be called with dispatcher lock held.
3671 */
3672
3673 static void
3674 ntoskrnl_insert_timer(timer, ticks)
3675 ktimer *timer;
3676 int ticks;
3677 {
3678 callout_entry *e;
3679 list_entry *l;
3680 struct callout *c;
3681
3682 /*
3683 * Try and allocate a timer.
3684 */
3685 mtx_lock_spin(&ntoskrnl_calllock);
3686 if (IsListEmpty(&ntoskrnl_calllist)) {
3687 mtx_unlock_spin(&ntoskrnl_calllock);
3688 #ifdef NTOSKRNL_DEBUG_TIMERS
3689 ntoskrnl_show_timers();
3690 #endif
3691 panic("out of timers!");
3692 }
3693 l = RemoveHeadList(&ntoskrnl_calllist);
3694 mtx_unlock_spin(&ntoskrnl_calllock);
3695
3696 e = CONTAINING_RECORD(l, callout_entry, ce_list);
3697 c = &e->ce_callout;
3698
3699 timer->k_callout = c;
3700
3701 callout_init(c, CALLOUT_MPSAFE);
3702 callout_reset(c, ticks, ntoskrnl_timercall, timer);
3703
3704 return;
3705 }
3706
3707 static void
3708 ntoskrnl_remove_timer(timer)
3709 ktimer *timer;
3710 {
3711 callout_entry *e;
3712
3713 e = (callout_entry *)timer->k_callout;
3714 callout_stop(timer->k_callout);
3715
3716 mtx_lock_spin(&ntoskrnl_calllock);
3717 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3718 mtx_unlock_spin(&ntoskrnl_calllock);
3719
3720 return;
3721 }
3722
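/*
 * Windows timers are dispatcher objects backed here by FreeBSD
 * callouts drawn from the fixed pool managed by
 * ntoskrnl_insert_timer()/ntoskrnl_remove_timer() above. A timer
 * can optionally have a DPC attached, which gets queued when the
 * timer fires (see ntoskrnl_timercall()).
 */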
3723 void
3724 KeInitializeTimer(timer)
3725 ktimer *timer;
3726 {
3727 if (timer == NULL)
3728 return;
3729
3730 KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3731
3732 return;
3733 }
3734
3735 void
3736 KeInitializeTimerEx(timer, type)
3737 ktimer *timer;
3738 uint32_t type;
3739 {
3740 if (timer == NULL)
3741 return;
3742
3743 bzero((char *)timer, sizeof(ktimer));
3744 InitializeListHead((&timer->k_header.dh_waitlisthead));
3745 timer->k_header.dh_sigstate = FALSE;
3746 timer->k_header.dh_inserted = FALSE;
3747 if (type == EVENT_TYPE_NOTIFY)
3748 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3749 else
3750 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3751 timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3752
3753 return;
3754 }
3755
3756 /*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
 * properties:
 * - It runs at DISPATCH_LEVEL.
 * - It can have one of 3 importance values that control when it
 *   runs relative to other DPCs in the queue.
 * - On SMP systems, it can be set to run on a specific processor.
 * To satisfy the last property, we create a DPC thread for each
 * CPU in the system and bind it to that CPU. Each thread keeps a
 * single dispatch queue: high-importance DPCs are inserted at the
 * head of the queue and everything else at the tail, so more
 * important DPCs run first (see ntoskrnl_insert_dpc()).
 *
 * In Windows, interrupt handlers run as DPCs. (Not to be confused
 * with ISRs, which run in interrupt context and can preempt DPCs.)
 * The interrupt DPCs are given the highest importance so that they
 * will take precedence over timers and other things.
3772 */
3773
3774 static void
3775 ntoskrnl_dpc_thread(arg)
3776 void *arg;
3777 {
3778 kdpc_queue *kq;
3779 kdpc *d;
3780 list_entry *l;
3781 uint8_t irql;
3782
3783 kq = arg;
3784
3785 InitializeListHead(&kq->kq_disp);
3786 kq->kq_td = curthread;
3787 kq->kq_exit = 0;
3788 kq->kq_running = FALSE;
3789 KeInitializeSpinLock(&kq->kq_lock);
3790 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3791 KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3792
3793 /*
3794 * Elevate our priority. DPCs are used to run interrupt
3795 * handlers, and they should trigger as soon as possible
3796 * once scheduled by an ISR.
3797 */
3798
3799 thread_lock(curthread);
3800 #ifdef NTOSKRNL_MULTIPLE_DPCS
3801 #if __FreeBSD_version >= 502102
3802 sched_bind(curthread, kq->kq_cpu);
3803 #endif
3804 #endif
3805 sched_prio(curthread, PRI_MIN_KERN);
3806 #if __FreeBSD_version < 600000
3807 curthread->td_base_pri = PRI_MIN_KERN;
3808 #endif
3809 thread_unlock(curthread);
3810
3811 while (1) {
3812 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3813
3814 KeAcquireSpinLock(&kq->kq_lock, &irql);
3815
3816 if (kq->kq_exit) {
3817 kq->kq_exit = 0;
3818 KeReleaseSpinLock(&kq->kq_lock, irql);
3819 break;
3820 }
3821
3822 kq->kq_running = TRUE;
3823
3824 while (!IsListEmpty(&kq->kq_disp)) {
3825 l = RemoveHeadList((&kq->kq_disp));
3826 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3827 InitializeListHead((&d->k_dpclistentry));
3828 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3829 MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3830 d->k_sysarg1, d->k_sysarg2);
3831 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3832 }
3833
3834 kq->kq_running = FALSE;
3835
3836 KeReleaseSpinLock(&kq->kq_lock, irql);
3837
3838 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3839 }
3840
3841 #if __FreeBSD_version < 502113
3842 mtx_lock(&Giant);
3843 #endif
3844 kproc_exit(0);
3845 return; /* notreached */
3846 }
3847
3848 static void
3849 ntoskrnl_destroy_dpc_threads(void)
3850 {
3851 kdpc_queue *kq;
3852 kdpc dpc;
3853 int i;
3854
3855 kq = kq_queues;
3856 #ifdef NTOSKRNL_MULTIPLE_DPCS
3857 for (i = 0; i < mp_ncpus; i++) {
3858 #else
3859 for (i = 0; i < 1; i++) {
3860 #endif
3861 kq += i;
3862
3863 kq->kq_exit = 1;
3864 KeInitializeDpc(&dpc, NULL, NULL);
3865 KeSetTargetProcessorDpc(&dpc, i);
3866 KeInsertQueueDpc(&dpc, NULL, NULL);
3867 while (kq->kq_exit)
3868 tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
3869 }
3870
3871 return;
3872 }
3873
3874 static uint8_t
3875 ntoskrnl_insert_dpc(head, dpc)
3876 list_entry *head;
3877 kdpc *dpc;
3878 {
3879 list_entry *l;
3880 kdpc *d;
3881
3882 l = head->nle_flink;
3883 while (l != head) {
3884 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3885 if (d == dpc)
3886 return(FALSE);
3887 l = l->nle_flink;
3888 }
3889
3890 if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3891 InsertTailList((head), (&dpc->k_dpclistentry));
3892 else
3893 InsertHeadList((head), (&dpc->k_dpclistentry));
3894
3895 return (TRUE);
3896 }
3897
3898 void
3899 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3900 kdpc *dpc;
3901 void *dpcfunc;
3902 void *dpcctx;
3903 {
3904
3905 if (dpc == NULL)
3906 return;
3907
3908 dpc->k_deferedfunc = dpcfunc;
3909 dpc->k_deferredctx = dpcctx;
3910 dpc->k_num = KDPC_CPU_DEFAULT;
3911 dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3912 InitializeListHead((&dpc->k_dpclistentry));
3913
3914 return;
3915 }
3916
3917 uint8_t
3918 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3919 kdpc *dpc;
3920 void *sysarg1;
3921 void *sysarg2;
3922 {
3923 kdpc_queue *kq;
3924 uint8_t r;
3925 uint8_t irql;
3926
3927 if (dpc == NULL)
3928 return(FALSE);
3929
3930 kq = kq_queues;
3931
3932 #ifdef NTOSKRNL_MULTIPLE_DPCS
3933 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3934
3935 /*
3936 * By default, the DPC is queued to run on the same CPU
3937 * that scheduled it.
3938 */
3939
3940 if (dpc->k_num == KDPC_CPU_DEFAULT)
3941 kq += curthread->td_oncpu;
3942 else
3943 kq += dpc->k_num;
3944 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3945 #else
3946 KeAcquireSpinLock(&kq->kq_lock, &irql);
3947 #endif
3948
3949 r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3950 if (r == TRUE) {
3951 dpc->k_sysarg1 = sysarg1;
3952 dpc->k_sysarg2 = sysarg2;
3953 }
3954 KeReleaseSpinLock(&kq->kq_lock, irql);
3955
3956 if (r == FALSE)
3957 return(r);
3958
3959 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3960
3961 return(r);
3962 }
3963
3964 uint8_t
3965 KeRemoveQueueDpc(dpc)
3966 kdpc *dpc;
3967 {
3968 kdpc_queue *kq;
3969 uint8_t irql;
3970
3971 if (dpc == NULL)
3972 return(FALSE);
3973
3974 #ifdef NTOSKRNL_MULTIPLE_DPCS
3975 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3976
3977 kq = kq_queues + dpc->k_num;
3978
3979 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3980 #else
3981 kq = kq_queues;
3982 KeAcquireSpinLock(&kq->kq_lock, &irql);
3983 #endif
3984
3985 if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3986 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3987 KeLowerIrql(irql);
3988 return(FALSE);
3989 }
3990
3991 RemoveEntryList((&dpc->k_dpclistentry));
3992 InitializeListHead((&dpc->k_dpclistentry));
3993
3994 KeReleaseSpinLock(&kq->kq_lock, irql);
3995
3996 return(TRUE);
3997 }
3998
3999 void
4000 KeSetImportanceDpc(dpc, imp)
4001 kdpc *dpc;
4002 uint32_t imp;
4003 {
4004 if (imp != KDPC_IMPORTANCE_LOW &&
4005 imp != KDPC_IMPORTANCE_MEDIUM &&
4006 imp != KDPC_IMPORTANCE_HIGH)
4007 return;
4008
4009 dpc->k_importance = (uint8_t)imp;
4010 return;
4011 }
4012
4013 void
4014 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
4015 {
	/* CPU ids run from 0 to mp_ncpus - 1. */
	if (cpu >= mp_ncpus)
		return;
4018
4019 dpc->k_num = cpu;
4020 return;
4021 }
4022
4023 void
4024 KeFlushQueuedDpcs(void)
4025 {
4026 kdpc_queue *kq;
4027 int i;
4028
4029 /*
4030 * Poke each DPC queue and wait
4031 * for them to drain.
4032 */
4033
4034 #ifdef NTOSKRNL_MULTIPLE_DPCS
4035 for (i = 0; i < mp_ncpus; i++) {
4036 #else
4037 for (i = 0; i < 1; i++) {
4038 #endif
4039 kq = kq_queues + i;
4040 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4041 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4042 }
4043
4044 return;
4045 }
4046
4047 uint32_t
4048 KeGetCurrentProcessorNumber(void)
4049 {
4050 return((uint32_t)curthread->td_oncpu);
4051 }
4052
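/*
 * Timer due times are expressed in 100-nanosecond units: negative
 * values are relative to the present, positive values are an
 * absolute system time. The period, if any, is in milliseconds.
 * For example, a timer that first fires in half a second and then
 * every 500ms thereafter would be armed like this (sketch):
 *
 *	KeSetTimerEx(&timer, -5000000, 500, &dpc);
 */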
4053 uint8_t
4054 KeSetTimerEx(timer, duetime, period, dpc)
4055 ktimer *timer;
4056 int64_t duetime;
4057 uint32_t period;
4058 kdpc *dpc;
4059 {
4060 struct timeval tv;
4061 uint64_t curtime;
4062 uint8_t pending;
4063
4064 if (timer == NULL)
4065 return(FALSE);
4066
4067 mtx_lock(&ntoskrnl_dispatchlock);
4068
4069 if (timer->k_header.dh_inserted == TRUE) {
4070 ntoskrnl_remove_timer(timer);
4071 #ifdef NTOSKRNL_DEBUG_TIMERS
4072 ntoskrnl_timer_cancels++;
4073 #endif
4074 timer->k_header.dh_inserted = FALSE;
4075 pending = TRUE;
4076 } else
4077 pending = FALSE;
4078
4079 timer->k_duetime = duetime;
4080 timer->k_period = period;
4081 timer->k_header.dh_sigstate = FALSE;
4082 timer->k_dpc = dpc;
4083
4084 if (duetime < 0) {
4085 tv.tv_sec = - (duetime) / 10000000;
4086 tv.tv_usec = (- (duetime) / 10) -
4087 (tv.tv_sec * 1000000);
4088 } else {
4089 ntoskrnl_time(&curtime);
4090 if (duetime < curtime)
4091 tv.tv_sec = tv.tv_usec = 0;
4092 else {
4093 tv.tv_sec = ((duetime) - curtime) / 10000000;
4094 tv.tv_usec = ((duetime) - curtime) / 10 -
4095 (tv.tv_sec * 1000000);
4096 }
4097 }
4098
4099 timer->k_header.dh_inserted = TRUE;
4100 ntoskrnl_insert_timer(timer, tvtohz(&tv));
4101 #ifdef NTOSKRNL_DEBUG_TIMERS
4102 ntoskrnl_timer_sets++;
4103 #endif
4104
4105 mtx_unlock(&ntoskrnl_dispatchlock);
4106
4107 return(pending);
4108 }
4109
4110 uint8_t
4111 KeSetTimer(timer, duetime, dpc)
4112 ktimer *timer;
4113 int64_t duetime;
4114 kdpc *dpc;
4115 {
4116 return (KeSetTimerEx(timer, duetime, 0, dpc));
4117 }
4118
4119 /*
4120 * The Windows DDK documentation seems to say that cancelling
4121 * a timer that has a DPC will result in the DPC also being
4122 * cancelled, but this isn't really the case.
4123 */
4124
4125 uint8_t
4126 KeCancelTimer(timer)
4127 ktimer *timer;
4128 {
4129 uint8_t pending;
4130
4131 if (timer == NULL)
4132 return(FALSE);
4133
4134 mtx_lock(&ntoskrnl_dispatchlock);
4135
4136 pending = timer->k_header.dh_inserted;
4137
4138 if (timer->k_header.dh_inserted == TRUE) {
4139 timer->k_header.dh_inserted = FALSE;
4140 ntoskrnl_remove_timer(timer);
4141 #ifdef NTOSKRNL_DEBUG_TIMERS
4142 ntoskrnl_timer_cancels++;
4143 #endif
4144 }
4145
4146 mtx_unlock(&ntoskrnl_dispatchlock);
4147
4148 return(pending);
4149 }
4150
4151 uint8_t
4152 KeReadStateTimer(timer)
4153 ktimer *timer;
4154 {
4155 return(timer->k_header.dh_sigstate);
4156 }
4157
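/*
 * Emulate KeDelayExecutionThread() by arming a one-shot timer and
 * waiting on it; the interval uses the same 100ns-unit encoding
 * as KeSetTimer() (negative means relative to now).
 */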
4158 static int32_t
4159 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
4160 {
4161 ktimer timer;
4162
4163 if (wait_mode != 0)
4164 panic("invalid wait_mode %d", wait_mode);
4165
4166 KeInitializeTimer(&timer);
4167 KeSetTimer(&timer, *interval, NULL);
4168 KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
4169
4170 return STATUS_SUCCESS;
4171 }
4172
4173 static uint64_t
4174 KeQueryInterruptTime(void)
4175 {
4176 int ticks;
4177 struct timeval tv;
4178
4179 getmicrouptime(&tv);
4180
4181 ticks = tvtohz(&tv);
4182
4183 return ticks * ((10000000 + hz - 1) / hz);
4184 }
4185
4186 static struct thread *
4187 KeGetCurrentThread(void)
4188 {
4189
4190 return curthread;
4191 }
4192
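/*
 * Map the three Windows thread priorities we care about onto the
 * FreeBSD kernel priority range: HIGH_PRIORITY becomes
 * PRI_MIN_KERN, LOW_PRIORITY becomes PRI_MAX_KERN, and
 * LOW_REALTIME_PRIORITY lands midway between the two.
 */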
4193 static int32_t
4194 KeSetPriorityThread(td, pri)
4195 struct thread *td;
4196 int32_t pri;
4197 {
4198 int32_t old;
4199
4200 if (td == NULL)
4201 return LOW_REALTIME_PRIORITY;
4202
4203 if (td->td_priority <= PRI_MIN_KERN)
4204 old = HIGH_PRIORITY;
4205 else if (td->td_priority >= PRI_MAX_KERN)
4206 old = LOW_PRIORITY;
4207 else
4208 old = LOW_REALTIME_PRIORITY;
4209
4210 thread_lock(td);
4211 if (pri == HIGH_PRIORITY)
4212 sched_prio(td, PRI_MIN_KERN);
4213 if (pri == LOW_REALTIME_PRIORITY)
4214 sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
4215 if (pri == LOW_PRIORITY)
4216 sched_prio(td, PRI_MAX_KERN);
4217 thread_unlock(td);
4218
4219 return old;
4220 }
4221
4222 static void
4223 dummy()
4224 {
4225 printf ("ntoskrnl dummy called...\n");
4226 return;
4227 }
4228
4229
4230 image_patch_table ntoskrnl_functbl[] = {
4231 IMPORT_SFUNC(RtlZeroMemory, 2),
4232 IMPORT_SFUNC(RtlCopyMemory, 3),
4233 IMPORT_SFUNC(RtlCompareMemory, 3),
4234 IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4235 IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4236 IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4237 IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4238 IMPORT_SFUNC(RtlInitAnsiString, 2),
4239 IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4240 IMPORT_SFUNC(RtlInitUnicodeString, 2),
4241 IMPORT_SFUNC(RtlFreeAnsiString, 1),
4242 IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4243 IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4244 IMPORT_CFUNC(sprintf, 0),
4245 IMPORT_CFUNC(vsprintf, 0),
4246 IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4247 IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4248 IMPORT_CFUNC(DbgPrint, 0),
4249 IMPORT_SFUNC(DbgBreakPoint, 0),
4250 IMPORT_SFUNC(KeBugCheckEx, 5),
4251 IMPORT_CFUNC(strncmp, 0),
4252 IMPORT_CFUNC(strcmp, 0),
4253 IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4254 IMPORT_CFUNC(strncpy, 0),
4255 IMPORT_CFUNC(strcpy, 0),
4256 IMPORT_CFUNC(strlen, 0),
4257 IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4258 IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4259 IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4260 IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
4261 IMPORT_CFUNC_MAP(strchr, index, 0),
4262 IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4263 IMPORT_CFUNC(memcpy, 0),
4264 IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4265 IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4266 IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4267 IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4268 IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4269 IMPORT_FFUNC(IofCallDriver, 2),
4270 IMPORT_FFUNC(IofCompleteRequest, 2),
4271 IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4272 IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4273 IMPORT_SFUNC(IoCancelIrp, 1),
4274 IMPORT_SFUNC(IoConnectInterrupt, 11),
4275 IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4276 IMPORT_SFUNC(IoCreateDevice, 7),
4277 IMPORT_SFUNC(IoDeleteDevice, 1),
4278 IMPORT_SFUNC(IoGetAttachedDevice, 1),
4279 IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4280 IMPORT_SFUNC(IoDetachDevice, 1),
4281 IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4282 IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4283 IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4284 IMPORT_SFUNC(IoAllocateIrp, 2),
4285 IMPORT_SFUNC(IoReuseIrp, 2),
4286 IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4287 IMPORT_SFUNC(IoFreeIrp, 1),
4288 IMPORT_SFUNC(IoInitializeIrp, 3),
4289 IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4290 IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4291 IMPORT_SFUNC(KeSynchronizeExecution, 3),
4292 IMPORT_SFUNC(KeWaitForSingleObject, 5),
4293 IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4294 IMPORT_SFUNC(_allmul, 4),
4295 IMPORT_SFUNC(_alldiv, 4),
4296 IMPORT_SFUNC(_allrem, 4),
4297 IMPORT_RFUNC(_allshr, 0),
4298 IMPORT_RFUNC(_allshl, 0),
4299 IMPORT_SFUNC(_aullmul, 4),
4300 IMPORT_SFUNC(_aulldiv, 4),
4301 IMPORT_SFUNC(_aullrem, 4),
4302 IMPORT_RFUNC(_aullshr, 0),
4303 IMPORT_RFUNC(_aullshl, 0),
4304 IMPORT_CFUNC(atoi, 0),
4305 IMPORT_CFUNC(atol, 0),
4306 IMPORT_CFUNC(rand, 0),
4307 IMPORT_CFUNC(srand, 0),
4308 IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4309 IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4310 IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4311 IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4312 IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4313 IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4314 IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4315 IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4316 IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4317 IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4318 IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4319 IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4320 IMPORT_SFUNC(ExQueryDepthSList, 1),
4321 IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4322 InterlockedPopEntrySList, 1),
4323 IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4324 InterlockedPushEntrySList, 2),
4325 IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4326 IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4327 IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4328 IMPORT_SFUNC(ExFreePool, 1),
4329 #ifdef __i386__
4330 IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4331 IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4332 IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4333 #else
4334 /*
4335 * For AMD64, we can get away with just mapping
4336 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4337 * because the calling conventions end up being the same.
4338 * On i386, we have to be careful because KfAcquireSpinLock()
4339 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4340 */
4341 IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4342 IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4343 IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4344 #endif
4345 IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4346 IMPORT_FFUNC(InterlockedIncrement, 1),
4347 IMPORT_FFUNC(InterlockedDecrement, 1),
4348 IMPORT_FFUNC(InterlockedExchange, 2),
4349 IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4350 IMPORT_SFUNC(IoAllocateMdl, 5),
4351 IMPORT_SFUNC(IoFreeMdl, 1),
4352 IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4353 IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4354 IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4355 IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4356 IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4357 IMPORT_SFUNC(MmSizeOfMdl, 1),
4358 IMPORT_SFUNC(MmMapLockedPages, 2),
4359 IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4360 IMPORT_SFUNC(MmUnmapLockedPages, 2),
4361 IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4362 IMPORT_SFUNC(MmIsAddressValid, 1),
4363 IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4364 IMPORT_SFUNC(MmUnmapIoSpace, 2),
4365 IMPORT_SFUNC(KeInitializeSpinLock, 1),
4366 IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4367 IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
4368 IMPORT_SFUNC(IoGetDeviceProperty, 5),
4369 IMPORT_SFUNC(IoAllocateWorkItem, 1),
4370 IMPORT_SFUNC(IoFreeWorkItem, 1),
4371 IMPORT_SFUNC(IoQueueWorkItem, 4),
4372 IMPORT_SFUNC(ExQueueWorkItem, 2),
4373 IMPORT_SFUNC(ntoskrnl_workitem, 2),
4374 IMPORT_SFUNC(KeInitializeMutex, 2),
4375 IMPORT_SFUNC(KeReleaseMutex, 2),
4376 IMPORT_SFUNC(KeReadStateMutex, 1),
4377 IMPORT_SFUNC(KeInitializeEvent, 3),
4378 IMPORT_SFUNC(KeSetEvent, 3),
4379 IMPORT_SFUNC(KeResetEvent, 1),
4380 IMPORT_SFUNC(KeClearEvent, 1),
4381 IMPORT_SFUNC(KeReadStateEvent, 1),
4382 IMPORT_SFUNC(KeInitializeTimer, 1),
4383 IMPORT_SFUNC(KeInitializeTimerEx, 2),
4384 IMPORT_SFUNC(KeSetTimer, 3),
4385 IMPORT_SFUNC(KeSetTimerEx, 4),
4386 IMPORT_SFUNC(KeCancelTimer, 1),
4387 IMPORT_SFUNC(KeReadStateTimer, 1),
4388 IMPORT_SFUNC(KeInitializeDpc, 3),
4389 IMPORT_SFUNC(KeInsertQueueDpc, 3),
4390 IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4391 IMPORT_SFUNC(KeSetImportanceDpc, 2),
4392 IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4393 IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4394 IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4395 IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4396 IMPORT_FFUNC(ObfDereferenceObject, 1),
4397 IMPORT_SFUNC(ZwClose, 1),
4398 IMPORT_SFUNC(PsCreateSystemThread, 7),
4399 IMPORT_SFUNC(PsTerminateSystemThread, 1),
4400 IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4401 IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4402 IMPORT_CFUNC(WmiTraceMessage, 0),
4403 IMPORT_SFUNC(KeQuerySystemTime, 1),
4404 IMPORT_CFUNC(KeTickCount, 0),
4405 IMPORT_SFUNC(KeDelayExecutionThread, 3),
4406 IMPORT_SFUNC(KeQueryInterruptTime, 0),
4407 IMPORT_SFUNC(KeGetCurrentThread, 0),
4408 IMPORT_SFUNC(KeSetPriorityThread, 2),
4409
4410 /*
4411 * This last entry is a catch-all for any function we haven't
4412 * implemented yet. The PE import list patching routine will
4413 * use it for any function that doesn't have an explicit match
4414 * in this table.
4415 */
4416
4417 { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4418
4419 /* End of list. */
4420
4421 { NULL, NULL, NULL }
4422 };