/*-
 * Copyright (c) 2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.4/sys/compat/ndis/subr_ntoskrnl.c 166252 2007-01-26 06:45:33Z sam $");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
    sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
	list_entry		kq_disp;
	struct thread		*kq_td;
	int			kq_cpu;
	int			kq_exit;
	int			kq_running;
	kspin_lock		kq_lock;
	nt_kevent		kq_proc;
	nt_kevent		kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
	struct cv		we_cv;
	struct thread		*we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS	256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
	struct callout		ce_callout;
	list_entry		ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
	unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
	unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
	void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
	device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
	device_object *, void *, uint32_t, void *, uint32_t,
	uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
	nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
	int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
	lookaside_alloc_func *, lookaside_free_func *,
	uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
	*ExInterlockedPushEntrySList(slist_header *,
	slist_entry *, kspin_lock *);
static slist_entry
	*ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
	uint64_t, uint64_t, uint64_t, uint32_t);
static void MmFreeContiguousMemory(void *);
static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t, uint32_t);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
	uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static uint8_t MmIsAddressValid(void *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static void RtlZeroMemory(void *, size_t);
static void RtlCopyMemory(void *, const void *, size_t);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
	uint32_t, uint32_t *);
static int atoi(const char *);
static long atol(const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
	uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
	uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
	uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
	uint32_t, void *);
static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
static void *ntoskrnl_memset(void *, int, size_t);
static void *ntoskrnl_memmove(void *, void *, size_t);
static void *ntoskrnl_memchr(void *, unsigned char, size_t);
static char *ntoskrnl_strstr(char *, char *);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
	image_patch_table	*patch;
	int			error;
	struct proc		*p;
	kdpc_queue		*kq;
	callout_entry		*e;
	int			i;
	char			name[64];

	mtx_init(&ntoskrnl_dispatchlock,
	    "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
	mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
	KeInitializeSpinLock(&ntoskrnl_cancellock);
	KeInitializeSpinLock(&ntoskrnl_intlock);
	TAILQ_INIT(&ntoskrnl_reflist);

	InitializeListHead(&ntoskrnl_calllist);
	InitializeListHead(&ntoskrnl_intlist);
	mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

	kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
	    sizeof(kdpc_queue) * mp_ncpus, 0);
#else
	    sizeof(kdpc_queue), 0);
#endif

	if (kq_queues == NULL)
		return(ENOMEM);

	wq_queues = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

	if (wq_queues == NULL)
		return(ENOMEM);

#ifdef NTOSKRNL_MULTIPLE_DPCS
	bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
	bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
	bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

	/*
	 * Launch the DPC threads.
	 */

#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
		kq->kq_cpu = i;
		sprintf(name, "Windows DPC %d", i);
		error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch DPC thread");
	}

	/*
	 * Launch the workitem threads.
	 */

	for (i = 0; i < WORKITEM_THREADS; i++) {
		kq = wq_queues + i;
		sprintf(name, "Windows Workitem %d", i);
		error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
		    RFHIGHPID, NDIS_KSTACK_PAGES, name);
		if (error)
			panic("failed to launch workitem thread");
	}

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_wrap((funcptr)patch->ipt_func,
		    (funcptr *)&patch->ipt_wrap,
		    patch->ipt_argcnt, patch->ipt_ftype);
		patch++;
	}

	for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
		e = ExAllocatePoolWithTag(NonPagedPool,
		    sizeof(callout_entry), 0);
		if (e == NULL)
			panic("failed to allocate timeouts");
		mtx_lock_spin(&ntoskrnl_calllock);
		InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
		mtx_unlock_spin(&ntoskrnl_calllock);
	}

	/*
	 * MDLs are supposed to be variable size (they describe
	 * buffers containing some number of pages, but we don't
	 * know ahead of time how many pages that will be). But
	 * always allocating them off the heap is very slow. As
	 * a compromise, we create an MDL UMA zone big enough to
	 * handle any buffer requiring up to 16 pages, and we
	 * use those for any MDLs for buffers of 16 pages or less
	 * in size. For buffers larger than that (which we assume
	 * will be few and far between), we allocate the MDLs off
	 * the heap.
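	 *
	 * (MDL_ZONE_SIZE is defined in ntoskrnl_var.h; presumably it is
	 * sizeof(mdl) plus room for 16 page pointers, so a single zone
	 * item can describe any buffer of up to 16 pages.)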
	 */

	mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

	return(0);
}

int
ntoskrnl_libfini()
{
	image_patch_table	*patch;
	callout_entry		*e;
	list_entry		*l;

	patch = ntoskrnl_functbl;
	while (patch->ipt_func != NULL) {
		windrv_unwrap(patch->ipt_wrap);
		patch++;
	}

	/* Stop the workitem queues. */
	ntoskrnl_destroy_workitem_threads();
	/* Stop the DPC queues. */
	ntoskrnl_destroy_dpc_threads();

	ExFreePool(kq_queues);
	ExFreePool(wq_queues);

	uma_zdestroy(mdl_zone);
	uma_zdestroy(iw_zone);

	mtx_lock_spin(&ntoskrnl_calllock);
	while (!IsListEmpty(&ntoskrnl_calllist)) {
		l = RemoveHeadList(&ntoskrnl_calllist);
		e = CONTAINING_RECORD(l, callout_entry, ce_list);
		mtx_unlock_spin(&ntoskrnl_calllock);
		ExFreePool(e);
		mtx_lock_spin(&ntoskrnl_calllock);
	}
	mtx_unlock_spin(&ntoskrnl_calllock);

	mtx_destroy(&ntoskrnl_dispatchlock);
	mtx_destroy(&ntoskrnl_interlock);
	mtx_destroy(&ntoskrnl_calllock);

	return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
	void			*buf;
	int			ch;
	size_t			size;
{
	return(memset(buf, ch, size));
}

static void *
ntoskrnl_memmove(dst, src, size)
	void			*src;
	void			*dst;
	size_t			size;
{
	bcopy(src, dst, size);
	return(dst);
}

static void *
ntoskrnl_memchr(buf, ch, len)
	void			*buf;
	unsigned char		ch;
	size_t			len;
{
	if (len != 0) {
		unsigned char *p = buf;

		do {
			if (*p++ == ch)
				return (p - 1);
		} while (--len != 0);
	}
	return (NULL);
}

static char *
ntoskrnl_strstr(s, find)
	char *s, *find;
{
	char c, sc;
	size_t len;

	if ((c = *find++) != 0) {
		len = strlen(find);
		do {
			do {
				if ((sc = *s++) == 0)
					return (NULL);
			} while (sc != c);
		} while (strncmp(s, find, len) != 0);
		s--;
	}
	return ((char *)s);
}

static int
ntoskrnl_toupper(c)
	int			c;
{
	return(toupper(c));
}

static int
ntoskrnl_tolower(c)
	int			c;
{
	return(tolower(c));
}

static uint8_t
RtlEqualUnicodeString(str1, str2, caseinsensitive)
	unicode_string		*str1;
	unicode_string		*str2;
	uint8_t			caseinsensitive;
{
	int			i;

	if (str1->us_len != str2->us_len)
		return(FALSE);

	for (i = 0; i < str1->us_len; i++) {
		if (caseinsensitive == TRUE) {
			if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
			    toupper((char)(str2->us_buf[i] & 0xFF)))
				return(FALSE);
		} else {
			if (str1->us_buf[i] != str2->us_buf[i])
				return(FALSE);
		}
	}

	return(TRUE);
}

static void
RtlCopyUnicodeString(dest, src)
	unicode_string		*dest;
	unicode_string		*src;
{

	if (dest->us_maxlen >= src->us_len)
		dest->us_len = src->us_len;
	else
		dest->us_len = dest->us_maxlen;
	memcpy(dest->us_buf, src->us_buf, dest->us_len);
	return;
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
	char			*ascii;
	uint16_t		*unicode;
	int			len;
{
	int			i;
	uint16_t		*ustr;

	ustr = unicode;
	for (i = 0; i < len; i++) {
		*ustr = (uint16_t)ascii[i];
		ustr++;
	}

	return;
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
	uint16_t		*unicode;
	char			*ascii;
	int			len;
{
	int			i;
	uint8_t			*astr;

	astr = ascii;
	for (i = 0; i < len / 2; i++) {
		*astr = (uint8_t)unicode[i];
		astr++;
	}

	return;
}

uint32_t
RtlUnicodeStringToAnsiString(dest, src, allocate)
	ansi_string		*dest;
	unicode_string		*src;
	uint8_t			allocate;
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	dest->as_len = src->us_len / 2;
	if (dest->as_maxlen < dest->as_len)
		dest->as_len = dest->as_maxlen;

	if (allocate == TRUE) {
		dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
		    (src->us_len / 2) + 1, 0);
		if (dest->as_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->as_len = dest->as_maxlen = src->us_len / 2;
	} else {
		dest->as_len = src->us_len / 2; /* XXX */
		if (dest->as_maxlen < dest->as_len)
			dest->as_len = dest->as_maxlen;
	}

	ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
	    dest->as_len * 2);

	return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(dest, src, allocate)
	unicode_string		*dest;
	ansi_string		*src;
	uint8_t			allocate;
{
	if (dest == NULL || src == NULL)
		return(STATUS_INVALID_PARAMETER);

	if (allocate == TRUE) {
		dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
		    src->as_len * 2, 0);
		if (dest->us_buf == NULL)
			return(STATUS_INSUFFICIENT_RESOURCES);
		dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
	} else {
		dest->us_len = src->as_len * 2; /* XXX */
		if (dest->us_maxlen < dest->us_len)
			dest->us_len = dest->us_maxlen;
	}

	ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
	    dest->us_len / 2);

	return (STATUS_SUCCESS);
}

void *
ExAllocatePoolWithTag(pooltype, len, tag)
	uint32_t		pooltype;
	size_t			len;
	uint32_t		tag;
{
	void			*buf;

	buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buf == NULL)
		return(NULL);

	return(buf);
}

void
ExFreePool(buf)
	void			*buf;
{
	free(buf, M_DEVBUF);
	return;
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
	driver_object		*drv;
	void			*clid;
	uint32_t		extlen;
	void			**ext;
{
	custom_extension	*ce;

	ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
	    + extlen, 0);

	if (ce == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	ce->ce_clid = clid;
	InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

	*ext = (void *)(ce + 1);

	return(STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
	driver_object		*drv;
	void			*clid;
{
	list_entry		*e;
	custom_extension	*ce;

	/*
	 * Sanity check. Our dummy bus drivers don't have
	 * any driver extensions.
	 */

	if (drv->dro_driverext == NULL)
		return(NULL);

	e = drv->dro_driverext->dre_usrext.nle_flink;
	while (e != &drv->dro_driverext->dre_usrext) {
		ce = (custom_extension *)e;
		if (ce->ce_clid == clid)
			return((void *)(ce + 1));
		e = e->nle_flink;
	}

	return(NULL);
}


uint32_t
IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
	driver_object		*drv;
	uint32_t		devextlen;
	unicode_string		*devname;
	uint32_t		devtype;
	uint32_t		devchars;
	uint8_t			exclusive;
	device_object		**newdev;
{
	device_object		*dev;

	dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
	if (dev == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	dev->do_type = devtype;
	dev->do_drvobj = drv;
	dev->do_currirp = NULL;
	dev->do_flags = 0;

	if (devextlen) {
		dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
		    devextlen, 0);

		if (dev->do_devext == NULL) {
			ExFreePool(dev);
			return(STATUS_INSUFFICIENT_RESOURCES);
		}

		bzero(dev->do_devext, devextlen);
	} else
		dev->do_devext = NULL;

	dev->do_size = sizeof(device_object) + devextlen;
	dev->do_refcnt = 1;
	dev->do_attacheddev = NULL;
	dev->do_nextdev = NULL;
	dev->do_devtype = devtype;
	dev->do_stacksize = 1;
	dev->do_alignreq = 1;
	dev->do_characteristics = devchars;
	dev->do_iotimer = NULL;
	KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

	/*
	 * Vpd is used for disk/tape devices,
	 * but we don't support those. (Yet.)
	 */
	dev->do_vpb = NULL;

	dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
	    sizeof(devobj_extension), 0);

	if (dev->do_devobj_ext == NULL) {
		if (dev->do_devext != NULL)
			ExFreePool(dev->do_devext);
		ExFreePool(dev);
		return(STATUS_INSUFFICIENT_RESOURCES);
	}

	dev->do_devobj_ext->dve_type = 0;
	dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
	dev->do_devobj_ext->dve_devobj = dev;

	/*
	 * Attach this device to the driver object's list
	 * of devices. Note: this is not the same as attaching
	 * the device to the device stack. The driver's AddDevice
	 * routine must explicitly call IoAttachDeviceToDeviceStack()
	 * to do that.
	 */

	if (drv->dro_devobj == NULL) {
		drv->dro_devobj = dev;
		dev->do_nextdev = NULL;
	} else {
		dev->do_nextdev = drv->dro_devobj;
		drv->dro_devobj = dev;
	}

	*newdev = dev;

	return(STATUS_SUCCESS);
}

void
IoDeleteDevice(dev)
	device_object		*dev;
{
	device_object		*prev;

	if (dev == NULL)
		return;

	if (dev->do_devobj_ext != NULL)
		ExFreePool(dev->do_devobj_ext);

	if (dev->do_devext != NULL)
		ExFreePool(dev->do_devext);

	/* Unlink the device from the driver's device list. */

	prev = dev->do_drvobj->dro_devobj;
	if (prev == dev)
		dev->do_drvobj->dro_devobj = dev->do_nextdev;
	else {
		while (prev->do_nextdev != dev)
			prev = prev->do_nextdev;
		prev->do_nextdev = dev->do_nextdev;
	}

	ExFreePool(dev);

	return;
}

device_object *
IoGetAttachedDevice(dev)
	device_object		*dev;
{
	device_object		*d;

	if (dev == NULL)
		return (NULL);

	d = dev;

	while (d->do_attacheddev != NULL)
		d = d->do_attacheddev;

	return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	nt_kevent		*event;
	io_status_block		*status;
{
	irp			*ip;

	ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;

	return(ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
	uint32_t		func;
	device_object		*dobj;
	void			*buf;
	uint32_t		len;
	uint64_t		*off;
	io_status_block		*status;
{
	irp			*ip;
	io_stack_location	*sl;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);

	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = func;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;

	ip->irp_userbuf = buf;

	if (dobj->do_flags & DO_BUFFERED_IO) {
		ip->irp_assoc.irp_sysbuf =
		    ExAllocatePoolWithTag(NonPagedPool, len, 0);
		if (ip->irp_assoc.irp_sysbuf == NULL) {
			IoFreeIrp(ip);
			return(NULL);
		}
		bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
	}

	if (dobj->do_flags & DO_DIRECT_IO) {
		ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
		if (ip->irp_mdl == NULL) {
			if (ip->irp_assoc.irp_sysbuf != NULL)
				ExFreePool(ip->irp_assoc.irp_sysbuf);
			IoFreeIrp(ip);
			return(NULL);
		}
		ip->irp_userbuf = NULL;
		ip->irp_assoc.irp_sysbuf = NULL;
	}

	if (func == IRP_MJ_READ) {
		sl->isl_parameters.isl_read.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_read.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_read.isl_byteoff = 0;
	}

	if (func == IRP_MJ_WRITE) {
		sl->isl_parameters.isl_write.isl_len = len;
		if (off != NULL)
			sl->isl_parameters.isl_write.isl_byteoff = *off;
		else
			sl->isl_parameters.isl_write.isl_byteoff = 0;
	}

	return(ip);
}

static irp *
IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
    isinternal, event, status)
	uint32_t		iocode;
	device_object		*dobj;
	void			*ibuf;
	uint32_t		ilen;
	void			*obuf;
	uint32_t		olen;
	uint8_t			isinternal;
	nt_kevent		*event;
	io_status_block		*status;
{
	irp			*ip;
	io_stack_location	*sl;
	uint32_t		buflen;

	ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
	if (ip == NULL)
		return(NULL);
	ip->irp_usrevent = event;
	ip->irp_usriostat = status;
	ip->irp_tail.irp_overlay.irp_thread = NULL;

	sl = IoGetNextIrpStackLocation(ip);
	sl->isl_major = isinternal == TRUE ?
	    IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
	sl->isl_minor = 0;
	sl->isl_flags = 0;
	sl->isl_ctl = 0;
	sl->isl_devobj = dobj;
	sl->isl_fileobj = NULL;
	sl->isl_completionfunc = NULL;
	sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
	sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
	sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

	switch (IO_METHOD(iocode)) {
	case METHOD_BUFFERED:
		if (ilen > olen)
			buflen = ilen;
		else
			buflen = olen;
		if (buflen) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
		}
		if (ilen && ibuf != NULL) {
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
			bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
			    buflen - ilen);
		} else
			bzero(ip->irp_assoc.irp_sysbuf, ilen);
		ip->irp_userbuf = obuf;
		break;
	case METHOD_IN_DIRECT:
	case METHOD_OUT_DIRECT:
		if (ilen && ibuf != NULL) {
			ip->irp_assoc.irp_sysbuf =
			    ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
			if (ip->irp_assoc.irp_sysbuf == NULL) {
				IoFreeIrp(ip);
				return(NULL);
			}
			bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
		}
		if (olen && obuf != NULL) {
			ip->irp_mdl = IoAllocateMdl(obuf, olen,
			    FALSE, FALSE, ip);
			/*
			 * Normally we would MmProbeAndLockPages()
			 * here, but we don't have to in our
			 * implementation.
			 */
		}
		break;
	case METHOD_NEITHER:
		ip->irp_userbuf = obuf;
		sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
		break;
	default:
		break;
	}

	/*
	 * Ideally, we should associate this IRP with the calling
	 * thread here.
	 */

	return (ip);
}

static irp *
IoAllocateIrp(stsize, chargequota)
	uint8_t			stsize;
	uint8_t			chargequota;
{
	irp			*i;

	i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
	if (i == NULL)
		return (NULL);

	IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

	return (i);
}

static irp *
IoMakeAssociatedIrp(ip, stsize)
	irp			*ip;
	uint8_t			stsize;
{
	irp			*associrp;

	associrp = IoAllocateIrp(stsize, FALSE);
	if (associrp == NULL)
		return(NULL);

	mtx_lock(&ntoskrnl_dispatchlock);
	associrp->irp_flags |= IRP_ASSOCIATED_IRP;
	associrp->irp_tail.irp_overlay.irp_thread =
	    ip->irp_tail.irp_overlay.irp_thread;
	associrp->irp_assoc.irp_master = ip;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(associrp);
}

static void
IoFreeIrp(ip)
	irp			*ip;
{
	ExFreePool(ip);
	return;
}

static void
IoInitializeIrp(io, psize, ssize)
	irp			*io;
	uint16_t		psize;
	uint8_t			ssize;
{
	bzero((char *)io, IoSizeOfIrp(ssize));
	io->irp_size = psize;
	io->irp_stackcnt = ssize;
	io->irp_currentstackloc = ssize;
	InitializeListHead(&io->irp_thlist);
	io->irp_tail.irp_overlay.irp_csl =
	    (io_stack_location *)(io + 1) + ssize;

	return;
}

static void
IoReuseIrp(ip, status)
	irp			*ip;
	uint32_t		status;
{
	uint8_t			allocflags;

	allocflags = ip->irp_allocflags;
	IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
	ip->irp_iostat.isb_status = status;
	ip->irp_allocflags = allocflags;

	return;
}

void
IoAcquireCancelSpinLock(irql)
	uint8_t			*irql;
{
	KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

void
IoReleaseCancelSpinLock(irql)
	uint8_t			irql;
{
	KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
	return;
}

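/*
 * Cancel an IRP: with the cancel lock held, atomically clear the
 * IRP's cancel routine (IoSetCancelRoutine() returns the previous
 * one) and mark the IRP cancelled. If no cancel routine had been
 * set, drop the lock and report failure; otherwise invoke the
 * routine, which by Windows convention releases the cancel lock
 * itself.
 */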
uint8_t
IoCancelIrp(irp *ip)
{
	cancel_func		cfunc;

	IoAcquireCancelSpinLock(&ip->irp_cancelirql);
	cfunc = IoSetCancelRoutine(ip, NULL);
	ip->irp_cancel = TRUE;
	if (cfunc == NULL) {
		IoReleaseCancelSpinLock(ip->irp_cancelirql);
		return(FALSE);
	}
	MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
	return(TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
	device_object		*dobj;
	irp			*ip;
{
	driver_object		*drvobj;
	io_stack_location	*sl;
	uint32_t		status;
	driver_dispatch		disp;

	drvobj = dobj->do_drvobj;

	if (ip->irp_currentstackloc <= 0)
		panic("IoCallDriver(): out of stack locations");

	IoSetNextIrpStackLocation(ip);
	sl = IoGetCurrentIrpStackLocation(ip);

	sl->isl_devobj = dobj;

	disp = drvobj->dro_dispatch[sl->isl_major];
	status = MSCALL2(disp, dobj, ip);

	return(status);
}

void
IofCompleteRequest(ip, prioboost)
	irp			*ip;
	uint8_t			prioboost;
{
	uint32_t		i;
	uint32_t		status;
	device_object		*dobj;
	io_stack_location	*sl;
	completion_func		cf;

	ip->irp_pendingreturned =
	    IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
	sl = (io_stack_location *)(ip + 1);

	for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
		if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
			IoSkipCurrentIrpStackLocation(ip);
			dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
		} else
			dobj = NULL;

		if (sl[i].isl_completionfunc != NULL &&
		    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
		    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
		    sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
		    (ip->irp_cancel == TRUE &&
		    sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
			cf = sl->isl_completionfunc;
			status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
			if (status == STATUS_MORE_PROCESSING_REQUIRED)
				return;
		}

		if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
		    SL_PENDING_RETURNED)
			ip->irp_pendingreturned = TRUE;
	}

	/* Handle any associated IRPs. */

	if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
		uint32_t		masterirpcnt;
		irp			*masterirp;
		mdl			*m;

		masterirp = ip->irp_assoc.irp_master;
		masterirpcnt =
		    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

		while ((m = ip->irp_mdl) != NULL) {
			ip->irp_mdl = m->mdl_next;
			IoFreeMdl(m);
		}
		IoFreeIrp(ip);
		if (masterirpcnt == 0)
			IoCompleteRequest(masterirp, IO_NO_INCREMENT);
		return;
	}

	/* With any luck, these conditions will never arise. */

	if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
		if (ip->irp_usriostat != NULL)
			*ip->irp_usriostat = ip->irp_iostat;
		if (ip->irp_usrevent != NULL)
			KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
		if (ip->irp_flags & IRP_PAGING_IO) {
			if (ip->irp_mdl != NULL)
				IoFreeMdl(ip->irp_mdl);
			IoFreeIrp(ip);
		}
	}

	return;
}

void
ntoskrnl_intr(arg)
	void			*arg;
{
	kinterrupt		*iobj;
	uint8_t			irql;
	uint8_t			claimed;
	list_entry		*l;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	l = ntoskrnl_intlist.nle_flink;
	while (l != &ntoskrnl_intlist) {
		iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
		claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
		if (claimed == TRUE)
			break;
		l = l->nle_flink;
	}
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return;
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;
	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	return(irql);
}

void
KeReleaseInterruptSpinLock(iobj, irql)
	kinterrupt		*iobj;
	uint8_t			irql;
{
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);
	return;
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
	kinterrupt		*iobj;
	void			*syncfunc;
	void			*syncctx;
{
	uint8_t			irql;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	MSCALL1(syncfunc, syncctx);
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	return(TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */
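
/*
 * An illustrative sketch (not code from this file): the NDIS bus
 * attach code elsewhere in this layer is assumed to register the
 * shared ISR roughly like
 *
 *	error = bus_setup_intr(dev, irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE,
 *	    ntoskrnl_intr, NULL, &intrhand);
 *
 * where "irq_res" and "intrhand" are hypothetical names. After that,
 * IoConnectInterrupt() below only needs to link a kinterrupt onto
 * ntoskrnl_intlist for ntoskrnl_intr() to walk.
 */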

uint32_t
IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
    syncirql, imode, shared, affinity, savefloat)
	kinterrupt		**iobj;
	void			*svcfunc;
	void			*svcctx;
	uint32_t		vector;
	kspin_lock		*lock;
	uint8_t			irql;
	uint8_t			syncirql;
	uint8_t			imode;
	uint8_t			shared;
	uint32_t		affinity;
	uint8_t			savefloat;
{
	uint8_t			curirql;

	*iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
	if (*iobj == NULL)
		return(STATUS_INSUFFICIENT_RESOURCES);

	(*iobj)->ki_svcfunc = svcfunc;
	(*iobj)->ki_svcctx = svcctx;

	if (lock == NULL) {
		KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
		(*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
	} else
		(*iobj)->ki_lock = lock;

	KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
	InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

	return(STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
	kinterrupt		*iobj;
{
	uint8_t			irql;

	if (iobj == NULL)
		return;

	KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
	RemoveEntryList((&iobj->ki_list));
	KeReleaseSpinLock(&ntoskrnl_intlock, irql);

	ExFreePool(iobj);

	return;
}

device_object *
IoAttachDeviceToDeviceStack(src, dst)
	device_object		*src;
	device_object		*dst;
{
	device_object		*attached;

	mtx_lock(&ntoskrnl_dispatchlock);
	attached = IoGetAttachedDevice(dst);
	attached->do_attacheddev = src;
	src->do_attacheddev = NULL;
	src->do_stacksize = attached->do_stacksize + 1;
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(attached);
}

void
IoDetachDevice(topdev)
	device_object		*topdev;
{
	device_object		*tail;

	mtx_lock(&ntoskrnl_dispatchlock);

	/* First, break the chain. */
	tail = topdev->do_attacheddev;
	if (tail == NULL) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return;
	}
	topdev->do_attacheddev = tail->do_attacheddev;
	topdev->do_refcnt--;

	/* Now reduce the stacksize count for the tail objects. */

	tail = topdev->do_attacheddev;
	while (tail != NULL) {
		tail->do_stacksize--;
		tail = tail->do_attacheddev;
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return;
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
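 *
 * For example (illustrative values): a free mutant starts with
 * dh_sigstate == 1; the first acquisition by a thread drops it to 0,
 * and a recursive acquisition by the same owner drops it to -1. Both
 * 0 and -1 still count as signalled for the owning thread, while any
 * other thread must wait.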
 */

static int
ntoskrnl_is_signalled(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	if (obj->dh_type == DISP_TYPE_MUTANT) {
		km = (kmutant *)obj;
		if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
		    obj->dh_sigstate == 1)
			return(TRUE);
		return(FALSE);
	}

	if (obj->dh_sigstate > 0)
		return(TRUE);
	return(FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
	nt_dispatch_header	*obj;
	struct thread		*td;
{
	kmutant			*km;

	switch (obj->dh_type) {
	case DISP_TYPE_MUTANT:
		km = (struct kmutant *)obj;
		obj->dh_sigstate--;
		/*
		 * If sigstate reaches 0, the mutex is now
		 * non-signalled (the new thread owns it).
		 */
		if (obj->dh_sigstate == 0) {
			km->km_ownerthread = td;
			if (km->km_abandoned == TRUE)
				km->km_abandoned = FALSE;
		}
		break;
	/* Synchronization objects get reset to unsignalled. */
	case DISP_TYPE_SYNCHRONIZATION_EVENT:
	case DISP_TYPE_SYNCHRONIZATION_TIMER:
		obj->dh_sigstate = 0;
		break;
	case DISP_TYPE_SEMAPHORE:
		obj->dh_sigstate--;
		break;
	default:
		break;
	}

	return;
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
	wait_block		*wb;
{
	wait_block		*cur;
	struct thread		*td;

	cur = wb;
	td = wb->wb_kthread;

	do {
		ntoskrnl_satisfy_wait(cur->wb_object, td);
		cur->wb_awakened = TRUE;
		cur = cur->wb_next;
	} while (cur != wb);

	return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
	nt_dispatch_header	*obj;
	uint32_t		increment;
{
	wait_block		*w, *next;
	list_entry		*e;
	struct thread		*td;
	wb_ext			*we;
	int			satisfied;

	/*
	 * Once an object has been signalled, we walk its list of
	 * wait blocks. If a wait block can be awakened, then satisfy
	 * waits as necessary and wake the thread.
	 *
	 * The rules work like this:
	 *
	 * If a wait block is marked as WAITTYPE_ANY, then
	 * we can satisfy the wait conditions on the current
	 * object and wake the thread right away. Satisfying
	 * the wait also has the effect of breaking us out
	 * of the search loop.
	 *
	 * If the object is marked as WAITTYPE_ALL, then the
	 * wait block will be part of a circularly linked
	 * list of wait blocks belonging to a waiting thread
	 * that's sleeping in KeWaitForMultipleObjects(). In
	 * order to wake the thread, all the objects in the
	 * wait list must be in the signalled state. If they
	 * are, we then satisfy all of them and wake the
	 * thread.
	 *
	 */

	e = obj->dh_waitlisthead.nle_flink;

	while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
		w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
		we = w->wb_ext;
		td = we->we_td;
		satisfied = FALSE;
		if (w->wb_waittype == WAITTYPE_ANY) {
			/*
			 * Thread can be awakened if
			 * any wait is satisfied.
			 */
			ntoskrnl_satisfy_wait(obj, td);
			satisfied = TRUE;
			w->wb_awakened = TRUE;
		} else {
			/*
			 * Thread can only be woken up
			 * if all waits are satisfied.
			 * If the thread is waiting on multiple
			 * objects, they should all be linked
			 * through the wb_next pointers in the
			 * wait blocks.
			 */
			satisfied = TRUE;
			next = w->wb_next;
			while (next != w) {
				if (ntoskrnl_is_signalled(next->wb_object,
				    td) == FALSE) {
					satisfied = FALSE;
					break;
				}
				next = next->wb_next;
			}
			ntoskrnl_satisfy_multiple_waits(w);
		}

		if (satisfied == TRUE)
			cv_broadcastpri(&we->we_cv, w->wb_oldpri -
			    (increment * 4));

		e = e->nle_flink;
	}

	return;
}

static void
ntoskrnl_time(tval)
	uint64_t		*tval;
{
	struct timespec		ts;

	nanotime(&ts);
	*tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
	    (uint64_t)11644473600 * 10000000;	/* 1601-to-1970 epoch delta,
						   converted to 100ns units */

	return;
}

/*
 * KeWaitForSingleObject() is a tricky beast, because it can be used
 * with several different object types: semaphores, timers, events,
 * mutexes and threads. Semaphores don't appear very often, but the
 * other object types are quite common. KeWaitForSingleObject() is
 * what's normally used to acquire a mutex, and it can be used to
 * wait for a thread termination.
 *
 * The Windows NDIS API is implemented in terms of Windows kernel
 * primitives, and some of the object manipulation is duplicated in
 * NDIS. For example, NDIS has timers and events, which are actually
 * Windows kevents and ktimers. Now, you're supposed to only use the
 * NDIS variants of these objects within the confines of the NDIS API,
 * but there are some naughty developers out there who will use
 * KeWaitForSingleObject() on NDIS timer and event objects, so we
 * have to support that as well. Consequently, our NDIS timer and event
 * code has to be closely tied into our ntoskrnl timer and event code,
 * just as it is in Windows.
 *
 * KeWaitForSingleObject() may do different things for different kinds
 * of objects:
 *
 * - For events, we check if the event has been signalled. If the
 *   event is already in the signalled state, we just return immediately,
 *   otherwise we wait for it to be set to the signalled state by someone
 *   else calling KeSetEvent(). Events can be either synchronization or
 *   notification events.
 *
 * - For timers, if the timer has already fired and the timer is in
 *   the signalled state, we just return, otherwise we wait on the
 *   timer. Unlike an event, timers get signalled automatically when
 *   they expire rather than someone having to trip them manually.
 *   Timers initialized with KeInitializeTimer() are always notification
 *   events: KeInitializeTimerEx() lets you initialize a timer as
 *   either a notification or synchronization event.
 *
 * - For mutexes, we try to acquire the mutex and if we can't, we wait
 *   on the mutex until it's available and then grab it. When a mutex is
 *   released, it enters the signalled state, which wakes up one of the
 *   threads waiting to acquire it. Mutexes are always synchronization
 *   events.
 *
 * - For threads, the only thing we do is wait until the thread object
 *   enters a signalled state, which occurs when the thread terminates.
 *   Threads are always notification events.
 *
 * A notification event wakes up all threads waiting on an object. A
 * synchronization event wakes up just one. Also, a synchronization event
 * is auto-clearing, which means we automatically set the event back to
 * the non-signalled state once the wakeup is done.
 */
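
/*
 * A minimal usage sketch (illustrative only, not from any real
 * driver): wait up to one second for a notification event that
 * some other thread will set with KeSetEvent(). The names "ev",
 * "duetime" and "rval" are hypothetical.
 *
 *	nt_kevent		ev;
 *	int64_t			duetime;
 *	uint32_t		rval;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	duetime = -10000000;	// relative wait: 1s in 100ns units
 *	rval = KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime);
 *	if (rval == STATUS_TIMEOUT)
 *		;		// nobody signalled the event in time
 */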

uint32_t
KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
	void			*arg;
	uint32_t		reason;
	uint32_t		mode;
	uint8_t			alertable;
	int64_t			*duetime;
{
	wait_block		w;
	struct thread		*td = curthread;
	struct timeval		tv;
	int			error = 0;
	uint64_t		curtime;
	wb_ext			we;
	nt_dispatch_header	*obj;

	obj = arg;

	if (obj == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFS");
	we.we_td = td;

	/*
	 * Check to see if this object is already signalled,
	 * and just return without waiting if it is.
	 */
	if (ntoskrnl_is_signalled(obj, td) == TRUE) {
		/* Sanity check the signal state value. */
		if (obj->dh_sigstate != INT32_MIN) {
			ntoskrnl_satisfy_wait(obj, curthread);
			mtx_unlock(&ntoskrnl_dispatchlock);
			return (STATUS_SUCCESS);
		} else {
			/*
			 * There's a limit to how many times we can
			 * recursively acquire a mutant. If we hit
			 * the limit, something is very wrong.
			 */
			if (obj->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}
		}
	}

	bzero((char *)&w, sizeof(wait_block));
	w.wb_object = obj;
	w.wb_ext = &we;
	w.wb_waittype = WAITTYPE_ANY;
	w.wb_next = &w;
	w.wb_waitkey = 0;
	w.wb_awakened = FALSE;
	w.wb_oldpri = td->td_priority;

	InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));

	/*
	 * The timeout value is specified in 100 nanosecond units
	 * and can be a positive or negative number. If it's positive,
	 * then the duetime is absolute, and we need to convert it
	 * to an absolute offset relative to now in order to use it.
	 * If it's negative, then the duetime is relative and we
	 * just have to convert the units.
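	 *
	 * For example (illustrative values), a relative wait of half a
	 * second arrives here as *duetime == -5000000, which the code
	 * below converts to tv = { 0 sec, 500000 usec }.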
	 */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	if (duetime == NULL)
		cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
	else
		error = cv_timedwait(&we.we_cv,
		    &ntoskrnl_dispatchlock, tvtohz(&tv));

	RemoveEntryList(&w.wb_waitlist);

	cv_destroy(&we.we_cv);

	/* We timed out. Leave the object alone and return status. */

	if (error == EWOULDBLOCK) {
		mtx_unlock(&ntoskrnl_dispatchlock);
		return(STATUS_TIMEOUT);
	}

	mtx_unlock(&ntoskrnl_dispatchlock);

	return(STATUS_SUCCESS);
/*
	return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
	    mode, alertable, duetime, &w));
*/
}

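/*
 * Wait on up to MAX_WAIT_OBJECTS dispatcher objects at once. With
 * WAITTYPE_ANY, the wait is satisfied by the first object to become
 * signalled, and the return value encodes its index (STATUS_WAIT_0 + i).
 * With WAITTYPE_ALL, the thread sleeps until every object is signalled.
 * A caller waiting on more than THREAD_WAIT_OBJECTS objects must supply
 * its own wait block array.
 */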
static uint32_t
KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
    alertable, duetime, wb_array)
	uint32_t		cnt;
	nt_dispatch_header	*obj[];
	uint32_t		wtype;
	uint32_t		reason;
	uint32_t		mode;
	uint8_t			alertable;
	int64_t			*duetime;
	wait_block		*wb_array;
{
	struct thread		*td = curthread;
	wait_block		*whead, *w;
	wait_block		_wb_array[MAX_WAIT_OBJECTS];
	nt_dispatch_header	*cur;
	struct timeval		tv;
	int			i, wcnt = 0, error = 0;
	uint64_t		curtime;
	struct timespec		t1, t2;
	uint32_t		status = STATUS_SUCCESS;
	wb_ext			we;

	if (cnt > MAX_WAIT_OBJECTS)
		return(STATUS_INVALID_PARAMETER);
	if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
		return(STATUS_INVALID_PARAMETER);

	mtx_lock(&ntoskrnl_dispatchlock);

	cv_init(&we.we_cv, "KeWFM");
	we.we_td = td;

	if (wb_array == NULL)
		whead = _wb_array;
	else
		whead = wb_array;

	bzero((char *)whead, sizeof(wait_block) * cnt);

	/* First pass: see if we can satisfy any waits immediately. */

	wcnt = 0;
	w = whead;

	for (i = 0; i < cnt; i++) {
		InsertTailList((&obj[i]->dh_waitlisthead),
		    (&w->wb_waitlist));
		w->wb_ext = &we;
		w->wb_object = obj[i];
		w->wb_waittype = wtype;
		w->wb_waitkey = i;
		w->wb_awakened = FALSE;
		w->wb_oldpri = td->td_priority;
		w->wb_next = w + 1;
		w++;
		wcnt++;
		if (ntoskrnl_is_signalled(obj[i], td)) {
			/*
			 * There's a limit to how many times
			 * we can recursively acquire a mutant.
			 * If we hit the limit, something
			 * is very wrong.
			 */
			if (obj[i]->dh_sigstate == INT32_MIN &&
			    obj[i]->dh_type == DISP_TYPE_MUTANT) {
				mtx_unlock(&ntoskrnl_dispatchlock);
				panic("mutant limit exceeded");
			}

			/*
			 * If this is a WAITTYPE_ANY wait, then
			 * satisfy the waited object and exit
			 * right now.
			 */

			if (wtype == WAITTYPE_ANY) {
				ntoskrnl_satisfy_wait(obj[i], td);
				status = STATUS_WAIT_0 + i;
				goto wait_done;
			} else {
				w--;
				wcnt--;
				w->wb_object = NULL;
				RemoveEntryList(&w->wb_waitlist);
			}
		}
	}

	/*
	 * If this is a WAITTYPE_ALL wait and all objects are
	 * already signalled, satisfy the waits and exit now.
	 */

	if (wtype == WAITTYPE_ALL && wcnt == 0) {
		for (i = 0; i < cnt; i++)
			ntoskrnl_satisfy_wait(obj[i], td);
		status = STATUS_SUCCESS;
		goto wait_done;
	}

	/*
	 * Create a circular waitblock list. The waitcount
	 * must always be non-zero when we get here.
	 */

	(w - 1)->wb_next = whead;

	/* Wait on any objects that aren't yet signalled. */

	/* Calculate timeout, if any. */

	if (duetime != NULL) {
		if (*duetime < 0) {
			tv.tv_sec = - (*duetime) / 10000000;
			tv.tv_usec = (- (*duetime) / 10) -
			    (tv.tv_sec * 1000000);
		} else {
			ntoskrnl_time(&curtime);
			if (*duetime < curtime)
				tv.tv_sec = tv.tv_usec = 0;
			else {
				tv.tv_sec = ((*duetime) - curtime) / 10000000;
				tv.tv_usec = ((*duetime) - curtime) / 10 -
				    (tv.tv_sec * 1000000);
			}
		}
	}

	while (wcnt) {
		nanotime(&t1);

		if (duetime == NULL)
			cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
		else
			error = cv_timedwait(&we.we_cv,
			    &ntoskrnl_dispatchlock, tvtohz(&tv));

		/* Wait with timeout expired. */

		if (error) {
			status = STATUS_TIMEOUT;
			goto wait_done;
		}

		nanotime(&t2);

		/* See what's been signalled. */

		w = whead;
		do {
			cur = w->wb_object;
			if (ntoskrnl_is_signalled(cur, td) == TRUE ||
			    w->wb_awakened == TRUE) {
				/* Sanity check the signal state value. */
				if (cur->dh_sigstate == INT32_MIN &&
				    cur->dh_type == DISP_TYPE_MUTANT) {
					mtx_unlock(&ntoskrnl_dispatchlock);
					panic("mutant limit exceeded");
				}
				wcnt--;
				if (wtype == WAITTYPE_ANY) {
					status = w->wb_waitkey &
					    STATUS_WAIT_0;
					goto wait_done;
				}
			}
			w = w->wb_next;
		} while (w != whead);

		/*
		 * If all objects have been signalled, or if this
		 * is a WAITTYPE_ANY wait and we were woken up by
		 * someone, we can bail.
		 */

		if (wcnt == 0) {
			status = STATUS_SUCCESS;
			goto wait_done;
		}

		/*
		 * If this is WAITTYPE_ALL wait, and there's still
		 * objects that haven't been signalled, deduct the
		 * time that's elapsed so far from the timeout and
		 * wait again (or continue waiting indefinitely if
		 * there's no timeout).
		 */

		if (duetime != NULL) {
			tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
			tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
		}
	}


wait_done:

	cv_destroy(&we.we_cv);

	for (i = 0; i < cnt; i++) {
		if (whead[i].wb_object != NULL)
			RemoveEntryList(&whead[i].wb_waitlist);
	}
	mtx_unlock(&ntoskrnl_dispatchlock);

	return(status);
}

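/*
 * Windows memory-mapped register access routines, implemented on
 * top of FreeBSD's bus_space accessors. The register pointer the
 * driver hands us is used directly as the bus_space offset.
 */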
static void
WRITE_REGISTER_USHORT(reg, val)
	uint16_t		*reg;
	uint16_t		val;
{
	bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

static uint16_t
READ_REGISTER_USHORT(reg)
	uint16_t		*reg;
{
	return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_ULONG(reg, val)
	uint32_t		*reg;
	uint32_t		val;
{
	bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

static uint32_t
READ_REGISTER_ULONG(reg)
	uint32_t		*reg;
{
	return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static uint8_t
READ_REGISTER_UCHAR(reg)
	uint8_t			*reg;
{
	return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
}

static void
WRITE_REGISTER_UCHAR(reg, val)
	uint8_t			*reg;
	uint8_t			val;
{
	bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
	return;
}

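/*
 * 64-bit arithmetic helpers. On i386, the Microsoft compiler does
 * not inline 64-bit multiply/divide/modulus/shift operations;
 * instead it emits calls to runtime helpers with these names
 * (_all* for signed, _aull* for unsigned), so Windows drivers
 * expect the kernel to provide them.
 */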
static int64_t
_allmul(a, b)
	int64_t			a;
	int64_t			b;
{
	return (a * b);
}

static int64_t
_alldiv(a, b)
	int64_t			a;
	int64_t			b;
{
	return (a / b);
}

static int64_t
_allrem(a, b)
	int64_t			a;
	int64_t			b;
{
	return (a % b);
}

static uint64_t
_aullmul(a, b)
	uint64_t		a;
	uint64_t		b;
{
	return (a * b);
}

static uint64_t
_aulldiv(a, b)
	uint64_t		a;
	uint64_t		b;
{
	return (a / b);
}

static uint64_t
_aullrem(a, b)
	uint64_t		a;
	uint64_t		b;
{
	return (a % b);
}

static int64_t
_allshl(a, b)
	int64_t			a;
	uint8_t			b;
{
	return (a << b);
}

static uint64_t
_aullshl(a, b)
	uint64_t		a;
	uint8_t			b;
{
	return (a << b);
}

static int64_t
_allshr(a, b)
	int64_t			a;
	uint8_t			b;
{
	return (a >> b);
}

static uint64_t
_aullshr(a, b)
	uint64_t		a;
	uint8_t			b;
{
	return (a >> b);
}

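/*
 * Unlocked push/pop primitives for the Windows SList (sequenced,
 * singly-linked list) routines. Callers are expected to provide
 * their own serialization; the ExInterlocked*SList() wrappers
 * defined later in this file take a lock around these.
 */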
static slist_entry *
ntoskrnl_pushsl(head, entry)
	slist_header		*head;
	slist_entry		*entry;
{
	slist_entry		*oldhead;

	oldhead = head->slh_list.slh_next;
	entry->sl_next = head->slh_list.slh_next;
	head->slh_list.slh_next = entry;
	head->slh_list.slh_depth++;
	head->slh_list.slh_seq++;

	return(oldhead);
}

static slist_entry *
ntoskrnl_popsl(head)
	slist_header		*head;
{
	slist_entry		*first;

	first = head->slh_list.slh_next;
	if (first != NULL) {
		head->slh_list.slh_next = first->sl_next;
		head->slh_list.slh_depth--;
		head->slh_list.slh_seq++;
	}

	return(first);
}

2125 /*
 * We need this to make lookaside lists work for amd64.
 * We store a pointer to ExAllocatePoolWithTag() in the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly will result in a calling convention
 * mismatch and a nasty crash. On x86, this effectively
 * becomes a no-op since ipt_func and ipt_wrap are the same.
2134 */
2135
2136 static funcptr
2137 ntoskrnl_findwrap(func)
2138 funcptr func;
2139 {
2140 image_patch_table *patch;
2141
2142 patch = ntoskrnl_functbl;
2143 while (patch->ipt_func != NULL) {
2144 if ((funcptr)patch->ipt_func == func)
2145 return((funcptr)patch->ipt_wrap);
2146 patch++;
2147 }
2148
2149 return(NULL);
2150 }
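
/*
 * A sketch of the intended use (hypothetical caller, not from
 * this file): whenever one of our routines must be handed to
 * Windows code as a callable pointer, look up its wrapper first,
 * and invoke such pointers only through the MSCALL*() macros:
 *
 *	lookaside_free_func	*freefunc;
 *
 *	freefunc = (lookaside_free_func *)
 *	    ntoskrnl_findwrap((funcptr)ExFreePool);
 *	if (freefunc != NULL)
 *		MSCALL1(freefunc, buf);
 */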
2151
2152 static void
2153 ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
2154 flags, size, tag, depth)
2155 paged_lookaside_list *lookaside;
2156 lookaside_alloc_func *allocfunc;
2157 lookaside_free_func *freefunc;
2158 uint32_t flags;
2159 size_t size;
2160 uint32_t tag;
2161 uint16_t depth;
2162 {
2163 bzero((char *)lookaside, sizeof(paged_lookaside_list));
2164
2165 if (size < sizeof(slist_entry))
2166 lookaside->nll_l.gl_size = sizeof(slist_entry);
2167 else
2168 lookaside->nll_l.gl_size = size;
2169 lookaside->nll_l.gl_tag = tag;
2170 if (allocfunc == NULL)
2171 lookaside->nll_l.gl_allocfunc =
2172 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2173 else
2174 lookaside->nll_l.gl_allocfunc = allocfunc;
2175
2176 if (freefunc == NULL)
2177 lookaside->nll_l.gl_freefunc =
2178 ntoskrnl_findwrap((funcptr)ExFreePool);
2179 else
2180 lookaside->nll_l.gl_freefunc = freefunc;
2181
2182 #ifdef __i386__
2183 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2184 #endif
2185
2186 lookaside->nll_l.gl_type = NonPagedPool;
2187 lookaside->nll_l.gl_depth = depth;
2188 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2189
2190 return;
2191 }
2192
2193 static void
2194 ExDeletePagedLookasideList(lookaside)
2195 paged_lookaside_list *lookaside;
2196 {
2197 void *buf;
2198 void (*freefunc)(void *);
2199
2200 freefunc = lookaside->nll_l.gl_freefunc;
2201 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2202 MSCALL1(freefunc, buf);
2203
2204 return;
2205 }
2206
2207 static void
2208 ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
2209 flags, size, tag, depth)
2210 npaged_lookaside_list *lookaside;
2211 lookaside_alloc_func *allocfunc;
2212 lookaside_free_func *freefunc;
2213 uint32_t flags;
2214 size_t size;
2215 uint32_t tag;
2216 uint16_t depth;
2217 {
2218 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2219
2220 if (size < sizeof(slist_entry))
2221 lookaside->nll_l.gl_size = sizeof(slist_entry);
2222 else
2223 lookaside->nll_l.gl_size = size;
2224 lookaside->nll_l.gl_tag = tag;
2225 if (allocfunc == NULL)
2226 lookaside->nll_l.gl_allocfunc =
2227 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2228 else
2229 lookaside->nll_l.gl_allocfunc = allocfunc;
2230
2231 if (freefunc == NULL)
2232 lookaside->nll_l.gl_freefunc =
2233 ntoskrnl_findwrap((funcptr)ExFreePool);
2234 else
2235 lookaside->nll_l.gl_freefunc = freefunc;
2236
2237 #ifdef __i386__
2238 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2239 #endif
2240
2241 lookaside->nll_l.gl_type = NonPagedPool;
2242 lookaside->nll_l.gl_depth = depth;
2243 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2244
2245 return;
2246 }
2247
2248 static void
2249 ExDeleteNPagedLookasideList(lookaside)
2250 npaged_lookaside_list *lookaside;
2251 {
2252 void *buf;
2253 void (*freefunc)(void *);
2254
2255 freefunc = lookaside->nll_l.gl_freefunc;
2256 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2257 MSCALL1(freefunc, buf);
2258
2259 return;
2260 }
2261
2262 slist_entry *
2263 InterlockedPushEntrySList(head, entry)
2264 slist_header *head;
2265 slist_entry *entry;
2266 {
2267 slist_entry *oldhead;
2268
2269 mtx_lock_spin(&ntoskrnl_interlock);
2270 oldhead = ntoskrnl_pushsl(head, entry);
2271 mtx_unlock_spin(&ntoskrnl_interlock);
2272
2273 return(oldhead);
2274 }
2275
2276 slist_entry *
2277 InterlockedPopEntrySList(head)
2278 slist_header *head;
2279 {
2280 slist_entry *first;
2281
2282 mtx_lock_spin(&ntoskrnl_interlock);
2283 first = ntoskrnl_popsl(head);
2284 mtx_unlock_spin(&ntoskrnl_interlock);
2285
2286 return(first);
2287 }
2288
2289 static slist_entry *
2290 ExInterlockedPushEntrySList(head, entry, lock)
2291 slist_header *head;
2292 slist_entry *entry;
2293 kspin_lock *lock;
2294 {
2295 return(InterlockedPushEntrySList(head, entry));
2296 }
2297
2298 static slist_entry *
2299 ExInterlockedPopEntrySList(head, lock)
2300 slist_header *head;
2301 kspin_lock *lock;
2302 {
2303 return(InterlockedPopEntrySList(head));
2304 }
2305
2306 uint16_t
2307 ExQueryDepthSList(head)
2308 slist_header *head;
2309 {
2310 uint16_t depth;
2311
2312 mtx_lock_spin(&ntoskrnl_interlock);
2313 depth = head->slh_list.slh_depth;
2314 mtx_unlock_spin(&ntoskrnl_interlock);
2315
2316 return(depth);
2317 }
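
/*
 * Usage sketch (hypothetical names, not from this file): a driver
 * typically keeps a lock-free pool of fixed-size descriptors on an
 * slist, with the header zeroed before first use:
 *
 *	slist_header	head;
 *	slist_entry	*e;
 *
 *	bzero(&head, sizeof(head));
 *	InterlockedPushEntrySList(&head, &desc->d_slink);
 *	e = InterlockedPopEntrySList(&head);
 *	if (e != NULL)
 *		desc = CONTAINING_RECORD(e, struct desc, d_slink);
 */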
2318
2319 void
2320 KeInitializeSpinLock(lock)
2321 kspin_lock *lock;
2322 {
2323 *lock = 0;
2324
2325 return;
2326 }
2327
2328 #ifdef __i386__
2329 void
2330 KefAcquireSpinLockAtDpcLevel(lock)
2331 kspin_lock *lock;
2332 {
2333 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2334 int i = 0;
2335 #endif
2336
2337 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2338 /* sit and spin */;
2339 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2340 i++;
2341 if (i > 200000000)
2342 panic("DEADLOCK!");
2343 #endif
2344 }
2345
2346 return;
2347 }
2348
2349 void
2350 KefReleaseSpinLockFromDpcLevel(lock)
2351 kspin_lock *lock;
2352 {
2353 atomic_store_rel_int((volatile u_int *)lock, 0);
2354
2355 return;
2356 }
2357
2358 uint8_t
2359 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2360 {
2361 uint8_t oldirql;
2362
2363 if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2364 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2365
2366 KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2367 KeAcquireSpinLockAtDpcLevel(lock);
2368
2369 return(oldirql);
2370 }
2371 #else
2372 void
2373 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2374 {
2375 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2376 /* sit and spin */;
2377
2378 return;
2379 }
2380
2381 void
2382 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2383 {
2384 atomic_store_rel_int((volatile u_int *)lock, 0);
2385
2386 return;
2387 }
2388 #endif /* __i386__ */
2389
2390 uintptr_t
2391 InterlockedExchange(dst, val)
2392 volatile uint32_t *dst;
2393 uintptr_t val;
2394 {
2395 uintptr_t r;
2396
2397 mtx_lock_spin(&ntoskrnl_interlock);
2398 r = *dst;
2399 *dst = val;
2400 mtx_unlock_spin(&ntoskrnl_interlock);
2401
2402 return(r);
2403 }
2404
2405 static uint32_t
2406 InterlockedIncrement(addend)
2407 volatile uint32_t *addend;
2408 {
	atomic_add_int((volatile u_int *)addend, 1);
2410 return(*addend);
2411 }
2412
2413 static uint32_t
2414 InterlockedDecrement(addend)
2415 volatile uint32_t *addend;
2416 {
	atomic_subtract_int((volatile u_int *)addend, 1);
2418 return(*addend);
2419 }
2420
2421 static void
2422 ExInterlockedAddLargeStatistic(addend, inc)
2423 uint64_t *addend;
2424 uint32_t inc;
2425 {
2426 mtx_lock_spin(&ntoskrnl_interlock);
2427 *addend += inc;
2428 mtx_unlock_spin(&ntoskrnl_interlock);
2429
2430 return;
}
2432
2433 mdl *
2434 IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
2435 void *vaddr;
2436 uint32_t len;
2437 uint8_t secondarybuf;
2438 uint8_t chargequota;
2439 irp *iopkt;
2440 {
2441 mdl *m;
2442 int zone = 0;
2443
2444 if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2445 m = ExAllocatePoolWithTag(NonPagedPool,
2446 MmSizeOfMdl(vaddr, len), 0);
2447 else {
2448 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
2449 zone++;
2450 }
2451
2452 if (m == NULL)
2453 return (NULL);
2454
2455 MmInitializeMdl(m, vaddr, len);
2456
2457 /*
	 * MmInitializeMdl() clears the flags field, so we
2459 * have to set this here. If the MDL came from the
2460 * MDL UMA zone, tag it so we can release it to
2461 * the right place later.
2462 */
2463 if (zone)
2464 m->mdl_flags = MDL_ZONE_ALLOCED;
2465
2466 if (iopkt != NULL) {
2467 if (secondarybuf == TRUE) {
2468 mdl *last;
2469 last = iopkt->irp_mdl;
2470 while (last->mdl_next != NULL)
2471 last = last->mdl_next;
2472 last->mdl_next = m;
2473 } else {
2474 if (iopkt->irp_mdl != NULL)
2475 panic("leaking an MDL in IoAllocateMdl()");
2476 iopkt->irp_mdl = m;
2477 }
2478 }
2479
2480 return (m);
2481 }
2482
2483 void
2484 IoFreeMdl(m)
2485 mdl *m;
2486 {
2487 if (m == NULL)
2488 return;
2489
2490 if (m->mdl_flags & MDL_ZONE_ALLOCED)
2491 uma_zfree(mdl_zone, m);
2492 else
2493 ExFreePool(m);
2494
2495 return;
2496 }
2497
2498 static void *
2499 MmAllocateContiguousMemory(size, highest)
2500 uint32_t size;
2501 uint64_t highest;
2502 {
2503 void *addr;
2504 size_t pagelength = roundup(size, PAGE_SIZE);
2505
2506 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2507
2508 return(addr);
2509 }
2510
2511 static void *
2512 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
2513 boundary, cachetype)
2514 uint32_t size;
2515 uint64_t lowest;
2516 uint64_t highest;
2517 uint64_t boundary;
2518 uint32_t cachetype;
2519 {
2520 void *addr;
2521 size_t pagelength = roundup(size, PAGE_SIZE);
2522
2523 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2524
2525 return(addr);
2526 }
2527
2528 static void
2529 MmFreeContiguousMemory(base)
2530 void *base;
2531 {
2532 ExFreePool(base);
2533 }
2534
2535 static void
2536 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
2537 void *base;
2538 uint32_t size;
2539 uint32_t cachetype;
2540 {
2541 ExFreePool(base);
2542 }
2543
2544 static uint32_t
2545 MmSizeOfMdl(vaddr, len)
2546 void *vaddr;
2547 size_t len;
2548 {
2549 uint32_t l;
2550
2551 l = sizeof(struct mdl) +
2552 (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2553
2554 return(l);
2555 }
2556
2557 /*
2558 * The Microsoft documentation says this routine fills in the
2559 * page array of an MDL with the _physical_ page addresses that
2560 * comprise the buffer, but we don't really want to do that here.
2561 * Instead, we just fill in the page array with the kernel virtual
2562 * addresses of the buffers.
2563 */
2564 void
2565 MmBuildMdlForNonPagedPool(m)
2566 mdl *m;
2567 {
2568 vm_offset_t *mdl_pages;
2569 int pagecnt, i;
2570
2571 pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2572
2573 if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2574 panic("not enough pages in MDL to describe buffer");
2575
2576 mdl_pages = MmGetMdlPfnArray(m);
2577
	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2580
2581 m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2582 m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2583
2584 return;
2585 }
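
/*
 * Taken together with IoAllocateMdl(), a minimal sketch of how a
 * driver would describe a nonpaged buffer (hypothetical names,
 * error handling elided; accessmode 0 corresponds to KernelMode):
 *
 *	mdl	*m;
 *	void	*va;
 *
 *	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
 *	if (m != NULL) {
 *		MmBuildMdlForNonPagedPool(m);
 *		va = MmMapLockedPages(m, 0);
 *		...
 *		MmUnmapLockedPages(va, m);
 *		IoFreeMdl(m);
 *	}
 */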
2586
2587 static void *
2588 MmMapLockedPages(buf, accessmode)
2589 mdl *buf;
2590 uint8_t accessmode;
2591 {
2592 buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2593 return(MmGetMdlVirtualAddress(buf));
2594 }
2595
2596 static void *
2597 MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
2598 bugcheck, prio)
2599 mdl *buf;
2600 uint8_t accessmode;
2601 uint32_t cachetype;
2602 void *vaddr;
2603 uint32_t bugcheck;
2604 uint32_t prio;
2605 {
2606 return(MmMapLockedPages(buf, accessmode));
2607 }
2608
2609 static void
2610 MmUnmapLockedPages(vaddr, buf)
2611 void *vaddr;
2612 mdl *buf;
2613 {
2614 buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2615 return;
2616 }
2617
2618 /*
2619 * This function has a problem in that it will break if you
2620 * compile this module without PAE and try to use it on a PAE
2621 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
2623 * You'd think the virtual memory subsystem would help us out
2624 * here, but it doesn't.
2625 */
2626
2627 static uint8_t
2628 MmIsAddressValid(vaddr)
2629 void *vaddr;
2630 {
2631 if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
2632 return(TRUE);
2633
2634 return(FALSE);
2635 }
2636
2637 void *
2638 MmMapIoSpace(paddr, len, cachetype)
2639 uint64_t paddr;
2640 uint32_t len;
2641 uint32_t cachetype;
2642 {
2643 devclass_t nexus_class;
2644 device_t *nexus_devs, devp;
2645 int nexus_count = 0;
2646 device_t matching_dev = NULL;
2647 struct resource *res;
2648 int i;
2649 vm_offset_t v;
2650
2651 /* There will always be at least one nexus. */
2652
2653 nexus_class = devclass_find("nexus");
2654 devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2655
2656 for (i = 0; i < nexus_count; i++) {
2657 devp = nexus_devs[i];
2658 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2659 if (matching_dev)
2660 break;
2661 }
2662
2663 free(nexus_devs, M_TEMP);
2664
2665 if (matching_dev == NULL)
2666 return(NULL);
2667
2668 v = (vm_offset_t)rman_get_virtual(res);
2669 if (paddr > rman_get_start(res))
2670 v += paddr - rman_get_start(res);
2671
2672 return((void *)v);
2673 }
2674
2675 void
2676 MmUnmapIoSpace(vaddr, len)
2677 void *vaddr;
2678 size_t len;
2679 {
2680 return;
2681 }
2682
2683
2684 static device_t
2685 ntoskrnl_finddev(dev, paddr, res)
2686 device_t dev;
2687 uint64_t paddr;
2688 struct resource **res;
2689 {
2690 device_t *children = NULL;
2691 device_t matching_dev;
2692 int childcnt;
2693 struct resource *r;
2694 struct resource_list *rl;
2695 struct resource_list_entry *rle;
2696 uint32_t flags;
2697 int i;
2698
2699 /* We only want devices that have been successfully probed. */
2700
2701 if (device_is_alive(dev) == FALSE)
2702 return(NULL);
2703
2704 rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2705 if (rl != NULL) {
2706 #if __FreeBSD_version < 600022
2707 SLIST_FOREACH(rle, rl, link) {
2708 #else
2709 STAILQ_FOREACH(rle, rl, link) {
2710 #endif
2711 r = rle->res;
2712
2713 if (r == NULL)
2714 continue;
2715
2716 flags = rman_get_flags(r);
2717
2718 if (rle->type == SYS_RES_MEMORY &&
2719 paddr >= rman_get_start(r) &&
2720 paddr <= rman_get_end(r)) {
2721 if (!(flags & RF_ACTIVE))
2722 bus_activate_resource(dev,
2723 SYS_RES_MEMORY, 0, r);
2724 *res = r;
2725 return(dev);
2726 }
2727 }
2728 }
2729
2730 /*
2731 * If this device has children, do another
2732 * level of recursion to inspect them.
2733 */
2734
2735 device_get_children(dev, &children, &childcnt);
2736
2737 for (i = 0; i < childcnt; i++) {
2738 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2739 if (matching_dev != NULL) {
2740 free(children, M_TEMP);
2741 return(matching_dev);
2742 }
2743 }
2744
2745
2746 /* Won't somebody please think of the children! */
2747
2748 if (children != NULL)
2749 free(children, M_TEMP);
2750
2751 return(NULL);
2752 }
2753
2754 /*
2755 * Workitems are unlike DPCs, in that they run in a user-mode thread
2756 * context rather than at DISPATCH_LEVEL in kernel context. In our
2757 * case we run them in kernel context anyway.
2758 */
2759 static void
2760 ntoskrnl_workitem_thread(arg)
2761 void *arg;
2762 {
2763 kdpc_queue *kq;
2764 list_entry *l;
2765 io_workitem *iw;
2766 uint8_t irql;
2767
2768 kq = arg;
2769
2770 InitializeListHead(&kq->kq_disp);
2771 kq->kq_td = curthread;
2772 kq->kq_exit = 0;
2773 KeInitializeSpinLock(&kq->kq_lock);
2774 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2775
2776 while (1) {
2777 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2778
2779 KeAcquireSpinLock(&kq->kq_lock, &irql);
2780
2781 if (kq->kq_exit) {
2782 KeReleaseSpinLock(&kq->kq_lock, irql);
2783 break;
2784 }
2785
2786 while (!IsListEmpty(&kq->kq_disp)) {
2787 l = RemoveHeadList(&kq->kq_disp);
2788 iw = CONTAINING_RECORD(l,
2789 io_workitem, iw_listentry);
2790 InitializeListHead((&iw->iw_listentry));
2791 if (iw->iw_func == NULL)
2792 continue;
2793 KeReleaseSpinLock(&kq->kq_lock, irql);
2794 MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2795 KeAcquireSpinLock(&kq->kq_lock, &irql);
2796 }
2797
2798 KeReleaseSpinLock(&kq->kq_lock, irql);
2799 }
2800
2801 #if __FreeBSD_version < 502113
2802 mtx_lock(&Giant);
2803 #endif
2804 kthread_exit(0);
2805 return; /* notreached */
2806 }
2807
2808 static void
2809 ntoskrnl_destroy_workitem_threads(void)
2810 {
2811 kdpc_queue *kq;
2812 int i;
2813
2814 for (i = 0; i < WORKITEM_THREADS; i++) {
2815 kq = wq_queues + i;
2816 kq->kq_exit = 1;
2817 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2818 tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", 0);
2819 }
2820
2821 return;
2822 }
2823
2824 io_workitem *
2825 IoAllocateWorkItem(dobj)
2826 device_object *dobj;
2827 {
2828 io_workitem *iw;
2829
2830 iw = uma_zalloc(iw_zone, M_NOWAIT);
2831 if (iw == NULL)
2832 return(NULL);
2833
2834 InitializeListHead(&iw->iw_listentry);
2835 iw->iw_dobj = dobj;
2836
2837 mtx_lock(&ntoskrnl_dispatchlock);
2838 iw->iw_idx = wq_idx;
2839 WORKIDX_INC(wq_idx);
2840 mtx_unlock(&ntoskrnl_dispatchlock);
2841
2842 return(iw);
2843 }
2844
2845 void
2846 IoFreeWorkItem(iw)
2847 io_workitem *iw;
2848 {
2849 uma_zfree(iw_zone, iw);
2850 return;
2851 }
2852
2853 void
2854 IoQueueWorkItem(iw, iw_func, qtype, ctx)
2855 io_workitem *iw;
2856 io_workitem_func iw_func;
2857 uint32_t qtype;
2858 void *ctx;
2859 {
2860 kdpc_queue *kq;
2861 list_entry *l;
2862 io_workitem *cur;
2863 uint8_t irql;
2864
2865 kq = wq_queues + iw->iw_idx;
2866
2867 KeAcquireSpinLock(&kq->kq_lock, &irql);
2868
2869 /*
2870 * Traverse the list and make sure this workitem hasn't
2871 * already been inserted. Queuing the same workitem
2872 * twice will hose the list but good.
2873 */
2874
2875 l = kq->kq_disp.nle_flink;
2876 while (l != &kq->kq_disp) {
2877 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2878 if (cur == iw) {
2879 /* Already queued -- do nothing. */
2880 KeReleaseSpinLock(&kq->kq_lock, irql);
2881 return;
2882 }
2883 l = l->nle_flink;
2884 }
2885
2886 iw->iw_func = iw_func;
2887 iw->iw_ctx = ctx;
2888
2889 InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2890 KeReleaseSpinLock(&kq->kq_lock, irql);
2891
2892 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2893
2894 return;
2895 }
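
/*
 * A typical call sequence, sketched with hypothetical names. The
 * queue type argument is accepted but unused by this implementation,
 * and iw_func must be a Windows-convention pointer (i.e. already
 * wrapped -- cf. ntoskrnl_findwrap()), since we invoke it with
 * MSCALL2(func, dobj, ctx):
 *
 *	io_workitem	*iw;
 *
 *	iw = IoAllocateWorkItem(dobj);
 *	if (iw != NULL)
 *		IoQueueWorkItem(iw, my_func, 0, my_ctx);
 *	...
 *	IoFreeWorkItem(iw);	(only once my_func has run)
 */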
2896
2897 static void
2898 ntoskrnl_workitem(dobj, arg)
2899 device_object *dobj;
2900 void *arg;
2901 {
2902 io_workitem *iw;
2903 work_queue_item *w;
2904 work_item_func f;
2905
2906 iw = arg;
2907 w = (work_queue_item *)dobj;
2908 f = (work_item_func)w->wqi_func;
2909 uma_zfree(iw_zone, iw);
2910 MSCALL2(f, w, w->wqi_ctx);
2911
2912 return;
2913 }
2914
2915 /*
2916 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2917 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2918 * problem with ExQueueWorkItem() is that it can't guard against
2919 * the condition where a driver submits a job to the work queue and
2920 * is then unloaded before the job is able to run. IoQueueWorkItem()
2921 * acquires a reference to the device's device_object via the
2922 * object manager and retains it until after the job has completed,
2923 * which prevents the driver from being unloaded before the job
2924 * runs. (We don't currently support this behavior, though hopefully
2925 * that will change once the object manager API is fleshed out a bit.)
2926 *
2927 * Having said all that, the ExQueueWorkItem() API remains, because
2928 * there are still other parts of Windows that use it, including
2929 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2930 * We fake up the ExQueueWorkItem() API on top of our implementation
2931 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2932 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2933 * queue item (provided by the caller) in to IoAllocateWorkItem()
2934 * instead of the device_object. We need to save this pointer so
2935 * we can apply a sanity check: as with the DPC queue and other
2936 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
2938 */
2939
2940 void
2941 ExQueueWorkItem(w, qtype)
2942 work_queue_item *w;
2943 uint32_t qtype;
2944 {
2945 io_workitem *iw;
2946 io_workitem_func iwf;
2947 kdpc_queue *kq;
2948 list_entry *l;
2949 io_workitem *cur;
2950 uint8_t irql;
2951
2952
2953 /*
2954 * We need to do a special sanity test to make sure
2955 * the ExQueueWorkItem() API isn't used to queue
2956 * the same workitem twice. Rather than checking the
2957 * io_workitem pointer itself, we test the attached
2958 * device object, which is really a pointer to the
2959 * legacy work queue item structure.
2960 */
2961
2962 kq = wq_queues + WORKITEM_LEGACY_THREAD;
2963 KeAcquireSpinLock(&kq->kq_lock, &irql);
2964 l = kq->kq_disp.nle_flink;
2965 while (l != &kq->kq_disp) {
2966 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2967 if (cur->iw_dobj == (device_object *)w) {
2968 /* Already queued -- do nothing. */
2969 KeReleaseSpinLock(&kq->kq_lock, irql);
2970 return;
2971 }
2972 l = l->nle_flink;
2973 }
2974 KeReleaseSpinLock(&kq->kq_lock, irql);
2975
2976 iw = IoAllocateWorkItem((device_object *)w);
2977 if (iw == NULL)
2978 return;
2979
2980 iw->iw_idx = WORKITEM_LEGACY_THREAD;
2981 iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2982 IoQueueWorkItem(iw, iwf, qtype, iw);
2983
2984 return;
2985 }
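
/*
 * For reference, a legacy caller looks roughly like this
 * (hypothetical names; on Windows the two assignments are normally
 * hidden behind the ExInitializeWorkItem() macro):
 *
 *	work_queue_item	w;
 *
 *	w.wqi_func = my_func;	(Windows-convention pointer)
 *	w.wqi_ctx = my_ctx;
 *	ExQueueWorkItem(&w, 0);
 */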
2986
2987 static void
2988 RtlZeroMemory(dst, len)
2989 void *dst;
2990 size_t len;
2991 {
2992 bzero(dst, len);
2993 return;
2994 }
2995
2996 static void
2997 RtlCopyMemory(dst, src, len)
2998 void *dst;
2999 const void *src;
3000 size_t len;
3001 {
3002 bcopy(src, dst, len);
3003 return;
3004 }
3005
3006 static size_t
3007 RtlCompareMemory(s1, s2, len)
3008 const void *s1;
3009 const void *s2;
3010 size_t len;
3011 {
	size_t i, total = 0;
	const uint8_t *m1, *m2;

	m1 = s1;
	m2 = s2;
3017
3018 for (i = 0; i < len; i++) {
3019 if (m1[i] == m2[i])
3020 total++;
3021 }
3022 return(total);
3023 }
3024
3025 void
3026 RtlInitAnsiString(dst, src)
3027 ansi_string *dst;
3028 char *src;
3029 {
3030 ansi_string *a;
3031
3032 a = dst;
3033 if (a == NULL)
3034 return;
3035 if (src == NULL) {
3036 a->as_len = a->as_maxlen = 0;
3037 a->as_buf = NULL;
3038 } else {
3039 a->as_buf = src;
3040 a->as_len = a->as_maxlen = strlen(src);
3041 }
3042
3043 return;
3044 }
3045
3046 void
3047 RtlInitUnicodeString(dst, src)
3048 unicode_string *dst;
3049 uint16_t *src;
3050 {
3051 unicode_string *u;
3052 int i;
3053
3054 u = dst;
3055 if (u == NULL)
3056 return;
3057 if (src == NULL) {
3058 u->us_len = u->us_maxlen = 0;
3059 u->us_buf = NULL;
3060 } else {
3061 i = 0;
3062 while(src[i] != 0)
3063 i++;
3064 u->us_buf = src;
3065 u->us_len = u->us_maxlen = i * 2;
3066 }
3067
3068 return;
3069 }
3070
3071 ndis_status
3072 RtlUnicodeStringToInteger(ustr, base, val)
3073 unicode_string *ustr;
3074 uint32_t base;
3075 uint32_t *val;
3076 {
3077 uint16_t *uchr;
3078 int len, neg = 0;
3079 char abuf[64];
3080 char *astr;
3081
3082 uchr = ustr->us_buf;
3083 len = ustr->us_len;
3084 bzero(abuf, sizeof(abuf));
3085
3086 if ((char)((*uchr) & 0xFF) == '-') {
3087 neg = 1;
3088 uchr++;
3089 len -= 2;
3090 } else if ((char)((*uchr) & 0xFF) == '+') {
3091 neg = 0;
3092 uchr++;
3093 len -= 2;
3094 }
3095
3096 if (base == 0) {
3097 if ((char)((*uchr) & 0xFF) == 'b') {
3098 base = 2;
3099 uchr++;
3100 len -= 2;
3101 } else if ((char)((*uchr) & 0xFF) == 'o') {
3102 base = 8;
3103 uchr++;
3104 len -= 2;
3105 } else if ((char)((*uchr) & 0xFF) == 'x') {
3106 base = 16;
3107 uchr++;
3108 len -= 2;
3109 } else
3110 base = 10;
3111 }
3112
3113 astr = abuf;
3114 if (neg) {
3115 strcpy(astr, "-");
3116 astr++;
3117 }
3118
3119 ntoskrnl_unicode_to_ascii(uchr, astr, len);
3120 *val = strtoul(abuf, NULL, base);
3121
3122 return(STATUS_SUCCESS);
3123 }
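
/*
 * Example (sketch): parsing the UTF-16 string "-100" in base 10.
 *
 *	unicode_string	us;
 *	uint32_t	val;
 *	uint16_t	str[] = { '-', '1', '0', '0', 0 };
 *
 *	RtlInitUnicodeString(&us, str);
 *	RtlUnicodeStringToInteger(&us, 10, &val);
 *
 * Afterwards val holds (uint32_t)-100. Passing base 0 instead
 * honors a leading 'b', 'o' or 'x' radix prefix in the string.
 */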
3124
3125 void
3126 RtlFreeUnicodeString(ustr)
3127 unicode_string *ustr;
3128 {
3129 if (ustr->us_buf == NULL)
3130 return;
3131 ExFreePool(ustr->us_buf);
3132 ustr->us_buf = NULL;
3133 return;
3134 }
3135
3136 void
3137 RtlFreeAnsiString(astr)
3138 ansi_string *astr;
3139 {
3140 if (astr->as_buf == NULL)
3141 return;
3142 ExFreePool(astr->as_buf);
3143 astr->as_buf = NULL;
3144 return;
3145 }
3146
3147 static int
3148 atoi(str)
3149 const char *str;
3150 {
3151 return (int)strtol(str, (char **)NULL, 10);
3152 }
3153
3154 static long
3155 atol(str)
3156 const char *str;
3157 {
3158 return strtol(str, (char **)NULL, 10);
3159 }
3160
3161 static int
3162 rand(void)
3163 {
3164 struct timeval tv;
3165
3166 microtime(&tv);
3167 srandom(tv.tv_usec);
3168 return((int)random());
3169 }
3170
3171 static void
3172 srand(seed)
3173 unsigned int seed;
3174 {
3175 srandom(seed);
3176 return;
3177 }
3178
3179 static uint8_t
3180 IoIsWdmVersionAvailable(major, minor)
3181 uint8_t major;
3182 uint8_t minor;
3183 {
3184 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3185 return(TRUE);
3186 return(FALSE);
3187 }
3188
3189 static ndis_status
3190 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
3191 device_object *devobj;
3192 uint32_t regprop;
3193 uint32_t buflen;
3194 void *prop;
3195 uint32_t *reslen;
3196 {
3197 driver_object *drv;
3198 uint16_t **name;
3199
3200 drv = devobj->do_drvobj;
3201
3202 switch (regprop) {
3203 case DEVPROP_DRIVER_KEYNAME:
3204 name = prop;
3205 *name = drv->dro_drivername.us_buf;
3206 *reslen = drv->dro_drivername.us_len;
3207 break;
3208 default:
3209 return(STATUS_INVALID_PARAMETER_2);
3210 break;
3211 }
3212
3213 return(STATUS_SUCCESS);
3214 }
3215
3216 static void
3217 KeInitializeMutex(kmutex, level)
3218 kmutant *kmutex;
3219 uint32_t level;
3220 {
3221 InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3222 kmutex->km_abandoned = FALSE;
3223 kmutex->km_apcdisable = 1;
3224 kmutex->km_header.dh_sigstate = 1;
3225 kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3226 kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3227 kmutex->km_ownerthread = NULL;
3228 return;
3229 }
3230
3231 static uint32_t
3232 KeReleaseMutex(kmutex, kwait)
3233 kmutant *kmutex;
3234 uint8_t kwait;
3235 {
3236 uint32_t prevstate;
3237
3238 mtx_lock(&ntoskrnl_dispatchlock);
3239 prevstate = kmutex->km_header.dh_sigstate;
3240 if (kmutex->km_ownerthread != curthread) {
3241 mtx_unlock(&ntoskrnl_dispatchlock);
3242 return(STATUS_MUTANT_NOT_OWNED);
3243 }
3244
3245 kmutex->km_header.dh_sigstate++;
3246 kmutex->km_abandoned = FALSE;
3247
3248 if (kmutex->km_header.dh_sigstate == 1) {
3249 kmutex->km_ownerthread = NULL;
3250 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3251 }
3252
3253 mtx_unlock(&ntoskrnl_dispatchlock);
3254
3255 return(prevstate);
3256 }
3257
3258 static uint32_t
3259 KeReadStateMutex(kmutex)
3260 kmutant *kmutex;
3261 {
3262 return(kmutex->km_header.dh_sigstate);
3263 }
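
/*
 * As with other dispatcher objects, a mutex is acquired by waiting
 * on it. A sketch (hypothetical, no timeout; the level argument to
 * KeInitializeMutex() is ignored by this implementation):
 *
 *	kmutant	m;
 *
 *	KeInitializeMutex(&m, 0);
 *	KeWaitForSingleObject(&m, 0, 0, FALSE, NULL);
 *	... critical section ...
 *	KeReleaseMutex(&m, FALSE);
 */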
3264
3265 void
3266 KeInitializeEvent(kevent, type, state)
3267 nt_kevent *kevent;
3268 uint32_t type;
3269 uint8_t state;
3270 {
3271 InitializeListHead((&kevent->k_header.dh_waitlisthead));
3272 kevent->k_header.dh_sigstate = state;
3273 if (type == EVENT_TYPE_NOTIFY)
3274 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3275 else
3276 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3277 kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3278 return;
3279 }
3280
3281 uint32_t
3282 KeResetEvent(kevent)
3283 nt_kevent *kevent;
3284 {
3285 uint32_t prevstate;
3286
3287 mtx_lock(&ntoskrnl_dispatchlock);
3288 prevstate = kevent->k_header.dh_sigstate;
3289 kevent->k_header.dh_sigstate = FALSE;
3290 mtx_unlock(&ntoskrnl_dispatchlock);
3291
3292 return(prevstate);
3293 }
3294
3295 uint32_t
3296 KeSetEvent(kevent, increment, kwait)
3297 nt_kevent *kevent;
3298 uint32_t increment;
3299 uint8_t kwait;
3300 {
3301 uint32_t prevstate;
3302 wait_block *w;
3303 nt_dispatch_header *dh;
3304 struct thread *td;
3305 wb_ext *we;
3306
3307 mtx_lock(&ntoskrnl_dispatchlock);
3308 prevstate = kevent->k_header.dh_sigstate;
3309 dh = &kevent->k_header;
3310
3311 if (IsListEmpty(&dh->dh_waitlisthead))
3312 /*
3313 * If there's nobody in the waitlist, just set
3314 * the state to signalled.
3315 */
3316 dh->dh_sigstate = 1;
3317 else {
3318 /*
3319 * Get the first waiter. If this is a synchronization
3320 * event, just wake up that one thread (don't bother
3321 * setting the state to signalled since we're supposed
3322 * to automatically clear synchronization events anyway).
3323 *
		 * If it's a notification event, or the first
3325 * waiter is doing a WAITTYPE_ALL wait, go through
3326 * the full wait satisfaction process.
3327 */
3328 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3329 wait_block, wb_waitlist);
3330 we = w->wb_ext;
3331 td = we->we_td;
3332 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3333 w->wb_waittype == WAITTYPE_ALL) {
3334 if (prevstate == 0) {
3335 dh->dh_sigstate = 1;
3336 ntoskrnl_waittest(dh, increment);
3337 }
3338 } else {
			w->wb_awakened = TRUE;
3340 cv_broadcastpri(&we->we_cv, w->wb_oldpri -
3341 (increment * 4));
3342 }
3343 }
3344
3345 mtx_unlock(&ntoskrnl_dispatchlock);
3346
3347 return(prevstate);
3348 }
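
/*
 * The usual signal/wait pattern, sketched. A synchronization event
 * wakes exactly one waiter and clears itself, so no explicit
 * KeResetEvent() is needed here:
 *
 *	nt_kevent	ev;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_SYNC, FALSE);
 *	(waiter:)	KeWaitForSingleObject(&ev, 0, 0, TRUE, NULL);
 *	(signaller:)	KeSetEvent(&ev, IO_NO_INCREMENT, FALSE);
 */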
3349
3350 void
3351 KeClearEvent(kevent)
3352 nt_kevent *kevent;
3353 {
3354 kevent->k_header.dh_sigstate = FALSE;
3355 return;
3356 }
3357
3358 uint32_t
3359 KeReadStateEvent(kevent)
3360 nt_kevent *kevent;
3361 {
3362 return(kevent->k_header.dh_sigstate);
3363 }
3364
3365 /*
3366 * The object manager in Windows is responsible for managing
3367 * references and access to various types of objects, including
3368 * device_objects, events, threads, timers and so on. However,
3369 * there's a difference in the way objects are handled in user
3370 * mode versus kernel mode.
3371 *
3372 * In user mode (i.e. Win32 applications), all objects are
3373 * managed by the object manager. For example, when you create
3374 * a timer or event object, you actually end up with an
3375 * object_header (for the object manager's bookkeeping
3376 * purposes) and an object body (which contains the actual object
3377 * structure, e.g. ktimer, kevent, etc...). This allows Windows
3378 * to manage resource quotas and to enforce access restrictions
3379 * on basically every kind of system object handled by the kernel.
3380 *
3381 * However, in kernel mode, you only end up using the object
3382 * manager some of the time. For example, in a driver, you create
3383 * a timer object by simply allocating the memory for a ktimer
3384 * structure and initializing it with KeInitializeTimer(). Hence,
3385 * the timer has no object_header and no reference counting or
3386 * security/resource checks are done on it. The assumption in
3387 * this case is that if you're running in kernel mode, you know
3388 * what you're doing, and you're already at an elevated privilege
3389 * anyway.
3390 *
3391 * There are some exceptions to this. The two most important ones
3392 * for our purposes are device_objects and threads. We need to use
3393 * the object manager to do reference counting on device_objects,
3394 * and for threads, you can only get a pointer to a thread's
3395 * dispatch header by using ObReferenceObjectByHandle() on the
3396 * handle returned by PsCreateSystemThread().
3397 */
3398
3399 static ndis_status
3400 ObReferenceObjectByHandle(handle, reqaccess, otype,
3401 accessmode, object, handleinfo)
3402 ndis_handle handle;
3403 uint32_t reqaccess;
3404 void *otype;
3405 uint8_t accessmode;
3406 void **object;
3407 void **handleinfo;
3408 {
3409 nt_objref *nr;
3410
3411 nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3412 if (nr == NULL)
3413 return(STATUS_INSUFFICIENT_RESOURCES);
3414
3415 InitializeListHead((&nr->no_dh.dh_waitlisthead));
3416 nr->no_obj = handle;
3417 nr->no_dh.dh_type = DISP_TYPE_THREAD;
3418 nr->no_dh.dh_sigstate = 0;
3419 nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3420 sizeof(uint32_t));
3421 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3422 *object = nr;
3423
3424 return(STATUS_SUCCESS);
3425 }
3426
3427 static void
3428 ObfDereferenceObject(object)
3429 void *object;
3430 {
3431 nt_objref *nr;
3432
3433 nr = object;
3434 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3435 free(nr, M_DEVBUF);
3436
3437 return;
3438 }
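
/*
 * Putting the pieces together, the wait-for-thread-exit pattern
 * described above looks roughly like this (hypothetical names; the
 * access, type and mode arguments are ignored by this implementation,
 * and my_thread must be a Windows-convention pointer since it is
 * invoked via MSCALL1()):
 *
 *	ndis_handle	h;
 *	void		*thr;
 *
 *	PsCreateSystemThread(&h, 0, NULL, NULL, NULL, my_thread, my_ctx);
 *	ObReferenceObjectByHandle(h, 0, NULL, 0, &thr, NULL);
 *	KeWaitForSingleObject(thr, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(thr);
 *	ZwClose(h);
 */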
3439
3440 static uint32_t
3441 ZwClose(handle)
3442 ndis_handle handle;
3443 {
3444 return(STATUS_SUCCESS);
3445 }
3446
3447 static uint32_t
3448 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
3449 uint32_t traceclass;
3450 void *traceinfo;
3451 uint32_t infolen;
3452 uint32_t reqlen;
3453 void *buf;
3454 {
3455 return(STATUS_NOT_FOUND);
3456 }
3457
3458 static uint32_t
3459 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3460 void *guid, uint16_t messagenum, ...)
3461 {
3462 return(STATUS_SUCCESS);
3463 }
3464
3465 static uint32_t
3466 IoWMIRegistrationControl(dobj, action)
3467 device_object *dobj;
3468 uint32_t action;
3469 {
3470 return(STATUS_SUCCESS);
3471 }
3472
3473 /*
3474 * This is here just in case the thread returns without calling
3475 * PsTerminateSystemThread().
3476 */
3477 static void
3478 ntoskrnl_thrfunc(arg)
3479 void *arg;
3480 {
3481 thread_context *thrctx;
3482 uint32_t (*tfunc)(void *);
3483 void *tctx;
3484 uint32_t rval;
3485
3486 thrctx = arg;
3487 tfunc = thrctx->tc_thrfunc;
3488 tctx = thrctx->tc_thrctx;
3489 free(thrctx, M_TEMP);
3490
3491 rval = MSCALL1(tfunc, tctx);
3492
3493 PsTerminateSystemThread(rval);
3494 return; /* notreached */
3495 }
3496
3497 static ndis_status
3498 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
3499 clientid, thrfunc, thrctx)
3500 ndis_handle *handle;
3501 uint32_t reqaccess;
3502 void *objattrs;
3503 ndis_handle phandle;
3504 void *clientid;
3505 void *thrfunc;
3506 void *thrctx;
3507 {
3508 int error;
3509 char tname[128];
3510 thread_context *tc;
3511 struct proc *p;
3512
3513 tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3514 if (tc == NULL)
3515 return(STATUS_INSUFFICIENT_RESOURCES);
3516
3517 tc->tc_thrctx = thrctx;
3518 tc->tc_thrfunc = thrfunc;
3519
3520 sprintf(tname, "windows kthread %d", ntoskrnl_kth);
3521 error = kthread_create(ntoskrnl_thrfunc, tc, &p,
3522 RFHIGHPID, NDIS_KSTACK_PAGES, tname);
3523
3524 if (error) {
3525 free(tc, M_TEMP);
3526 return(STATUS_INSUFFICIENT_RESOURCES);
3527 }
3528
3529 *handle = p;
3530 ntoskrnl_kth++;
3531
3532 return(STATUS_SUCCESS);
3533 }
3534
3535 /*
3536 * In Windows, the exit of a thread is an event that you're allowed
3537 * to wait on, assuming you've obtained a reference to the thread using
3538 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3539 * simulate this behavior is to register each thread we create in a
3540 * reference list, and if someone holds a reference to us, we poke
3541 * them.
3542 */
3543 static ndis_status
3544 PsTerminateSystemThread(status)
3545 ndis_status status;
3546 {
3547 struct nt_objref *nr;
3548
3549 mtx_lock(&ntoskrnl_dispatchlock);
3550 TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
3551 if (nr->no_obj != curthread->td_proc)
3552 continue;
3553 nr->no_dh.dh_sigstate = 1;
3554 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3555 break;
3556 }
3557 mtx_unlock(&ntoskrnl_dispatchlock);
3558
3559 ntoskrnl_kth--;
3560
3561 #if __FreeBSD_version < 502113
3562 mtx_lock(&Giant);
3563 #endif
3564 kthread_exit(0);
3565 return(0); /* notreached */
3566 }
3567
3568 static uint32_t
3569 DbgPrint(char *fmt, ...)
3570 {
3571 va_list ap;
3572
3573 if (bootverbose) {
3574 va_start(ap, fmt);
3575 vprintf(fmt, ap);
3576 }
3577
3578 return(STATUS_SUCCESS);
3579 }
3580
3581 static void
3582 DbgBreakPoint(void)
3583 {
3584
3585 #if __FreeBSD_version < 502113
3586 Debugger("DbgBreakPoint(): breakpoint");
3587 #else
3588 kdb_enter("DbgBreakPoint(): breakpoint");
3589 #endif
3590 }
3591
3592 static void
3593 ntoskrnl_timercall(arg)
3594 void *arg;
3595 {
3596 ktimer *timer;
3597 struct timeval tv;
3598 kdpc *dpc;
3599
3600 mtx_lock(&ntoskrnl_dispatchlock);
3601
3602 timer = arg;
3603
3604 #ifdef NTOSKRNL_DEBUG_TIMERS
3605 ntoskrnl_timer_fires++;
3606 #endif
3607 ntoskrnl_remove_timer(timer);
3608
3609 /*
3610 * This should never happen, but complain
3611 * if it does.
3612 */
3613
3614 if (timer->k_header.dh_inserted == FALSE) {
3615 mtx_unlock(&ntoskrnl_dispatchlock);
3616 printf("NTOS: timer %p fired even though "
3617 "it was canceled\n", timer);
3618 return;
3619 }
3620
3621 /* Mark the timer as no longer being on the timer queue. */
3622
3623 timer->k_header.dh_inserted = FALSE;
3624
3625 /* Now signal the object and satisfy any waits on it. */
3626
3627 timer->k_header.dh_sigstate = 1;
3628 ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3629
3630 /*
3631 * If this is a periodic timer, re-arm it
3632 * so it will fire again. We do this before
3633 * calling any deferred procedure calls because
3634 * it's possible the DPC might cancel the timer,
3635 * in which case it would be wrong for us to
3636 * re-arm it again afterwards.
3637 */
3638
3639 if (timer->k_period) {
3640 tv.tv_sec = 0;
3641 tv.tv_usec = timer->k_period * 1000;
3642 timer->k_header.dh_inserted = TRUE;
3643 ntoskrnl_insert_timer(timer, tvtohz(&tv));
3644 #ifdef NTOSKRNL_DEBUG_TIMERS
3645 ntoskrnl_timer_reloads++;
3646 #endif
3647 }
3648
3649 dpc = timer->k_dpc;
3650
3651 mtx_unlock(&ntoskrnl_dispatchlock);
3652
3653 /* If there's a DPC associated with the timer, queue it up. */
3654
3655 if (dpc != NULL)
3656 KeInsertQueueDpc(dpc, NULL, NULL);
3657
3658 return;
3659 }
3660
3661 #ifdef NTOSKRNL_DEBUG_TIMERS
3662 static int
3663 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3664 {
3665 int ret;
3666
3667 ret = 0;
3668 ntoskrnl_show_timers();
3669 return (sysctl_handle_int(oidp, &ret, 0, req));
3670 }
3671
3672 static void
3673 ntoskrnl_show_timers()
3674 {
3675 int i = 0;
3676 list_entry *l;
3677
3678 mtx_lock_spin(&ntoskrnl_calllock);
3679 l = ntoskrnl_calllist.nle_flink;
3680 while(l != &ntoskrnl_calllist) {
3681 i++;
3682 l = l->nle_flink;
3683 }
3684 mtx_unlock_spin(&ntoskrnl_calllock);
3685
3686 printf("\n");
3687 printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3688 printf("timer sets: %qu\n", ntoskrnl_timer_sets);
3689 printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3690 printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3691 printf("timer fires: %qu\n", ntoskrnl_timer_fires);
3692 printf("\n");
3693
3694 return;
3695 }
3696 #endif
3697
3698 /*
3699 * Must be called with dispatcher lock held.
3700 */
3701
3702 static void
3703 ntoskrnl_insert_timer(timer, ticks)
3704 ktimer *timer;
3705 int ticks;
3706 {
3707 callout_entry *e;
3708 list_entry *l;
3709 struct callout *c;
3710
3711 /*
3712 * Try and allocate a timer.
3713 */
3714 mtx_lock_spin(&ntoskrnl_calllock);
3715 if (IsListEmpty(&ntoskrnl_calllist)) {
3716 mtx_unlock_spin(&ntoskrnl_calllock);
3717 #ifdef NTOSKRNL_DEBUG_TIMERS
3718 ntoskrnl_show_timers();
3719 #endif
3720 panic("out of timers!");
3721 }
3722 l = RemoveHeadList(&ntoskrnl_calllist);
3723 mtx_unlock_spin(&ntoskrnl_calllock);
3724
3725 e = CONTAINING_RECORD(l, callout_entry, ce_list);
3726 c = &e->ce_callout;
3727
3728 timer->k_callout = c;
3729
3730 callout_init(c, CALLOUT_MPSAFE);
3731 callout_reset(c, ticks, ntoskrnl_timercall, timer);
3732
3733 return;
3734 }
3735
3736 static void
3737 ntoskrnl_remove_timer(timer)
3738 ktimer *timer;
3739 {
3740 callout_entry *e;
3741
3742 e = (callout_entry *)timer->k_callout;
3743 callout_stop(timer->k_callout);
3744
3745 mtx_lock_spin(&ntoskrnl_calllock);
3746 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3747 mtx_unlock_spin(&ntoskrnl_calllock);
3748
3749 return;
3750 }
3751
3752 void
3753 KeInitializeTimer(timer)
3754 ktimer *timer;
3755 {
3756 if (timer == NULL)
3757 return;
3758
3759 KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3760
3761 return;
3762 }
3763
3764 void
3765 KeInitializeTimerEx(timer, type)
3766 ktimer *timer;
3767 uint32_t type;
3768 {
3769 if (timer == NULL)
3770 return;
3771
3772 bzero((char *)timer, sizeof(ktimer));
3773 InitializeListHead((&timer->k_header.dh_waitlisthead));
3774 timer->k_header.dh_sigstate = FALSE;
3775 timer->k_header.dh_inserted = FALSE;
3776 if (type == EVENT_TYPE_NOTIFY)
3777 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3778 else
3779 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3780 timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3781
3782 return;
3783 }
3784
3785 /*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
3787 * properties:
3788 * - It runs at DISPATCH_LEVEL.
3789 * - It can have one of 3 importance values that control when it
3790 * runs relative to other DPCs in the queue.
3791 * - On SMP systems, it can be set to run on a specific processor.
3792 * In order to satisfy the last property, we create a DPC thread for
3793 * each CPU in the system and bind it to that CPU. Each thread
3794 * maintains three queues with different importance levels, which
3795 * will be processed in order from lowest to highest.
3796 *
3797 * In Windows, interrupt handlers run as DPCs. (Not to be confused
3798 * with ISRs, which run in interrupt context and can preempt DPCs.)
3799 * ISRs are given the highest importance so that they'll take
3800 * precedence over timers and other things.
3801 */
3802
3803 static void
3804 ntoskrnl_dpc_thread(arg)
3805 void *arg;
3806 {
3807 kdpc_queue *kq;
3808 kdpc *d;
3809 list_entry *l;
3810 uint8_t irql;
3811
3812 kq = arg;
3813
3814 InitializeListHead(&kq->kq_disp);
3815 kq->kq_td = curthread;
3816 kq->kq_exit = 0;
3817 kq->kq_running = FALSE;
3818 KeInitializeSpinLock(&kq->kq_lock);
3819 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3820 KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3821
3822 /*
3823 * Elevate our priority. DPCs are used to run interrupt
3824 * handlers, and they should trigger as soon as possible
3825 * once scheduled by an ISR.
3826 */
3827
3828 mtx_lock_spin(&sched_lock);
3829 #ifdef NTOSKRNL_MULTIPLE_DPCS
3830 #if __FreeBSD_version >= 502102
3831 sched_bind(curthread, kq->kq_cpu);
3832 #endif
3833 #endif
3834 sched_prio(curthread, PRI_MIN_KERN);
3835 #if __FreeBSD_version < 600000
3836 curthread->td_base_pri = PRI_MIN_KERN;
3837 #endif
3838 mtx_unlock_spin(&sched_lock);
3839
3840 while (1) {
3841 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3842
3843 KeAcquireSpinLock(&kq->kq_lock, &irql);
3844
3845 if (kq->kq_exit) {
3846 KeReleaseSpinLock(&kq->kq_lock, irql);
3847 break;
3848 }
3849
3850 kq->kq_running = TRUE;
3851
3852 while (!IsListEmpty(&kq->kq_disp)) {
3853 l = RemoveHeadList((&kq->kq_disp));
3854 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3855 InitializeListHead((&d->k_dpclistentry));
3856 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3857 MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3858 d->k_sysarg1, d->k_sysarg2);
3859 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3860 }
3861
3862 kq->kq_running = FALSE;
3863
3864 KeReleaseSpinLock(&kq->kq_lock, irql);
3865
3866 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3867 }
3868
3869 #if __FreeBSD_version < 502113
3870 mtx_lock(&Giant);
3871 #endif
3872 kthread_exit(0);
3873 return; /* notreached */
3874 }
3875
3876 static void
3877 ntoskrnl_destroy_dpc_threads(void)
3878 {
3879 kdpc_queue *kq;
3880 kdpc dpc;
3881 int i;
3882
#ifdef NTOSKRNL_MULTIPLE_DPCS
	for (i = 0; i < mp_ncpus; i++) {
#else
	for (i = 0; i < 1; i++) {
#endif
		kq = kq_queues + i;
3890
3891 kq->kq_exit = 1;
3892 KeInitializeDpc(&dpc, NULL, NULL);
3893 KeSetTargetProcessorDpc(&dpc, i);
3894 KeInsertQueueDpc(&dpc, NULL, NULL);
3895 tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", 0);
3896 }
3897
3898 return;
3899 }
3900
3901 static uint8_t
3902 ntoskrnl_insert_dpc(head, dpc)
3903 list_entry *head;
3904 kdpc *dpc;
3905 {
3906 list_entry *l;
3907 kdpc *d;
3908
3909 l = head->nle_flink;
3910 while (l != head) {
3911 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3912 if (d == dpc)
3913 return(FALSE);
3914 l = l->nle_flink;
3915 }
3916
3917 if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3918 InsertTailList((head), (&dpc->k_dpclistentry));
3919 else
3920 InsertHeadList((head), (&dpc->k_dpclistentry));
3921
3922 return (TRUE);
3923 }
3924
3925 void
3926 KeInitializeDpc(dpc, dpcfunc, dpcctx)
3927 kdpc *dpc;
3928 void *dpcfunc;
3929 void *dpcctx;
3930 {
3931
3932 if (dpc == NULL)
3933 return;
3934
3935 dpc->k_deferedfunc = dpcfunc;
3936 dpc->k_deferredctx = dpcctx;
3937 dpc->k_num = KDPC_CPU_DEFAULT;
3938 dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3939 InitializeListHead((&dpc->k_dpclistentry));
3940
3941 return;
3942 }
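
/*
 * Sketch of DPC registration and queueing (hypothetical names;
 * dpcfunc must be a Windows-convention pointer, since the DPC
 * thread invokes it with MSCALL4(func, dpc, ctx, sysarg1, sysarg2)):
 *
 *	kdpc	d;
 *
 *	KeInitializeDpc(&d, my_dpc_func, my_ctx);
 *	KeSetImportanceDpc(&d, KDPC_IMPORTANCE_HIGH);
 *	KeInsertQueueDpc(&d, sysarg1, sysarg2);
 */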
3943
3944 uint8_t
3945 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
3946 kdpc *dpc;
3947 void *sysarg1;
3948 void *sysarg2;
3949 {
3950 kdpc_queue *kq;
3951 uint8_t r;
3952 uint8_t irql;
3953
3954 if (dpc == NULL)
3955 return(FALSE);
3956
3957 kq = kq_queues;
3958
3959 #ifdef NTOSKRNL_MULTIPLE_DPCS
3960 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3961
3962 /*
3963 * By default, the DPC is queued to run on the same CPU
3964 * that scheduled it.
3965 */
3966
3967 if (dpc->k_num == KDPC_CPU_DEFAULT)
3968 kq += curthread->td_oncpu;
3969 else
3970 kq += dpc->k_num;
3971 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3972 #else
3973 KeAcquireSpinLock(&kq->kq_lock, &irql);
3974 #endif
3975
3976 r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3977 if (r == TRUE) {
3978 dpc->k_sysarg1 = sysarg1;
3979 dpc->k_sysarg2 = sysarg2;
3980 }
3981 KeReleaseSpinLock(&kq->kq_lock, irql);
3982
3983 if (r == FALSE)
3984 return(r);
3985
3986 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3987
3988 return(r);
3989 }
3990
3991 uint8_t
3992 KeRemoveQueueDpc(dpc)
3993 kdpc *dpc;
3994 {
3995 kdpc_queue *kq;
3996 uint8_t irql;
3997
3998 if (dpc == NULL)
3999 return(FALSE);
4000
4001 #ifdef NTOSKRNL_MULTIPLE_DPCS
4002 KeRaiseIrql(DISPATCH_LEVEL, &irql);
4003
4004 kq = kq_queues + dpc->k_num;
4005
4006 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
4007 #else
4008 kq = kq_queues;
4009 KeAcquireSpinLock(&kq->kq_lock, &irql);
4010 #endif
4011
4012 if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
4013 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
4014 KeLowerIrql(irql);
4015 return(FALSE);
4016 }
4017
4018 RemoveEntryList((&dpc->k_dpclistentry));
4019 InitializeListHead((&dpc->k_dpclistentry));
4020
4021 KeReleaseSpinLock(&kq->kq_lock, irql);
4022
4023 return(TRUE);
4024 }
4025
4026 void
4027 KeSetImportanceDpc(dpc, imp)
4028 kdpc *dpc;
4029 uint32_t imp;
4030 {
4031 if (imp != KDPC_IMPORTANCE_LOW &&
4032 imp != KDPC_IMPORTANCE_MEDIUM &&
4033 imp != KDPC_IMPORTANCE_HIGH)
4034 return;
4035
4036 dpc->k_importance = (uint8_t)imp;
4037 return;
4038 }
4039
4040 void
4041 KeSetTargetProcessorDpc(dpc, cpu)
4042 kdpc *dpc;
4043 uint8_t cpu;
4044 {
	if (cpu >= mp_ncpus)
4046 return;
4047
4048 dpc->k_num = cpu;
4049 return;
4050 }
4051
4052 void
4053 KeFlushQueuedDpcs(void)
4054 {
4055 kdpc_queue *kq;
4056 int i;
4057
4058 /*
4059 * Poke each DPC queue and wait
4060 * for them to drain.
4061 */
4062
4063 #ifdef NTOSKRNL_MULTIPLE_DPCS
4064 for (i = 0; i < mp_ncpus; i++) {
4065 #else
4066 for (i = 0; i < 1; i++) {
4067 #endif
4068 kq = kq_queues + i;
4069 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
4070 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
4071 }
4072
4073 return;
4074 }
4075
4076 uint32_t
4077 KeGetCurrentProcessorNumber(void)
4078 {
4079 return((uint32_t)curthread->td_oncpu);
4080 }
4081
4082 uint8_t
4083 KeSetTimerEx(timer, duetime, period, dpc)
4084 ktimer *timer;
4085 int64_t duetime;
4086 uint32_t period;
4087 kdpc *dpc;
4088 {
4089 struct timeval tv;
4090 uint64_t curtime;
4091 uint8_t pending;
4092
4093 if (timer == NULL)
4094 return(FALSE);
4095
4096 mtx_lock(&ntoskrnl_dispatchlock);
4097
4098 if (timer->k_header.dh_inserted == TRUE) {
4099 ntoskrnl_remove_timer(timer);
4100 #ifdef NTOSKRNL_DEBUG_TIMERS
4101 ntoskrnl_timer_cancels++;
4102 #endif
4103 timer->k_header.dh_inserted = FALSE;
4104 pending = TRUE;
4105 } else
4106 pending = FALSE;
4107
4108 timer->k_duetime = duetime;
4109 timer->k_period = period;
4110 timer->k_header.dh_sigstate = FALSE;
4111 timer->k_dpc = dpc;
4112
4113 if (duetime < 0) {
4114 tv.tv_sec = - (duetime) / 10000000;
4115 tv.tv_usec = (- (duetime) / 10) -
4116 (tv.tv_sec * 1000000);
4117 } else {
4118 ntoskrnl_time(&curtime);
4119 if (duetime < curtime)
4120 tv.tv_sec = tv.tv_usec = 0;
4121 else {
4122 tv.tv_sec = ((duetime) - curtime) / 10000000;
4123 tv.tv_usec = ((duetime) - curtime) / 10 -
4124 (tv.tv_sec * 1000000);
4125 }
4126 }
4127
4128 timer->k_header.dh_inserted = TRUE;
4129 ntoskrnl_insert_timer(timer, tvtohz(&tv));
4130 #ifdef NTOSKRNL_DEBUG_TIMERS
4131 ntoskrnl_timer_sets++;
4132 #endif
4133
4134 mtx_unlock(&ntoskrnl_dispatchlock);
4135
4136 return(pending);
4137 }
4138
4139 uint8_t
4140 KeSetTimer(timer, duetime, dpc)
4141 ktimer *timer;
4142 int64_t duetime;
4143 kdpc *dpc;
4144 {
4145 return (KeSetTimerEx(timer, duetime, 0, dpc));
4146 }
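
/*
 * A periodic timer with an attached DPC, sketched (hypothetical
 * names). Due times are in 100ns units, negative values are
 * relative to the present, and the period is in milliseconds:
 *
 *	ktimer	t;
 *	kdpc	d;
 *
 *	KeInitializeTimer(&t);
 *	KeInitializeDpc(&d, my_dpc_func, my_ctx);
 *	KeSetTimerEx(&t, -10000000LL, 500, &d);
 *	...
 *	KeCancelTimer(&t);
 *
 * This fires first after one second, then every 500ms thereafter.
 */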
4147
4148 /*
4149 * The Windows DDK documentation seems to say that cancelling
4150 * a timer that has a DPC will result in the DPC also being
4151 * cancelled, but this isn't really the case.
4152 */
4153
4154 uint8_t
4155 KeCancelTimer(timer)
4156 ktimer *timer;
4157 {
4158 uint8_t pending;
4159
4160 if (timer == NULL)
4161 return(FALSE);
4162
4163 mtx_lock(&ntoskrnl_dispatchlock);
4164
4165 pending = timer->k_header.dh_inserted;
4166
4167 if (timer->k_header.dh_inserted == TRUE) {
4168 timer->k_header.dh_inserted = FALSE;
4169 ntoskrnl_remove_timer(timer);
4170 #ifdef NTOSKRNL_DEBUG_TIMERS
4171 ntoskrnl_timer_cancels++;
4172 #endif
4173 }
4174
4175 mtx_unlock(&ntoskrnl_dispatchlock);
4176
4177 return(pending);
4178 }
4179
4180 uint8_t
4181 KeReadStateTimer(timer)
4182 ktimer *timer;
4183 {
4184 return(timer->k_header.dh_sigstate);
4185 }
4186
4187 static void
4188 dummy()
4189 {
	printf("ntoskrnl dummy called...\n");
4191 return;
4192 }
4193
4194
4195 image_patch_table ntoskrnl_functbl[] = {
4196 IMPORT_SFUNC(RtlZeroMemory, 2),
4197 IMPORT_SFUNC(RtlCopyMemory, 3),
4198 IMPORT_SFUNC(RtlCompareMemory, 3),
4199 IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4200 IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4201 IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4202 IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4203 IMPORT_SFUNC(RtlInitAnsiString, 2),
4204 IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4205 IMPORT_SFUNC(RtlInitUnicodeString, 2),
4206 IMPORT_SFUNC(RtlFreeAnsiString, 1),
4207 IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4208 IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4209 IMPORT_CFUNC(sprintf, 0),
4210 IMPORT_CFUNC(vsprintf, 0),
4211 IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
4212 IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
4213 IMPORT_CFUNC(DbgPrint, 0),
4214 IMPORT_SFUNC(DbgBreakPoint, 0),
4215 IMPORT_CFUNC(strncmp, 0),
4216 IMPORT_CFUNC(strcmp, 0),
4217 IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
4218 IMPORT_CFUNC(strncpy, 0),
4219 IMPORT_CFUNC(strcpy, 0),
4220 IMPORT_CFUNC(strlen, 0),
4221 IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
4222 IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
4223 IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
4224 IMPORT_CFUNC_MAP(strchr, index, 0),
4225 IMPORT_CFUNC_MAP(strrchr, rindex, 0),
4226 IMPORT_CFUNC(memcpy, 0),
4227 IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
4228 IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
4229 IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
4230 IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
4231 IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
4232 IMPORT_FFUNC(IofCallDriver, 2),
4233 IMPORT_FFUNC(IofCompleteRequest, 2),
4234 IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
4235 IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
4236 IMPORT_SFUNC(IoCancelIrp, 1),
4237 IMPORT_SFUNC(IoConnectInterrupt, 11),
4238 IMPORT_SFUNC(IoDisconnectInterrupt, 1),
4239 IMPORT_SFUNC(IoCreateDevice, 7),
4240 IMPORT_SFUNC(IoDeleteDevice, 1),
4241 IMPORT_SFUNC(IoGetAttachedDevice, 1),
4242 IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
4243 IMPORT_SFUNC(IoDetachDevice, 1),
4244 IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
4245 IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
4246 IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
4247 IMPORT_SFUNC(IoAllocateIrp, 2),
4248 IMPORT_SFUNC(IoReuseIrp, 2),
4249 IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
4250 IMPORT_SFUNC(IoFreeIrp, 1),
4251 IMPORT_SFUNC(IoInitializeIrp, 3),
4252 IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
4253 IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
4254 IMPORT_SFUNC(KeSynchronizeExecution, 3),
4255 IMPORT_SFUNC(KeWaitForSingleObject, 5),
4256 IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
4257 IMPORT_SFUNC(_allmul, 4),
4258 IMPORT_SFUNC(_alldiv, 4),
4259 IMPORT_SFUNC(_allrem, 4),
4260 IMPORT_RFUNC(_allshr, 0),
4261 IMPORT_RFUNC(_allshl, 0),
4262 IMPORT_SFUNC(_aullmul, 4),
4263 IMPORT_SFUNC(_aulldiv, 4),
4264 IMPORT_SFUNC(_aullrem, 4),
4265 IMPORT_RFUNC(_aullshr, 0),
4266 IMPORT_RFUNC(_aullshl, 0),
4267 IMPORT_CFUNC(atoi, 0),
4268 IMPORT_CFUNC(atol, 0),
4269 IMPORT_CFUNC(rand, 0),
4270 IMPORT_CFUNC(srand, 0),
4271 IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
4272 IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
4273 IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
4274 IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
4275 IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
4276 IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
4277 IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
4278 IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
4279 IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
4280 IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
4281 IMPORT_FFUNC(InterlockedPopEntrySList, 1),
4282 IMPORT_FFUNC(InterlockedPushEntrySList, 2),
4283 IMPORT_SFUNC(ExQueryDepthSList, 1),
4284 IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
4285 InterlockedPopEntrySList, 1),
4286 IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
4287 InterlockedPushEntrySList, 2),
4288 IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
4289 IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
4290 IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
4291 IMPORT_SFUNC(ExFreePool, 1),
4292 #ifdef __i386__
4293 IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
4294 IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
4295 IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
4296 #else
4297 /*
4298 * For AMD64, we can get away with just mapping
4299 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
4300 * because the calling conventions end up being the same.
4301 * On i386, we have to be careful because KfAcquireSpinLock()
4302 * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
4303 */
4304 IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
4305 IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
4306 IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
4307 #endif
4308 IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
4309 IMPORT_FFUNC(InterlockedIncrement, 1),
4310 IMPORT_FFUNC(InterlockedDecrement, 1),
4311 IMPORT_FFUNC(InterlockedExchange, 2),
4312 IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
4313 IMPORT_SFUNC(IoAllocateMdl, 5),
4314 IMPORT_SFUNC(IoFreeMdl, 1),
4315 IMPORT_SFUNC(MmAllocateContiguousMemory, 2),
4316 IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5),
4317 IMPORT_SFUNC(MmFreeContiguousMemory, 1),
4318 IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
4319 IMPORT_SFUNC_MAP(MmGetPhysicalAddress, pmap_kextract, 1),
4320 IMPORT_SFUNC(MmSizeOfMdl, 1),
4321 IMPORT_SFUNC(MmMapLockedPages, 2),
4322 IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
4323 IMPORT_SFUNC(MmUnmapLockedPages, 2),
4324 IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
4325 IMPORT_SFUNC(MmIsAddressValid, 1),
4326 IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
4327 IMPORT_SFUNC(MmUnmapIoSpace, 2),
4328 IMPORT_SFUNC(KeInitializeSpinLock, 1),
4329 IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
4330 IMPORT_SFUNC(IoGetDeviceProperty, 5),
4331 IMPORT_SFUNC(IoAllocateWorkItem, 1),
4332 IMPORT_SFUNC(IoFreeWorkItem, 1),
4333 IMPORT_SFUNC(IoQueueWorkItem, 4),
4334 IMPORT_SFUNC(ExQueueWorkItem, 2),
4335 IMPORT_SFUNC(ntoskrnl_workitem, 2),
4336 IMPORT_SFUNC(KeInitializeMutex, 2),
4337 IMPORT_SFUNC(KeReleaseMutex, 2),
4338 IMPORT_SFUNC(KeReadStateMutex, 1),
4339 IMPORT_SFUNC(KeInitializeEvent, 3),
4340 IMPORT_SFUNC(KeSetEvent, 3),
4341 IMPORT_SFUNC(KeResetEvent, 1),
4342 IMPORT_SFUNC(KeClearEvent, 1),
4343 IMPORT_SFUNC(KeReadStateEvent, 1),
4344 IMPORT_SFUNC(KeInitializeTimer, 1),
4345 IMPORT_SFUNC(KeInitializeTimerEx, 2),
4346 IMPORT_SFUNC(KeSetTimer, 3),
4347 IMPORT_SFUNC(KeSetTimerEx, 4),
4348 IMPORT_SFUNC(KeCancelTimer, 1),
4349 IMPORT_SFUNC(KeReadStateTimer, 1),
4350 IMPORT_SFUNC(KeInitializeDpc, 3),
4351 IMPORT_SFUNC(KeInsertQueueDpc, 3),
4352 IMPORT_SFUNC(KeRemoveQueueDpc, 1),
4353 IMPORT_SFUNC(KeSetImportanceDpc, 2),
4354 IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
4355 IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
4356 IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
4357 IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
4358 IMPORT_FFUNC(ObfDereferenceObject, 1),
4359 IMPORT_SFUNC(ZwClose, 1),
4360 IMPORT_SFUNC(PsCreateSystemThread, 7),
4361 IMPORT_SFUNC(PsTerminateSystemThread, 1),
4362 IMPORT_SFUNC(IoWMIRegistrationControl, 2),
4363 IMPORT_SFUNC(WmiQueryTraceInformation, 5),
4364 IMPORT_CFUNC(WmiTraceMessage, 0),
4365
4366 /*
4367 * This last entry is a catch-all for any function we haven't
4368 * implemented yet. The PE import list patching routine will
4369 * use it for any function that doesn't have an explicit match
4370 * in this table.
4371 */
4372
4373 { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
4374
4375 /* End of list. */
4376
4377 { NULL, NULL, NULL }
4378 };