1 /*-
2 * Copyright (c) 2003
3 * Bill Paul <wpaul@windriver.com>. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Bill Paul.
16 * 4. Neither the name of the author nor the names of any co-contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * $FreeBSD: src/sys/compat/ndis/subr_ntoskrnl.c,v 1.117 2012/11/17 01:51:26 svnexp Exp $
33 */
34
35 #include <sys/ctype.h>
36 #include <sys/unistd.h>
37 #include <sys/param.h>
38 #include <sys/types.h>
39 #include <sys/errno.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/lock.h>
43 #include <sys/thread2.h>
44 #include <sys/mutex.h>
45 #include <sys/mutex2.h>
46
47 #include <sys/callout.h>
48 #include <sys/kernel.h>
49 #include <sys/proc.h>
50 #include <sys/condvar.h>
51 #include <sys/kthread.h>
52 #include <sys/module.h>
53 #include <sys/sched.h>
54 #include <sys/sysctl.h>
55
56 #include <machine/atomic.h>
57 #include <machine/stdarg.h>
58
59 #include <sys/bus.h>
60 #include <sys/rman.h>
61 #include <sys/objcache.h>
62
63 #include <vm/vm.h>
64 #include <vm/vm_param.h>
65 #include <vm/pmap.h>
66 #include <vm/vm_kern.h>
67 #include <vm/vm_map.h>
68 #include <vm/vm_extern.h>
69
70 #include <emulation/ndis/pe_var.h>
71 #include <emulation/ndis/cfg_var.h>
72 #include <emulation/ndis/resource_var.h>
73 #include <emulation/ndis/ntoskrnl_var.h>
74 #include <emulation/ndis/hal_var.h>
75 #include <emulation/ndis/ndis_var.h>
76
77 #include <stdarg.h>
78
79 #ifdef NTOSKRNL_DEBUG_TIMERS
80 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
81
82 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
83 NULL, 0, sysctl_show_timers, "I",
84 "Show ntoskrnl timer stats");
85 #endif
86
87 struct kdpc_queue {
88 list_entry kq_disp;
89 struct thread *kq_td;
90 int kq_cpu;
91 int kq_exit;
92 int kq_running;
93 kspin_lock kq_lock;
94 nt_kevent kq_proc;
95 nt_kevent kq_done;
96 };
97
98 typedef struct kdpc_queue kdpc_queue;
99
100 struct wb_ext {
101 struct cv we_cv;
102 struct thread *we_td;
103 };
104
105 typedef struct wb_ext wb_ext;
106
107 #define NTOSKRNL_TIMEOUTS 256
108 #ifdef NTOSKRNL_DEBUG_TIMERS
109 static uint64_t ntoskrnl_timer_fires;
110 static uint64_t ntoskrnl_timer_sets;
111 static uint64_t ntoskrnl_timer_reloads;
112 static uint64_t ntoskrnl_timer_cancels;
113 #endif
114
115 struct callout_entry {
116 struct callout ce_callout;
117 list_entry ce_list;
118 };
119
120 typedef struct callout_entry callout_entry;
121
122 static struct list_entry ntoskrnl_calllist;
123 static struct mtx ntoskrnl_calllock;
124 struct kuser_shared_data kuser_shared_data;
125
126 static struct list_entry ntoskrnl_intlist;
127 static kspin_lock ntoskrnl_intlock;
128
129 static uint8_t RtlEqualUnicodeString(unicode_string *,
130 unicode_string *, uint8_t);
131 static void RtlCopyString(ansi_string *, const ansi_string *);
132 static void RtlCopyUnicodeString(unicode_string *,
133 unicode_string *);
134 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
135 void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
136 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
137 device_object *, void *, uint32_t, uint64_t *, io_status_block *);
138 static irp *IoBuildDeviceIoControlRequest(uint32_t,
139 device_object *, void *, uint32_t, void *, uint32_t,
140 uint8_t, nt_kevent *, io_status_block *);
141 static irp *IoAllocateIrp(uint8_t, uint8_t);
142 static void IoReuseIrp(irp *, uint32_t);
143 static void IoFreeIrp(irp *);
144 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
145 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
146 static uint32_t KeWaitForMultipleObjects(uint32_t,
147 nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
148 int64_t *, wait_block *);
149 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
150 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
151 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
152 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
153 static void ntoskrnl_insert_timer(ktimer *, int);
154 static void ntoskrnl_remove_timer(ktimer *);
155 #ifdef NTOSKRNL_DEBUG_TIMERS
156 static void ntoskrnl_show_timers(void);
157 #endif
158 static void ntoskrnl_timercall(void *);
159 static void ntoskrnl_dpc_thread(void *);
160 static void ntoskrnl_destroy_dpc_threads(void);
161 static void ntoskrnl_destroy_workitem_threads(void);
162 static void ntoskrnl_workitem_thread(void *);
163 static void ntoskrnl_workitem(device_object *, void *);
164 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
165 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
166 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
167 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
168 static uint16_t READ_REGISTER_USHORT(uint16_t *);
169 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
170 static uint32_t READ_REGISTER_ULONG(uint32_t *);
171 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
172 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
173 static int64_t _allmul(int64_t, int64_t);
174 static int64_t _alldiv(int64_t, int64_t);
175 static int64_t _allrem(int64_t, int64_t);
176 static int64_t _allshr(int64_t, uint8_t);
177 static int64_t _allshl(int64_t, uint8_t);
178 static uint64_t _aullmul(uint64_t, uint64_t);
179 static uint64_t _aulldiv(uint64_t, uint64_t);
180 static uint64_t _aullrem(uint64_t, uint64_t);
181 static uint64_t _aullshr(uint64_t, uint8_t);
182 static uint64_t _aullshl(uint64_t, uint8_t);
183 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
184 static void InitializeSListHead(slist_header *);
185 static slist_entry *ntoskrnl_popsl(slist_header *);
186 static void ExFreePoolWithTag(void *, uint32_t);
187 static void ExInitializePagedLookasideList(paged_lookaside_list *,
188 lookaside_alloc_func *, lookaside_free_func *,
189 uint32_t, size_t, uint32_t, uint16_t);
190 static void ExDeletePagedLookasideList(paged_lookaside_list *);
191 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
192 lookaside_alloc_func *, lookaside_free_func *,
193 uint32_t, size_t, uint32_t, uint16_t);
194 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
195 static slist_entry *ExInterlockedPushEntrySList(slist_header *,
196 slist_entry *, kspin_lock *);
197 static slist_entry *ExInterlockedPopEntrySList(slist_header *,
198 kspin_lock *);
200 static uint32_t InterlockedIncrement(volatile uint32_t *);
201 static uint32_t InterlockedDecrement(volatile uint32_t *);
202 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
203 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
204 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
205 uint64_t, uint64_t, uint64_t, enum nt_caching_type);
206 static void MmFreeContiguousMemory(void *);
207 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
208 enum nt_caching_type);
209 static uint32_t MmSizeOfMdl(void *, size_t);
210 static void *MmMapLockedPages(mdl *, uint8_t);
211 static void *MmMapLockedPagesSpecifyCache(mdl *,
212 uint8_t, uint32_t, void *, uint32_t, uint32_t);
213 static void MmUnmapLockedPages(void *, mdl *);
214 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
215 static void RtlZeroMemory(void *, size_t);
216 static void RtlSecureZeroMemory(void *, size_t);
217 static void RtlFillMemory(void *, size_t, uint8_t);
218 static void RtlMoveMemory(void *, const void *, size_t);
219 static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
220 static void RtlCopyMemory(void *, const void *, size_t);
221 static size_t RtlCompareMemory(const void *, const void *, size_t);
222 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
223 uint32_t, uint32_t *);
224 static int atoi (const char *);
225 static long atol (const char *);
226 static int rand(void);
227 static void srand(unsigned int);
228 static void KeQuerySystemTime(uint64_t *);
229 static uint32_t KeTickCount(void);
230 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
231 static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
232 uint32_t, void **);
233 static void ntoskrnl_thrfunc(void *);
234 static ndis_status PsCreateSystemThread(ndis_handle *,
235 uint32_t, void *, ndis_handle, void *, void *, void *);
236 static ndis_status PsTerminateSystemThread(ndis_status);
237 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
238 uint32_t, void *, device_object *);
239 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
240 uint32_t, void *, uint32_t *);
241 static void KeInitializeMutex(kmutant *, uint32_t);
242 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
243 static uint32_t KeReadStateMutex(kmutant *);
244 static ndis_status ObReferenceObjectByHandle(ndis_handle,
245 uint32_t, void *, uint8_t, void **, void **);
246 static void ObfDereferenceObject(void *);
247 static uint32_t ZwClose(ndis_handle);
248 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
249 uint32_t, void *);
250 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
251 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
252 static void *ntoskrnl_memset(void *, int, size_t);
253 static void *ntoskrnl_memmove(void *, void *, size_t);
254 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
255 static char *ntoskrnl_strstr(char *, char *);
256 static char *ntoskrnl_strncat(char *, char *, size_t);
257 static int ntoskrnl_toupper(int);
258 static int ntoskrnl_tolower(int);
259 static funcptr ntoskrnl_findwrap(funcptr);
260 static uint32_t DbgPrint(char *, ...) __printflike(1, 2);
261 static void DbgBreakPoint(void);
262 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
263 static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
264 static int32_t KeSetPriorityThread(struct thread *, int32_t);
265 static void dummy(void);
266
267 static struct lock ntoskrnl_dispatchlock;
268 static struct mtx ntoskrnl_interlock;
269 static kspin_lock ntoskrnl_cancellock;
270 static int ntoskrnl_kth = 0;
271 static struct nt_objref_head ntoskrnl_reflist;
272 static struct objcache *mdl_cache;
273 static struct objcache *iw_cache;
274 static struct kdpc_queue *kq_queues;
275 static struct kdpc_queue *wq_queues;
276 static int wq_idx = 0;
277
278 static struct objcache_malloc_args mdl_alloc_args = {
279 MDL_ZONE_SIZE, M_DEVBUF
280 };
281 static struct objcache_malloc_args iw_alloc_args = {
282 sizeof(io_workitem), M_DEVBUF
283 };
284
285 int
286 ntoskrnl_libinit(void)
287 {
288 image_patch_table *patch;
289 int error;
290 struct thread *p;
291 kdpc_queue *kq;
292 callout_entry *e;
293 int i;
294
295 lockinit(&ntoskrnl_dispatchlock, MTX_NDIS_LOCK, 0, LK_CANRECURSE);
296 mtx_init(&ntoskrnl_interlock);
297 KeInitializeSpinLock(&ntoskrnl_cancellock);
298 KeInitializeSpinLock(&ntoskrnl_intlock);
299 TAILQ_INIT(&ntoskrnl_reflist);
300
301 InitializeListHead(&ntoskrnl_calllist);
302 InitializeListHead(&ntoskrnl_intlist);
303 mtx_init(&ntoskrnl_calllock);
304
305 kq_queues = ExAllocatePoolWithTag(NonPagedPool,
306 #ifdef NTOSKRNL_MULTIPLE_DPCS
307 sizeof(kdpc_queue) * ncpus, 0);
308 #else
309 sizeof(kdpc_queue), 0);
310 #endif
311
312 if (kq_queues == NULL)
313 return (ENOMEM);
314
315 wq_queues = ExAllocatePoolWithTag(NonPagedPool,
316 sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
317
318 if (wq_queues == NULL)
319 return (ENOMEM);
320
321 #ifdef NTOSKRNL_MULTIPLE_DPCS
322 bzero((char *)kq_queues, sizeof(kdpc_queue) * ncpus);
323 #else
324 bzero((char *)kq_queues, sizeof(kdpc_queue));
325 #endif
326 bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
327
328 /*
329 * Launch the DPC threads.
330 */
331
332 #ifdef NTOSKRNL_MULTIPLE_DPCS
333 for (i = 0; i < ncpus; i++) {
334 #else
335 for (i = 0; i < 1; i++) {
336 #endif
337 kq = kq_queues + i;
338 kq->kq_cpu = i;
339 error = kthread_create_cpu(ntoskrnl_dpc_thread, kq, &p, i,
340 "Win DPC %d", i);
341 if (error)
342 panic("failed to launch DPC thread");
343 }
344
345 /*
346 * Launch the workitem threads.
347 */
348
349 for (i = 0; i < WORKITEM_THREADS; i++) {
350 kq = wq_queues + i;
351 error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
352 "Win Workitem %d", i);
353 if (error)
354 panic("failed to launch workitem thread");
355 }
356
357 patch = ntoskrnl_functbl;
358 while (patch->ipt_func != NULL) {
359 windrv_wrap((funcptr)patch->ipt_func,
360 (funcptr *)&patch->ipt_wrap,
361 patch->ipt_argcnt, patch->ipt_ftype);
362 patch++;
363 }
364
365 for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
366 e = ExAllocatePoolWithTag(NonPagedPool,
367 sizeof(callout_entry), 0);
368 if (e == NULL)
369 panic("failed to allocate timeouts");
370 mtx_spinlock(&ntoskrnl_calllock);
371 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
372 mtx_spinunlock(&ntoskrnl_calllock);
373 }
374
375 /*
376 * MDLs are supposed to be variable size (they describe
377 * buffers containing some number of pages, but we don't
378 * know ahead of time how many pages that will be). But
379 * always allocating them off the heap is very slow. As
380 * a compromise, we create an MDL UMA zone big enough to
381 * handle any buffer requiring up to 16 pages, and we
382 * use those for any MDLs for buffers of 16 pages or less
383 * in size. For buffers larger than that (which we assume
384 * will be few and far between), we allocate the MDLs off
385 * the heap.
386 *
387 * CHANGED TO USING objcache(9) IN DRAGONFLY
388 */
389
390 mdl_cache = objcache_create("Windows MDL", 0, 0,
391 NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
392 &mdl_alloc_args);
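/*
 * Illustrative sketch (hypothetical names, not this file's code):
 * given the sizing policy described above, an MDL allocator would
 * choose between this cache and the heap roughly like so, assuming
 * MDL_ZONE_SIZE covers a 16-page buffer:
 *
 *	size = MmSizeOfMdl(vaddr, len);
 *	if (size > MDL_ZONE_SIZE)
 *		m = ExAllocatePoolWithTag(NonPagedPool, size, 0);
 *	else
 *		m = objcache_get(mdl_cache, M_NOWAIT);
 */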
393
394 iw_cache = objcache_create("Windows WorkItem", 0, 0,
395 NULL, NULL, NULL, objcache_malloc_alloc, objcache_malloc_free,
396 &iw_alloc_args);
397
398 return (0);
399 }
400
401 int
402 ntoskrnl_libfini(void)
403 {
404 image_patch_table *patch;
405 callout_entry *e;
406 list_entry *l;
407
408 patch = ntoskrnl_functbl;
409 while (patch->ipt_func != NULL) {
410 windrv_unwrap(patch->ipt_wrap);
411 patch++;
412 }
413
414 /* Stop the workitem queues. */
415 ntoskrnl_destroy_workitem_threads();
416 /* Stop the DPC queues. */
417 ntoskrnl_destroy_dpc_threads();
418
419 ExFreePool(kq_queues);
420 ExFreePool(wq_queues);
421
422 objcache_destroy(mdl_cache);
423 objcache_destroy(iw_cache);
424
425 mtx_spinlock(&ntoskrnl_calllock);
426 while(!IsListEmpty(&ntoskrnl_calllist)) {
427 l = RemoveHeadList(&ntoskrnl_calllist);
428 e = CONTAINING_RECORD(l, callout_entry, ce_list);
429 mtx_spinunlock(&ntoskrnl_calllock);
430 ExFreePool(e);
431 mtx_spinlock(&ntoskrnl_calllock);
432 }
433 mtx_spinunlock(&ntoskrnl_calllock);
434
435 lockuninit(&ntoskrnl_dispatchlock);
436 mtx_uninit(&ntoskrnl_interlock);
437 mtx_uninit(&ntoskrnl_calllock);
438
439 return (0);
440 }
441
442 /*
443 * We need to be able to reference this externally from the wrapper;
444 * GCC only generates a local implementation of memset.
445 */
446 static void *
447 ntoskrnl_memset(void *buf, int ch, size_t size)
448 {
449 return (memset(buf, ch, size));
450 }
451
452 static void *
453 ntoskrnl_memmove(void *dst, void *src, size_t size)
454 {
455 bcopy(src, dst, size);
456 return (dst);
457 }
458
459 static void *
460 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
461 {
462 if (len != 0) {
463 unsigned char *p = buf;
464
465 do {
466 if (*p++ == ch)
467 return (p - 1);
468 } while (--len != 0);
469 }
470 return (NULL);
471 }
472
473 static char *
474 ntoskrnl_strstr(char *s, char *find)
475 {
476 char c, sc;
477 size_t len;
478
479 if ((c = *find++) != 0) {
480 len = strlen(find);
481 do {
482 do {
483 if ((sc = *s++) == 0)
484 return (NULL);
485 } while (sc != c);
486 } while (strncmp(s, find, len) != 0);
487 s--;
488 }
489 return (s);
490 }
491
492 /* Taken from libc */
493 static char *
494 ntoskrnl_strncat(char *dst, char *src, size_t n)
495 {
496 if (n != 0) {
497 char *d = dst;
498 const char *s = src;
499
500 while (*d != 0)
501 d++;
502 do {
503 if ((*d = *s++) == 0)
504 break;
505 d++;
506 } while (--n != 0);
507 *d = 0;
508 }
509 return (dst);
510 }
511
512 static int
513 ntoskrnl_toupper(int c)
514 {
515 return (toupper(c));
516 }
517
518 static int
519 ntoskrnl_tolower(int c)
520 {
521 return (tolower(c));
522 }
523
524 static uint8_t
525 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
526 uint8_t caseinsensitive)
527 {
528 int i;
529
530 if (str1->us_len != str2->us_len)
531 return (FALSE);
532
533 for (i = 0; i < str1->us_len; i++) {
534 if (caseinsensitive == TRUE) {
535 if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
536 toupper((char)(str2->us_buf[i] & 0xFF)))
537 return (FALSE);
538 } else {
539 if (str1->us_buf[i] != str2->us_buf[i])
540 return (FALSE);
541 }
542 }
543
544 return (TRUE);
545 }
546
547 static void
548 RtlCopyString(ansi_string *dst, const ansi_string *src)
549 {
550 if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
551 dst->as_len = min(src->as_len, dst->as_maxlen);
552 memcpy(dst->as_buf, src->as_buf, dst->as_len);
553 if (dst->as_len < dst->as_maxlen)
554 dst->as_buf[dst->as_len] = 0;
555 } else
556 dst->as_len = 0;
557 }
558
559 static void
560 RtlCopyUnicodeString(unicode_string *dest, unicode_string *src)
561 {
562
563 if (dest->us_maxlen >= src->us_len)
564 dest->us_len = src->us_len;
565 else
566 dest->us_len = dest->us_maxlen;
567 memcpy(dest->us_buf, src->us_buf, dest->us_len);
568 }
569
570 static void
571 ntoskrnl_ascii_to_unicode(char *ascii, uint16_t *unicode, int len)
572 {
573 int i;
574 uint16_t *ustr;
575
576 ustr = unicode;
577 for (i = 0; i < len; i++) {
578 *ustr = (uint16_t)ascii[i];
579 ustr++;
580 }
581 }
582
583 static void
584 ntoskrnl_unicode_to_ascii(uint16_t *unicode, char *ascii, int len)
585 {
586 int i;
587 uint8_t *astr;
588
589 astr = ascii;
590 for (i = 0; i < len / 2; i++) {
591 *astr = (uint8_t)unicode[i];
592 astr++;
593 }
594 }
595
596 uint32_t
597 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
598 {
599 if (dest == NULL || src == NULL)
600 return (STATUS_INVALID_PARAMETER);
601
602 dest->as_len = src->us_len / 2;
603 if (dest->as_maxlen < dest->as_len)
604 dest->as_len = dest->as_maxlen;
605
606 if (allocate == TRUE) {
607 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
608 (src->us_len / 2) + 1, 0);
609 if (dest->as_buf == NULL)
610 return (STATUS_INSUFFICIENT_RESOURCES);
611 dest->as_len = dest->as_maxlen = src->us_len / 2;
612 } else {
613 dest->as_len = src->us_len / 2; /* XXX */
614 if (dest->as_maxlen < dest->as_len)
615 dest->as_len = dest->as_maxlen;
616 }
617
618 ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
619 dest->as_len * 2);
620
621 return (STATUS_SUCCESS);
622 }
623
624 uint32_t
625 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
626 uint8_t allocate)
627 {
628 if (dest == NULL || src == NULL)
629 return (STATUS_INVALID_PARAMETER);
630
631 if (allocate == TRUE) {
632 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
633 src->as_len * 2, 0);
634 if (dest->us_buf == NULL)
635 return (STATUS_INSUFFICIENT_RESOURCES);
636 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
637 } else {
638 dest->us_len = src->as_len * 2; /* XXX */
639 if (dest->us_maxlen < dest->us_len)
640 dest->us_len = dest->us_maxlen;
641 }
642
643 ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
644 dest->us_len / 2);
645
646 return (STATUS_SUCCESS);
647 }
648
649 void *
650 ExAllocatePoolWithTag(uint32_t pooltype, size_t len, uint32_t tag)
651 {
652 void *buf;
653
654 buf = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
655 if (buf == NULL)
656 return (NULL);
657
658 return (buf);
659 }
660
661 static void
662 ExFreePoolWithTag(void *buf, uint32_t tag)
663 {
664 ExFreePool(buf);
665 }
666
667 void
668 ExFreePool(void *buf)
669 {
670 kfree(buf, M_DEVBUF);
671 }
672
673 uint32_t
674 IoAllocateDriverObjectExtension(driver_object *drv, void *clid,
675 uint32_t extlen, void **ext)
676 {
677 custom_extension *ce;
678
679 ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
680 + extlen, 0);
681
682 if (ce == NULL)
683 return (STATUS_INSUFFICIENT_RESOURCES);
684
685 ce->ce_clid = clid;
686 InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
687
688 *ext = (void *)(ce + 1);
689
690 return (STATUS_SUCCESS);
691 }
692
693 void *
694 IoGetDriverObjectExtension(driver_object *drv, void *clid)
695 {
696 list_entry *e;
697 custom_extension *ce;
698
699 /*
700 * Sanity check. Our dummy bus drivers don't have
701 * any driver extensions.
702 */
703
704 if (drv->dro_driverext == NULL)
705 return (NULL);
706
707 e = drv->dro_driverext->dre_usrext.nle_flink;
708 while (e != &drv->dro_driverext->dre_usrext) {
709 ce = (custom_extension *)e;
710 if (ce->ce_clid == clid)
711 return ((void *)(ce + 1));
712 e = e->nle_flink;
713 }
714
715 return (NULL);
716 }
717
719 uint32_t
720 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
721 uint32_t devtype, uint32_t devchars, uint8_t exclusive,
722 device_object **newdev)
723 {
724 device_object *dev;
725
726 dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
727 if (dev == NULL)
728 return (STATUS_INSUFFICIENT_RESOURCES);
729
730 dev->do_type = devtype;
731 dev->do_drvobj = drv;
732 dev->do_currirp = NULL;
733 dev->do_flags = 0;
734
735 if (devextlen) {
736 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
737 devextlen, 0);
738
739 if (dev->do_devext == NULL) {
740 ExFreePool(dev);
741 return (STATUS_INSUFFICIENT_RESOURCES);
742 }
743
744 bzero(dev->do_devext, devextlen);
745 } else
746 dev->do_devext = NULL;
747
748 dev->do_size = sizeof(device_object) + devextlen;
749 dev->do_refcnt = 1;
750 dev->do_attacheddev = NULL;
751 dev->do_nextdev = NULL;
752 dev->do_devtype = devtype;
753 dev->do_stacksize = 1;
754 dev->do_alignreq = 1;
755 dev->do_characteristics = devchars;
756 dev->do_iotimer = NULL;
757 KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
758
759 /*
760 * Vpb is used for disk/tape devices,
761 * but we don't support those. (Yet.)
762 */
763 dev->do_vpb = NULL;
764
765 dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
766 sizeof(devobj_extension), 0);
767
768 if (dev->do_devobj_ext == NULL) {
769 if (dev->do_devext != NULL)
770 ExFreePool(dev->do_devext);
771 ExFreePool(dev);
772 return (STATUS_INSUFFICIENT_RESOURCES);
773 }
774
775 dev->do_devobj_ext->dve_type = 0;
776 dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
777 dev->do_devobj_ext->dve_devobj = dev;
778
779 /*
780 * Attach this device to the driver object's list
781 * of devices. Note: this is not the same as attaching
782 * the device to the device stack. The driver's AddDevice
783 * routine must explicitly call IoAddDeviceToDeviceStack()
784 * to do that.
785 */
786
787 if (drv->dro_devobj == NULL) {
788 drv->dro_devobj = dev;
789 dev->do_nextdev = NULL;
790 } else {
791 dev->do_nextdev = drv->dro_devobj;
792 drv->dro_devobj = dev;
793 }
794
795 *newdev = dev;
796
797 return (STATUS_SUCCESS);
798 }
799
800 void
801 IoDeleteDevice(device_object *dev)
802 {
803 device_object *prev;
804
805 if (dev == NULL)
806 return;
807
808 if (dev->do_devobj_ext != NULL)
809 ExFreePool(dev->do_devobj_ext);
810
811 if (dev->do_devext != NULL)
812 ExFreePool(dev->do_devext);
813
814 /* Unlink the device from the driver's device list. */
815
816 prev = dev->do_drvobj->dro_devobj;
817 if (prev == dev)
818 dev->do_drvobj->dro_devobj = dev->do_nextdev;
819 else {
820 while (prev->do_nextdev != dev)
821 prev = prev->do_nextdev;
822 prev->do_nextdev = dev->do_nextdev;
823 }
824
825 ExFreePool(dev);
826 }
827
828 device_object *
829 IoGetAttachedDevice(device_object *dev)
830 {
831 device_object *d;
832
833 if (dev == NULL)
834 return (NULL);
835
836 d = dev;
837
838 while (d->do_attacheddev != NULL)
839 d = d->do_attacheddev;
840
841 return (d);
842 }
843
844 static irp *
845 IoBuildSynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
846 uint32_t len, uint64_t *off, nt_kevent *event, io_status_block *status)
847 {
848 irp *ip;
849
850 ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
851 if (ip == NULL)
852 return (NULL);
853 ip->irp_usrevent = event;
854
855 return (ip);
856 }
857
858 static irp *
859 IoBuildAsynchronousFsdRequest(uint32_t func, device_object *dobj, void *buf,
860 uint32_t len, uint64_t *off, io_status_block *status)
861 {
862 irp *ip;
863 io_stack_location *sl;
864
865 ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
866 if (ip == NULL)
867 return (NULL);
868
869 ip->irp_usriostat = status;
870 ip->irp_tail.irp_overlay.irp_thread = NULL;
871
872 sl = IoGetNextIrpStackLocation(ip);
873 sl->isl_major = func;
874 sl->isl_minor = 0;
875 sl->isl_flags = 0;
876 sl->isl_ctl = 0;
877 sl->isl_devobj = dobj;
878 sl->isl_fileobj = NULL;
879 sl->isl_completionfunc = NULL;
880
881 ip->irp_userbuf = buf;
882
883 if (dobj->do_flags & DO_BUFFERED_IO) {
884 ip->irp_assoc.irp_sysbuf =
885 ExAllocatePoolWithTag(NonPagedPool, len, 0);
886 if (ip->irp_assoc.irp_sysbuf == NULL) {
887 IoFreeIrp(ip);
888 return (NULL);
889 }
890 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
891 }
892
893 if (dobj->do_flags & DO_DIRECT_IO) {
894 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
895 if (ip->irp_mdl == NULL) {
896 if (ip->irp_assoc.irp_sysbuf != NULL)
897 ExFreePool(ip->irp_assoc.irp_sysbuf);
898 IoFreeIrp(ip);
899 return (NULL);
900 }
901 ip->irp_userbuf = NULL;
902 ip->irp_assoc.irp_sysbuf = NULL;
903 }
904
905 if (func == IRP_MJ_READ) {
906 sl->isl_parameters.isl_read.isl_len = len;
907 if (off != NULL)
908 sl->isl_parameters.isl_read.isl_byteoff = *off;
909 else
910 sl->isl_parameters.isl_read.isl_byteoff = 0;
911 }
912
913 if (func == IRP_MJ_WRITE) {
914 sl->isl_parameters.isl_write.isl_len = len;
915 if (off != NULL)
916 sl->isl_parameters.isl_write.isl_byteoff = *off;
917 else
918 sl->isl_parameters.isl_write.isl_byteoff = 0;
919 }
920
921 return (ip);
922 }
923
924 static irp *
925 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
926 uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
927 nt_kevent *event, io_status_block *status)
928 {
929 irp *ip;
930 io_stack_location *sl;
931 uint32_t buflen;
932
933 ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
934 if (ip == NULL)
935 return (NULL);
936 ip->irp_usrevent = event;
937 ip->irp_usriostat = status;
938 ip->irp_tail.irp_overlay.irp_thread = NULL;
939
940 sl = IoGetNextIrpStackLocation(ip);
941 sl->isl_major = isinternal == TRUE ?
942 IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
943 sl->isl_minor = 0;
944 sl->isl_flags = 0;
945 sl->isl_ctl = 0;
946 sl->isl_devobj = dobj;
947 sl->isl_fileobj = NULL;
948 sl->isl_completionfunc = NULL;
949 sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
950 sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
951 sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
952
953 switch(IO_METHOD(iocode)) {
954 case METHOD_BUFFERED:
955 if (ilen > olen)
956 buflen = ilen;
957 else
958 buflen = olen;
959 if (buflen) {
960 ip->irp_assoc.irp_sysbuf =
961 ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
962 if (ip->irp_assoc.irp_sysbuf == NULL) {
963 IoFreeIrp(ip);
964 return (NULL);
965 }
966 }
967 if (ilen && ibuf != NULL) {
968 bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
969 bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
970 buflen - ilen);
971 } else
972 bzero(ip->irp_assoc.irp_sysbuf, ilen);
973 ip->irp_userbuf = obuf;
974 break;
975 case METHOD_IN_DIRECT:
976 case METHOD_OUT_DIRECT:
977 if (ilen && ibuf != NULL) {
978 ip->irp_assoc.irp_sysbuf =
979 ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
980 if (ip->irp_assoc.irp_sysbuf == NULL) {
981 IoFreeIrp(ip);
982 return (NULL);
983 }
984 bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
985 }
986 if (olen && obuf != NULL) {
987 ip->irp_mdl = IoAllocateMdl(obuf, olen,
988 FALSE, FALSE, ip);
989 /*
990 * Normally we would MmProbeAndLockPages()
991 * here, but we don't have to in our
992 * implementation.
993 */
994 }
995 break;
996 case METHOD_NEITHER:
997 ip->irp_userbuf = obuf;
998 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
999 break;
1000 default:
1001 break;
1002 }
1003
1004 /*
1005 * Ideally, we should associate this IRP with the calling
1006 * thread here.
1007 */
1008
1009 return (ip);
1010 }
1011
1012 static irp *
1013 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
1014 {
1015 irp *i;
1016
1017 i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
1018 if (i == NULL)
1019 return (NULL);
1020
1021 IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
1022
1023 return (i);
1024 }
1025
1026 static irp *
1027 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
1028 {
1029 irp *associrp;
1030
1031 associrp = IoAllocateIrp(stsize, FALSE);
1032 if (associrp == NULL)
1033 return (NULL);
1034
1035 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1036 associrp->irp_flags |= IRP_ASSOCIATED_IRP;
1037 associrp->irp_tail.irp_overlay.irp_thread =
1038 ip->irp_tail.irp_overlay.irp_thread;
1039 associrp->irp_assoc.irp_master = ip;
1040 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1041
1042 return (associrp);
1043 }
1044
1045 static void
1046 IoFreeIrp(irp *ip)
1047 {
1048 ExFreePool(ip);
1049 }
1050
1051 static void
1052 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
1053 {
1054 bzero((char *)io, IoSizeOfIrp(ssize));
1055 io->irp_size = psize;
1056 io->irp_stackcnt = ssize;
1057 io->irp_currentstackloc = ssize;
1058 InitializeListHead(&io->irp_thlist);
1059 io->irp_tail.irp_overlay.irp_csl =
1060 (io_stack_location *)(io + 1) + ssize;
1061 }
1062
1063 static void
1064 IoReuseIrp(irp *ip, uint32_t status)
1065 {
1066 uint8_t allocflags;
1067
1068 allocflags = ip->irp_allocflags;
1069 IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
1070 ip->irp_iostat.isb_status = status;
1071 ip->irp_allocflags = allocflags;
1072 }
1073
1074 void
1075 IoAcquireCancelSpinLock(uint8_t *irql)
1076 {
1077 KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
1078 }
1079
1080 void
1081 IoReleaseCancelSpinLock(uint8_t irql)
1082 {
1083 KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
1084 }
1085
1086 uint8_t
1087 IoCancelIrp(irp *ip)
1088 {
1089 cancel_func cfunc;
1090 uint8_t cancelirql;
1091
1092 IoAcquireCancelSpinLock(&cancelirql);
1093 cfunc = IoSetCancelRoutine(ip, NULL);
1094 ip->irp_cancel = TRUE;
1095 if (cfunc == NULL) {
1096 IoReleaseCancelSpinLock(cancelirql);
1097 return (FALSE);
1098 }
1099 ip->irp_cancelirql = cancelirql;
1100 MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
1101 return (uint8_t)IoSetCancelValue(ip, TRUE);
1102 }
1103
1104 uint32_t
1105 IofCallDriver(device_object *dobj, irp *ip)
1106 {
1107 driver_object *drvobj;
1108 io_stack_location *sl;
1109 uint32_t status;
1110 driver_dispatch disp;
1111
1112 drvobj = dobj->do_drvobj;
1113
1114 if (ip->irp_currentstackloc <= 0)
1115 panic("IoCallDriver(): out of stack locations");
1116
1117 IoSetNextIrpStackLocation(ip);
1118 sl = IoGetCurrentIrpStackLocation(ip);
1119
1120 sl->isl_devobj = dobj;
1121
1122 disp = drvobj->dro_dispatch[sl->isl_major];
1123 status = MSCALL2(disp, dobj, ip);
1124
1125 return (status);
1126 }
1127
1128 void
1129 IofCompleteRequest(irp *ip, uint8_t prioboost)
1130 {
1131 uint32_t status;
1132 device_object *dobj;
1133 io_stack_location *sl;
1134 completion_func cf;
1135
1136 KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
1137 ("incorrect IRP(%p) status (STATUS_PENDING)", ip));
1138
1139 sl = IoGetCurrentIrpStackLocation(ip);
1140 IoSkipCurrentIrpStackLocation(ip);
1141
1142 do {
1143 if (sl->isl_ctl & SL_PENDING_RETURNED)
1144 ip->irp_pendingreturned = TRUE;
1145
1146 if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
1147 dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
1148 else
1149 dobj = NULL;
1150
1151 if (sl->isl_completionfunc != NULL &&
1152 ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
1153 sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
1154 (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
1155 sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
1156 (ip->irp_cancel == TRUE &&
1157 sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
1158 cf = sl->isl_completionfunc;
1159 status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
1160 if (status == STATUS_MORE_PROCESSING_REQUIRED)
1161 return;
1162 } else {
1163 if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
1164 (ip->irp_pendingreturned == TRUE))
1165 IoMarkIrpPending(ip);
1166 }
1167
1168 /* Move to the next stack location. */
1169 IoSkipCurrentIrpStackLocation(ip);
1170 sl++;
1171 } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));
1172
1173 if (ip->irp_usriostat != NULL)
1174 *ip->irp_usriostat = ip->irp_iostat;
1175 if (ip->irp_usrevent != NULL)
1176 KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
1177
1178 /* Handle any associated IRPs. */
1179
1180 if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
1181 uint32_t masterirpcnt;
1182 irp *masterirp;
1183 mdl *m;
1184
1185 masterirp = ip->irp_assoc.irp_master;
1186 masterirpcnt =
1187 InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
1188
1189 while ((m = ip->irp_mdl) != NULL) {
1190 ip->irp_mdl = m->mdl_next;
1191 IoFreeMdl(m);
1192 }
1193 IoFreeIrp(ip);
1194 if (masterirpcnt == 0)
1195 IoCompleteRequest(masterirp, IO_NO_INCREMENT);
1196 return;
1197 }
1198
1199 /* With any luck, these conditions will never arise. */
1200
1201 if (ip->irp_flags & IRP_PAGING_IO) {
1202 if (ip->irp_mdl != NULL)
1203 IoFreeMdl(ip->irp_mdl);
1204 IoFreeIrp(ip);
1205 }
1206 }
1207
1208 void
1209 ntoskrnl_intr(void *arg)
1210 {
1211 kinterrupt *iobj;
1212 uint8_t irql;
1213 uint8_t claimed;
1214 list_entry *l;
1215
1216 KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1217 l = ntoskrnl_intlist.nle_flink;
1218 while (l != &ntoskrnl_intlist) {
1219 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
1220 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
1221 if (claimed == TRUE)
1222 break;
1223 l = l->nle_flink;
1224 }
1225 KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1226 }
1227
1228 uint8_t
1229 KeAcquireInterruptSpinLock(kinterrupt *iobj)
1230 {
1231 uint8_t irql;
1232 KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1233 return (irql);
1234 }
1235
1236 void
1237 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
1238 {
1239 KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1240 }
1241
1242 uint8_t
1243 KeSynchronizeExecution(kinterrupt *iobj, void *syncfunc, void *syncctx)
1244 {
1245 uint8_t irql;
1246
1247 KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1248 MSCALL1(syncfunc, syncctx);
1249 KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1250
1251 return (TRUE);
1252 }
1253
1254 /*
1255 * IoConnectInterrupt() is passed only the interrupt vector and
1256 * irql that a device wants to use, but no device-specific tag
1257 * of any kind. This conflicts rather badly with FreeBSD's
1258 * bus_setup_intr(), which needs the device_t for the device
1259 * requesting interrupt delivery. In order to bypass this
1260 * inconsistency, we implement a second level of interrupt
1261 * dispatching on top of bus_setup_intr(). All devices use
1262 * ntoskrnl_intr() as their ISR, and any device requesting
1263 * interrupts will be registered with ntoskrnl_intr()'s interrupt
1264 * dispatch list. When an interrupt arrives, we walk the list
1265 * and invoke all the registered ISRs. This effectively makes all
1266 * interrupts shared, but it's the only way to duplicate the
1267 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
1268 */
1269
1270 uint32_t
1271 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
1272 kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
1273 uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
1274 {
1275 uint8_t curirql;
1276
1277 *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
1278 if (*iobj == NULL)
1279 return (STATUS_INSUFFICIENT_RESOURCES);
1280
1281 (*iobj)->ki_svcfunc = svcfunc;
1282 (*iobj)->ki_svcctx = svcctx;
1283
1284 if (lock == NULL) {
1285 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
1286 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
1287 } else
1288 (*iobj)->ki_lock = lock;
1289
1290 KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
1291 InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
1292 KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
1293
1294 return (STATUS_SUCCESS);
1295 }
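/*
 * Hedged usage sketch: how a driver-side hookup to the dispatch
 * scheme described above might look. The ISR and softc names are
 * hypothetical; the bus-level interrupt itself must still be routed
 * to ntoskrnl_intr() for the list walk to happen.
 *
 *	kinterrupt *ih;
 *
 *	if (IoConnectInterrupt(&ih, my_isr, my_softc, NULL, vector,
 *	    irql, irql, 0, TRUE, 0, FALSE) != STATUS_SUCCESS)
 *		return (ENXIO);
 *	...
 *	IoDisconnectInterrupt(ih);
 */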
1296
1297 void
1298 IoDisconnectInterrupt(kinterrupt *iobj)
1299 {
1300 uint8_t irql;
1301
1302 if (iobj == NULL)
1303 return;
1304
1305 KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
1306 RemoveEntryList((&iobj->ki_list));
1307 KeReleaseSpinLock(&ntoskrnl_intlock, irql);
1308
1309 ExFreePool(iobj);
1310 }
1311
1312 device_object *
1313 IoAttachDeviceToDeviceStack(device_object *src, device_object *dst)
1314 {
1315 device_object *attached;
1316
1317 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1318 attached = IoGetAttachedDevice(dst);
1319 attached->do_attacheddev = src;
1320 src->do_attacheddev = NULL;
1321 src->do_stacksize = attached->do_stacksize + 1;
1322 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1323
1324 return (attached);
1325 }
1326
1327 void
1328 IoDetachDevice(device_object *topdev)
1329 {
1330 device_object *tail;
1331
1332 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1333
1334 /* First, break the chain. */
1335 tail = topdev->do_attacheddev;
1336 if (tail == NULL) {
1337 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1338 return;
1339 }
1340 topdev->do_attacheddev = tail->do_attacheddev;
1341 topdev->do_refcnt--;
1342
1343 /* Now reduce the stacksize count for the tail objects. */
1344
1345 tail = topdev->do_attacheddev;
1346 while (tail != NULL) {
1347 tail->do_stacksize--;
1348 tail = tail->do_attacheddev;
1349 }
1350
1351 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1352 }
1353
1354 /*
1355 * For the most part, an object is considered signalled if
1356 * dh_sigstate == TRUE. The exception is for mutant objects
1357 * (mutexes), where the logic works like this:
1358 *
1359 * - If the thread already owns the object and sigstate is
1360 * less than or equal to 0, then the object is considered
1361 * signalled (recursive acquisition).
1362 * - If dh_sigstate == 1, the object is also considered
1363 * signalled.
1364 */
1365
1366 static int
1367 ntoskrnl_is_signalled(nt_dispatch_header *obj, struct thread *td)
1368 {
1369 kmutant *km;
1370
1371 if (obj->dh_type == DISP_TYPE_MUTANT) {
1372 km = (kmutant *)obj;
1373 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
1374 obj->dh_sigstate == 1)
1375 return (TRUE);
1376 return (FALSE);
1377 }
1378
1379 if (obj->dh_sigstate > 0)
1380 return (TRUE);
1381 return (FALSE);
1382 }
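/*
 * Worked example of the mutant rules above: a free mutex has
 * dh_sigstate == 1. Each satisfied wait decrements it, so 0 means
 * the mutex is held once and -1 means its owner has recursively
 * acquired it a second time. For the owning thread, 0 and below
 * still test as signalled, which is what permits recursion.
 */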
1383
1384 static void
1385 ntoskrnl_satisfy_wait(nt_dispatch_header *obj, struct thread *td)
1386 {
1387 kmutant *km;
1388
1389 switch (obj->dh_type) {
1390 case DISP_TYPE_MUTANT:
1391 km = (struct kmutant *)obj;
1392 obj->dh_sigstate--;
1393 /*
1394 * If sigstate reaches 0, the mutex is now
1395 * non-signalled (the new thread owns it).
1396 */
1397 if (obj->dh_sigstate == 0) {
1398 km->km_ownerthread = td;
1399 if (km->km_abandoned == TRUE)
1400 km->km_abandoned = FALSE;
1401 }
1402 break;
1403 /* Synchronization objects get reset to unsignalled. */
1404 case DISP_TYPE_SYNCHRONIZATION_EVENT:
1405 case DISP_TYPE_SYNCHRONIZATION_TIMER:
1406 obj->dh_sigstate = 0;
1407 break;
1408 case DISP_TYPE_SEMAPHORE:
1409 obj->dh_sigstate--;
1410 break;
1411 default:
1412 break;
1413 }
1414 }
1415
1416 static void
1417 ntoskrnl_satisfy_multiple_waits(wait_block *wb)
1418 {
1419 wait_block *cur;
1420 struct thread *td;
1421
1422 cur = wb;
1423 td = ((wb_ext *)wb->wb_ext)->we_td;
1424
1425 do {
1426 ntoskrnl_satisfy_wait(cur->wb_object, td);
1427 cur->wb_awakened = TRUE;
1428 cur = cur->wb_next;
1429 } while (cur != wb);
1430 }
1431
1432 /* Always called with dispatcher lock held. */
1433 static void
1434 ntoskrnl_waittest(nt_dispatch_header *obj, uint32_t increment)
1435 {
1436 wait_block *w, *next;
1437 list_entry *e;
1438 struct thread *td;
1439 wb_ext *we;
1440 int satisfied;
1441
1442 /*
1443 * Once an object has been signalled, we walk its list of
1444 * wait blocks. If a wait block can be awakened, then satisfy
1445 * waits as necessary and wake the thread.
1446 *
1447 * The rules work like this:
1448 *
1449 * If a wait block is marked as WAITTYPE_ANY, then
1450 * we can satisfy the wait conditions on the current
1451 * object and wake the thread right away. Satisfying
1452 * the wait also has the effect of breaking us out
1453 * of the search loop.
1454 *
1455 * If the wait block is marked as WAITTYPE_ALL, then the
1456 * wait block will be part of a circularly linked
1457 * list of wait blocks belonging to a waiting thread
1458 * that's sleeping in KeWaitForMultipleObjects(). In
1459 * order to wake the thread, all the objects in the
1460 * wait list must be in the signalled state. If they
1461 * are, we then satisfy all of them and wake the
1462 * thread.
1463 *
1464 */
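/*
 * Concrete example: a thread blocked on an event and a mutex with
 * WAITTYPE_ALL sleeps until both are signalled, whereas with
 * WAITTYPE_ANY the first of the two to be signalled wakes it.
 */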
1465
1466 e = obj->dh_waitlisthead.nle_flink;
1467
1468 while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
1469 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
1470 we = w->wb_ext;
1471 td = we->we_td;
1472 satisfied = FALSE;
1473 if (w->wb_waittype == WAITTYPE_ANY) {
1474 /*
1475 * Thread can be awakened if
1476 * any wait is satisfied.
1477 */
1478 ntoskrnl_satisfy_wait(obj, td);
1479 satisfied = TRUE;
1480 w->wb_awakened = TRUE;
1481 } else {
1482 /*
1483 * Thread can only be woken up
1484 * if all waits are satisfied.
1485 * If the thread is waiting on multiple
1486 * objects, they should all be linked
1487 * through the wb_next pointers in the
1488 * wait blocks.
1489 */
1490 satisfied = TRUE;
1491 next = w->wb_next;
1492 while (next != w) {
1493 if (ntoskrnl_is_signalled(next->wb_object, td) == FALSE) {
1494 satisfied = FALSE;
1495 break;
1496 }
1497 next = next->wb_next;
1498 }
1499 if (satisfied == TRUE) ntoskrnl_satisfy_multiple_waits(w);
1500 }
1501
1502 if (satisfied == TRUE)
1503 cv_broadcastpri(&we->we_cv,
1504 (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
1505 w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
1506
1507 e = e->nle_flink;
1508 }
1509 }
1510
1511 /*
1512 * Return the number of 100 nanosecond intervals since
1513 * January 1, 1601. (?!?!)
1514 */
1515 void
1516 ntoskrnl_time(uint64_t *tval)
1517 {
1518 struct timespec ts;
1519
1520 nanotime(&ts);
1521 *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
1522 11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
1523 }
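/*
 * Worked example: 11644473600 is the number of seconds separating
 * January 1, 1601 from the UNIX epoch (January 1, 1970), so the
 * UNIX epoch itself corresponds to 11644473600 * 10000000 =
 * 116444736000000000 ticks of 100ns.
 */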
1524
1525 static void
1526 KeQuerySystemTime(uint64_t *current_time)
1527 {
1528 ntoskrnl_time(current_time);
1529 }
1530
1531 static uint32_t
1532 KeTickCount(void)
1533 {
1534 struct timeval tv;
1535 getmicrouptime(&tv);
1536 return tvtohz_high(&tv);
1537 }
1538
1540 /*
1541 * KeWaitForSingleObject() is a tricky beast, because it can be used
1542 * with several different object types: semaphores, timers, events,
1543 * mutexes and threads. Semaphores don't appear very often, but the
1544 * other object types are quite common. KeWaitForSingleObject() is
1545 * what's normally used to acquire a mutex, and it can be used to
1546 * wait for a thread termination.
1547 *
1548 * The Windows NDIS API is implemented in terms of Windows kernel
1549 * primitives, and some of the object manipulation is duplicated in
1550 * NDIS. For example, NDIS has timers and events, which are actually
1551 * Windows kevents and ktimers. Now, you're supposed to only use the
1552 * NDIS variants of these objects within the confines of the NDIS API,
1553 * but there are some naughty developers out there who will use
1554 * KeWaitForSingleObject() on NDIS timer and event objects, so we
1555 * have to support that as well. Consequently, our NDIS timer and event
1556 * code has to be closely tied into our ntoskrnl timer and event code,
1557 * just as it is in Windows.
1558 *
1559 * KeWaitForSingleObject() may do different things for different kinds
1560 * of objects:
1561 *
1562 * - For events, we check if the event has been signalled. If the
1563 * event is already in the signalled state, we just return immediately,
1564 * otherwise we wait for it to be set to the signalled state by someone
1565 * else calling KeSetEvent(). Events can be either synchronization or
1566 * notification events.
1567 *
1568 * - For timers, if the timer has already fired and the timer is in
1569 * the signalled state, we just return, otherwise we wait on the
1570 * timer. Unlike an event, timers get signalled automatically when
1571 * they expire rather than someone having to trip them manually.
1572 * Timers initialized with KeInitializeTimer() are always notification
1573 * events: KeInitializeTimerEx() lets you initialize a timer as
1574 * either a notification or synchronization event.
1575 *
1576 * - For mutexes, we try to acquire the mutex and if we can't, we wait
1577 * on the mutex until it's available and then grab it. When a mutex is
1578 * released, it enters the signalled state, which wakes up one of the
1579 * threads waiting to acquire it. Mutexes are always synchronization
1580 * events.
1581 *
1582 * - For threads, the only thing we do is wait until the thread object
1583 * enters a signalled state, which occurs when the thread terminates.
1584 * Threads are always notification events.
1585 *
1586 * A notification event wakes up all threads waiting on an object. A
1587 * synchronization event wakes up just one. Also, a synchronization event
1588 * is auto-clearing, which means we automatically set the event back to
1589 * the non-signalled state once the wakeup is done.
1590 */
1591
1592 uint32_t
1593 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
1594 uint8_t alertable, int64_t *duetime)
1595 {
1596 wait_block w;
1597 struct thread *td = curthread;
1598 struct timeval tv;
1599 int error = 0;
1600 uint64_t curtime;
1601 wb_ext we;
1602 nt_dispatch_header *obj;
1603
1604 obj = arg;
1605
1606 if (obj == NULL)
1607 return (STATUS_INVALID_PARAMETER);
1608
1609 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1610
1611 cv_init(&we.we_cv, "KeWFS");
1612 we.we_td = td;
1613
1614 /*
1615 * Check to see if this object is already signalled,
1616 * and just return without waiting if it is.
1617 */
1618 if (ntoskrnl_is_signalled(obj, td) == TRUE) {
1619 /* Sanity check the signal state value. */
1620 if (obj->dh_sigstate != INT32_MIN) {
1621 ntoskrnl_satisfy_wait(obj, curthread);
1622 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1623 return (STATUS_SUCCESS);
1624 } else {
1625 /*
1626 * There's a limit to how many times we can
1627 * recursively acquire a mutant. If we hit
1628 * the limit, something is very wrong.
1629 */
1630 if (obj->dh_type == DISP_TYPE_MUTANT) {
1631 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1632 panic("mutant limit exceeded");
1633 }
1634 }
1635 }
1636
1637 bzero((char *)&w, sizeof(wait_block));
1638 w.wb_object = obj;
1639 w.wb_ext = &we;
1640 w.wb_waittype = WAITTYPE_ANY;
1641 w.wb_next = &w;
1642 w.wb_waitkey = 0;
1643 w.wb_awakened = FALSE;
1644 w.wb_oldpri = td->td_pri;
1645
1646 InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
1647
1648 /*
1649 * The timeout value is specified in 100 nanosecond units
1650 * and can be a positive or negative number. If it's positive,
1651 * then the duetime is absolute, and we need to convert it
1652 * to a relative offset from the current time in order to use it.
1653 * If it's negative, then the duetime is relative and we
1654 * just have to convert the units.
1655 */
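/*
 * Concrete example: *duetime == -10000000 requests a one second
 * relative wait, while an absolute duetime equal to
 * ntoskrnl_time() + 10000000 expires one second from now.
 */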
1656
1657 if (duetime != NULL) {
1658 if (*duetime < 0) {
1659 tv.tv_sec = - (*duetime) / 10000000;
1660 tv.tv_usec = (- (*duetime) / 10) -
1661 (tv.tv_sec * 1000000);
1662 } else {
1663 ntoskrnl_time(&curtime);
1664 if (*duetime < curtime)
1665 tv.tv_sec = tv.tv_usec = 0;
1666 else {
1667 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1668 tv.tv_usec = ((*duetime) - curtime) / 10 -
1669 (tv.tv_sec * 1000000);
1670 }
1671 }
1672 }
1673
1674 if (duetime == NULL)
1675 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1676 else
1677 error = cv_timedwait(&we.we_cv,
1678 &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1679
1680 RemoveEntryList(&w.wb_waitlist);
1681
1682 cv_destroy(&we.we_cv);
1683
1684 /* We timed out. Leave the object alone and return status. */
1685
1686 if (error == EWOULDBLOCK) {
1687 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1688 return (STATUS_TIMEOUT);
1689 }
1690
1691 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1692
1693 return (STATUS_SUCCESS);
1694 /*
1695 return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
1696 mode, alertable, duetime, &w));
1697 */
1698 }
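/*
 * Hedged usage sketch of the semantics described above, waiting on
 * a notification event with a five second relative timeout (the
 * names below are local to the example):
 *
 *	nt_kevent ev;
 *	int64_t duetime = -50000000;	(5 seconds in 100ns units)
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	...
 *	if (KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime) ==
 *	    STATUS_TIMEOUT)
 *		handle_timeout();
 */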
1699
1700 static uint32_t
1701 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
1702 uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
1703 wait_block *wb_array)
1704 {
1705 struct thread *td = curthread;
1706 wait_block *whead, *w;
1707 wait_block _wb_array[MAX_WAIT_OBJECTS];
1708 nt_dispatch_header *cur;
1709 struct timeval tv;
1710 int i, wcnt = 0, error = 0;
1711 uint64_t curtime;
1712 struct timespec t1, t2;
1713 uint32_t status = STATUS_SUCCESS;
1714 wb_ext we;
1715
1716 if (cnt > MAX_WAIT_OBJECTS)
1717 return (STATUS_INVALID_PARAMETER);
1718 if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
1719 return (STATUS_INVALID_PARAMETER);
1720
1721 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
1722
1723 cv_init(&we.we_cv, "KeWFM");
1724 we.we_td = td;
1725
1726 if (wb_array == NULL)
1727 whead = _wb_array;
1728 else
1729 whead = wb_array;
1730
1731 bzero((char *)whead, sizeof(wait_block) * cnt);
1732
1733 /* First pass: see if we can satisfy any waits immediately. */
1734
1735 wcnt = 0;
1736 w = whead;
1737
1738 for (i = 0; i < cnt; i++) {
1739 InsertTailList((&obj[i]->dh_waitlisthead),
1740 (&w->wb_waitlist));
1741 w->wb_ext = &we;
1742 w->wb_object = obj[i];
1743 w->wb_waittype = wtype;
1744 w->wb_waitkey = i;
1745 w->wb_awakened = FALSE;
1746 w->wb_oldpri = td->td_pri;
1747 w->wb_next = w + 1;
1748 w++;
1749 wcnt++;
1750 if (ntoskrnl_is_signalled(obj[i], td)) {
1751 /*
1752 * There's a limit to how many times
1753 * we can recursively acquire a mutant.
1754 * If we hit the limit, something
1755 * is very wrong.
1756 */
1757 if (obj[i]->dh_sigstate == INT32_MIN &&
1758 obj[i]->dh_type == DISP_TYPE_MUTANT) {
1759 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1760 panic("mutant limit exceeded");
1761 }
1762
1763 /*
1764 * If this is a WAITTYPE_ANY wait, then
1765 * satisfy the waited object and exit
1766 * right now.
1767 */
1768
1769 if (wtype == WAITTYPE_ANY) {
1770 ntoskrnl_satisfy_wait(obj[i], td);
1771 status = STATUS_WAIT_0 + i;
1772 goto wait_done;
1773 } else {
1774 w--;
1775 wcnt--;
1776 w->wb_object = NULL;
1777 RemoveEntryList(&w->wb_waitlist);
1778 }
1779 }
1780 }
1781
1782 /*
1783 * If this is a WAITTYPE_ALL wait and all objects are
1784 * already signalled, satisfy the waits and exit now.
1785 */
1786
1787 if (wtype == WAITTYPE_ALL && wcnt == 0) {
1788 for (i = 0; i < cnt; i++)
1789 ntoskrnl_satisfy_wait(obj[i], td);
1790 status = STATUS_SUCCESS;
1791 goto wait_done;
1792 }
1793
1794 /*
1795 * Create a circular waitblock list. The waitcount
1796 * must always be non-zero when we get here.
1797 */
1798
1799 (w - 1)->wb_next = whead;
1800
1801 /* Wait on any objects that aren't yet signalled. */
1802
1803 /* Calculate timeout, if any. */
1804
1805 if (duetime != NULL) {
1806 if (*duetime < 0) {
1807 tv.tv_sec = - (*duetime) / 10000000;
1808 tv.tv_usec = (- (*duetime) / 10) -
1809 (tv.tv_sec * 1000000);
1810 } else {
1811 ntoskrnl_time(&curtime);
1812 if (*duetime < curtime)
1813 tv.tv_sec = tv.tv_usec = 0;
1814 else {
1815 tv.tv_sec = ((*duetime) - curtime) / 10000000;
1816 tv.tv_usec = ((*duetime) - curtime) / 10 -
1817 (tv.tv_sec * 1000000);
1818 }
1819 }
1820 }
1821
1822 while (wcnt) {
1823 nanotime(&t1);
1824
1825 if (duetime == NULL)
1826 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
1827 else
1828 error = cv_timedwait(&we.we_cv,
1829 &ntoskrnl_dispatchlock, tvtohz_high(&tv));
1830
1831 /* The wait timed out. */
1832
1833 if (error) {
1834 status = STATUS_TIMEOUT;
1835 goto wait_done;
1836 }
1837
1838 nanotime(&t2);
1839
1840 /* See what's been signalled. */
1841
1842 w = whead;
1843 do {
1844 cur = w->wb_object;
1845 if (ntoskrnl_is_signalled(cur, td) == TRUE ||
1846 w->wb_awakened == TRUE) {
1847 /* Sanity check the signal state value. */
1848 if (cur->dh_sigstate == INT32_MIN &&
1849 cur->dh_type == DISP_TYPE_MUTANT) {
1850 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1851 panic("mutant limit exceeded");
1852 }
1853 wcnt--;
1854 if (wtype == WAITTYPE_ANY) {
1855 status = STATUS_WAIT_0 +
1856 w->wb_waitkey;
1857 goto wait_done;
1858 }
1859 }
1860 w = w->wb_next;
1861 } while (w != whead);
1862
1863 /*
1864 * If all objects have been signalled, or if this
1865 * is a WAITTYPE_ANY wait and we were woken up by
1866 * someone, we can bail.
1867 */
1868
1869 if (wcnt == 0) {
1870 status = STATUS_SUCCESS;
1871 goto wait_done;
1872 }
1873
1874 /*
1875 * If this is a WAITTYPE_ALL wait and there are still
1876 * objects that haven't been signalled, deduct the
1877 * time that's elapsed so far from the timeout and
1878 * wait again (or continue waiting indefinitely if
1879 * there's no timeout).
1880 */
1881
1882 if (duetime != NULL) {
1883 tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
1884 tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
1885 }
1886 }
1887
1889 wait_done:
1890
1891 cv_destroy(&we.we_cv);
1892
1893 for (i = 0; i < cnt; i++) {
1894 if (whead[i].wb_object != NULL)
1895 RemoveEntryList(&whead[i].wb_waitlist);
1896
1897 }
1898 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
1899
1900 return (status);
1901 }
1902
1903 static void
1904 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
1905 {
1906 bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1907 }
1908
1909 static uint16_t
1910 READ_REGISTER_USHORT(uint16_t *reg)
1911 {
1912 return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1913 }
1914
1915 static void
1916 WRITE_REGISTER_ULONG(uint32_t *reg, uint32_t val)
1917 {
1918 bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1919 }
1920
1921 static uint32_t
1922 READ_REGISTER_ULONG(uint32_t *reg)
1923 {
1924 return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1925 }
1926
1927 static uint8_t
1928 READ_REGISTER_UCHAR(uint8_t *reg)
1929 {
1930 return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
1931 }
1932
1933 static void
1934 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
1935 {
1936 bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
1937 }
1938
1939 static int64_t
1940 _allmul(int64_t a, int64_t b)
1941 {
1942 return (a * b);
1943 }
1944
1945 static int64_t
1946 _alldiv(int64_t a, int64_t b)
1947 {
1948 return (a / b);
1949 }
1950
1951 static int64_t
1952 _allrem(int64_t a, int64_t b)
1953 {
1954 return (a % b);
1955 }
1956
1957 static uint64_t
1958 _aullmul(uint64_t a, uint64_t b)
1959 {
1960 return (a * b);
1961 }
1962
1963 static uint64_t
1964 _aulldiv(uint64_t a, uint64_t b)
1965 {
1966 return (a / b);
1967 }
1968
1969 static uint64_t
1970 _aullrem(uint64_t a, uint64_t b)
1971 {
1972 return (a % b);
1973 }
1974
1975 static int64_t
1976 _allshl(int64_t a, uint8_t b)
1977 {
1978 return (a << b);
1979 }
1980
1981 static uint64_t
1982 _aullshl(uint64_t a, uint8_t b)
1983 {
1984 return (a << b);
1985 }
1986
1987 static int64_t
1988 _allshr(int64_t a, uint8_t b)
1989 {
1990 return (a >> b);
1991 }
1992
1993 static uint64_t
1994 _aullshr(uint64_t a, uint8_t b)
1995 {
1996 return (a >> b);
1997 }
1998
1999 static slist_entry *
2000 ntoskrnl_pushsl(slist_header *head, slist_entry *entry)
2001 {
2002 slist_entry *oldhead;
2003
2004 oldhead = head->slh_list.slh_next;
2005 entry->sl_next = head->slh_list.slh_next;
2006 head->slh_list.slh_next = entry;
2007 head->slh_list.slh_depth++;
2008 head->slh_list.slh_seq++;
2009
2010 return (oldhead);
2011 }
2012
2013 static void
2014 InitializeSListHead(slist_header *head)
2015 {
2016 memset(head, 0, sizeof(*head));
2017 }
2018
2019 static slist_entry *
2020 ntoskrnl_popsl(slist_header *head)
2021 {
2022 slist_entry *first;
2023
2024 first = head->slh_list.slh_next;
2025 if (first != NULL) {
2026 head->slh_list.slh_next = first->sl_next;
2027 head->slh_list.slh_depth--;
2028 head->slh_list.slh_seq++;
2029 }
2030
2031 return (first);
2032 }
2033
/*
 * We need this to make lookaside lists work for amd64.
 * We pass a pointer to ExAllocatePoolWithTag() into the lookaside
 * list structure. For amd64 to work right, this has to be a
 * pointer to the wrapped version of the routine, not the
 * original. Letting the Windows driver invoke the original
 * function directly would result in a calling convention
 * mismatch and a prompt crash. On x86, this is effectively
 * a no-op since ipt_func and ipt_wrap are the same.
 */
2044
2045 static funcptr
2046 ntoskrnl_findwrap(funcptr func)
2047 {
2048 image_patch_table *patch;
2049
2050 patch = ntoskrnl_functbl;
2051 while (patch->ipt_func != NULL) {
2052 if ((funcptr)patch->ipt_func == func)
2053 return ((funcptr)patch->ipt_wrap);
2054 patch++;
2055 }
2056
2057 return (NULL);
2058 }
2059
2060 static void
2061 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
2062 lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2063 uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2064 {
2065 bzero((char *)lookaside, sizeof(paged_lookaside_list));
2066
2067 if (size < sizeof(slist_entry))
2068 lookaside->nll_l.gl_size = sizeof(slist_entry);
2069 else
2070 lookaside->nll_l.gl_size = size;
2071 lookaside->nll_l.gl_tag = tag;
2072 if (allocfunc == NULL)
2073 lookaside->nll_l.gl_allocfunc =
2074 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2075 else
2076 lookaside->nll_l.gl_allocfunc = allocfunc;
2077
2078 if (freefunc == NULL)
2079 lookaside->nll_l.gl_freefunc =
2080 ntoskrnl_findwrap((funcptr)ExFreePool);
2081 else
2082 lookaside->nll_l.gl_freefunc = freefunc;
2083
2084 #ifdef __i386__
2085 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2086 #endif
2087
2088 lookaside->nll_l.gl_type = NonPagedPool;
2089 lookaside->nll_l.gl_depth = depth;
2090 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2091 }
2092
2093 static void
2094 ExDeletePagedLookasideList(paged_lookaside_list *lookaside)
2095 {
2096 void *buf;
2097 void (*freefunc)(void *);
2098
2099 freefunc = lookaside->nll_l.gl_freefunc;
2100 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2101 MSCALL1(freefunc, buf);
2102 }
2103
2104 static void
2105 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
2106 lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
2107 uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
2108 {
2109 bzero((char *)lookaside, sizeof(npaged_lookaside_list));
2110
2111 if (size < sizeof(slist_entry))
2112 lookaside->nll_l.gl_size = sizeof(slist_entry);
2113 else
2114 lookaside->nll_l.gl_size = size;
2115 lookaside->nll_l.gl_tag = tag;
2116 if (allocfunc == NULL)
2117 lookaside->nll_l.gl_allocfunc =
2118 ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
2119 else
2120 lookaside->nll_l.gl_allocfunc = allocfunc;
2121
2122 if (freefunc == NULL)
2123 lookaside->nll_l.gl_freefunc =
2124 ntoskrnl_findwrap((funcptr)ExFreePool);
2125 else
2126 lookaside->nll_l.gl_freefunc = freefunc;
2127
2128 #ifdef __i386__
2129 KeInitializeSpinLock(&lookaside->nll_obsoletelock);
2130 #endif
2131
2132 lookaside->nll_l.gl_type = NonPagedPool;
2133 lookaside->nll_l.gl_depth = depth;
2134 lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
2135 }
2136
2137 static void
2138 ExDeleteNPagedLookasideList(npaged_lookaside_list *lookaside)
2139 {
2140 void *buf;
2141 void (*freefunc)(void *);
2142
2143 freefunc = lookaside->nll_l.gl_freefunc;
2144 while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
2145 MSCALL1(freefunc, buf);
2146 }
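
/*
 * For reference, a rough sketch of the allocation path a Windows driver
 * takes through one of these lists. The Ex*LookasideList() allocate and
 * free calls are inlined on the Windows side, so the driver pops the
 * list head itself and only calls the allocation function on a miss,
 * roughly:
 *
 *	buf = InterlockedPopEntrySList(&lookaside->nll_l.gl_listhead);
 *	if (buf == NULL)
 *		buf = MSCALL3(lookaside->nll_l.gl_allocfunc,
 *		    lookaside->nll_l.gl_type, lookaside->nll_l.gl_size,
 *		    lookaside->nll_l.gl_tag);
 *
 * This is why gl_allocfunc must hold the _wrapped_ function pointer
 * found by ntoskrnl_findwrap() above: the call is made with Windows
 * calling conventions.
 */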
2147
2148 slist_entry *
2149 InterlockedPushEntrySList(slist_header *head, slist_entry *entry)
2150 {
2151 slist_entry *oldhead;
2152
2153 mtx_spinlock(&ntoskrnl_interlock);
2154 oldhead = ntoskrnl_pushsl(head, entry);
2155 mtx_spinunlock(&ntoskrnl_interlock);
2156
2157 return (oldhead);
2158 }
2159
2160 slist_entry *
2161 InterlockedPopEntrySList(slist_header *head)
2162 {
2163 slist_entry *first;
2164
2165 mtx_spinlock(&ntoskrnl_interlock);
2166 first = ntoskrnl_popsl(head);
2167 mtx_spinunlock(&ntoskrnl_interlock);
2168
2169 return (first);
2170 }
2171
2172 static slist_entry *
2173 ExInterlockedPushEntrySList(slist_header *head, slist_entry *entry,
2174 kspin_lock *lock)
2175 {
2176 return (InterlockedPushEntrySList(head, entry));
2177 }
2178
2179 static slist_entry *
2180 ExInterlockedPopEntrySList(slist_header *head, kspin_lock *lock)
2181 {
2182 return (InterlockedPopEntrySList(head));
2183 }
2184
2185 uint16_t
2186 ExQueryDepthSList(slist_header *head)
2187 {
2188 uint16_t depth;
2189
2190 mtx_spinlock(&ntoskrnl_interlock);
2191 depth = head->slh_list.slh_depth;
2192 mtx_spinunlock(&ntoskrnl_interlock);
2193
2194 return (depth);
2195 }
2196
2197 void
2198 KeInitializeSpinLock(kspin_lock *lock)
2199 {
2200 *lock = 0;
2201 }
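
/*
 * The usual acquire/release pattern, as used throughout this file
 * (KeAcquireSpinLock() is the mapping of the Windows API onto the
 * DPC-level primitives below):
 *
 *	kspin_lock lock;
 *	uint8_t irql;
 *
 *	KeInitializeSpinLock(&lock);
 *	KeAcquireSpinLock(&lock, &irql);
 *	... critical section at DISPATCH_LEVEL ...
 *	KeReleaseSpinLock(&lock, irql);
 */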
2202
2203 #ifdef __i386__
2204 void
2205 KefAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2206 {
2207 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2208 int i = 0;
2209 #endif
2210
2211 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
2212 /* sit and spin */;
2213 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
2214 i++;
2215 if (i > 200000000)
2216 panic("DEADLOCK!");
2217 #endif
2218 }
2219 }
2220
2221 void
2222 KefReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2223 {
2224 atomic_store_rel_int((volatile u_int *)lock, 0);
2225 }
2226
2227 uint8_t
2228 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
2229 {
2230 uint8_t oldirql;
2231
2232 if (KeGetCurrentIrql() > DISPATCH_LEVEL)
2233 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
2234
2235 KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
2236 KeAcquireSpinLockAtDpcLevel(lock);
2237
2238 return (oldirql);
2239 }
2240 #else
2241 void
2242 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
2243 {
2244 while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
2245 /* sit and spin */;
2246 }
2247
2248 void
2249 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
2250 {
2251 atomic_store_rel_int((volatile u_int *)lock, 0);
2252 }
2253 #endif /* __i386__ */
2254
2255 uintptr_t
2256 InterlockedExchange(volatile uint32_t *dst, uintptr_t val)
2257 {
2258 uintptr_t r;
2259
2260 mtx_spinlock(&ntoskrnl_interlock);
2261 r = *dst;
2262 *dst = val;
2263 mtx_spinunlock(&ntoskrnl_interlock);
2264
2265 return (r);
2266 }
2267
static uint32_t
InterlockedIncrement(volatile uint32_t *addend)
{
	/*
	 * atomic_fetchadd_int() keeps the access at the proper 32-bit
	 * width and lets us return the value this caller produced,
	 * rather than a racy re-read of *addend.
	 */
	return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
}

static uint32_t
InterlockedDecrement(volatile uint32_t *addend)
{
	return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
}
2281
2282 static void
2283 ExInterlockedAddLargeStatistic(uint64_t *addend, uint32_t inc)
2284 {
2285 mtx_spinlock(&ntoskrnl_interlock);
2286 *addend += inc;
2287 mtx_spinunlock(&ntoskrnl_interlock);
}
2289
2290 mdl *
2291 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
2292 uint8_t chargequota, irp *iopkt)
2293 {
2294 mdl *m;
2295 int zone = 0;
2296
2297 if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
2298 m = ExAllocatePoolWithTag(NonPagedPool,
2299 MmSizeOfMdl(vaddr, len), 0);
	else {
		m = objcache_get(mdl_cache, M_NOWAIT);
		/* objcache_get() can fail with M_NOWAIT; don't bzero NULL. */
		if (m != NULL)
			bzero(m, sizeof(mdl));
		zone++;
	}
2305
2306 if (m == NULL)
2307 return (NULL);
2308
2309 MmInitializeMdl(m, vaddr, len);
2310
	/*
	 * MmInitializeMdl() clears the flags field, so we
	 * have to set this here. If the MDL came from the
	 * MDL objcache, tag it so we can release it to
	 * the right place later.
	 */
2317 if (zone)
2318 m->mdl_flags = MDL_ZONE_ALLOCED;
2319
2320 if (iopkt != NULL) {
2321 if (secondarybuf == TRUE) {
2322 mdl *last;
2323 last = iopkt->irp_mdl;
2324 while (last->mdl_next != NULL)
2325 last = last->mdl_next;
2326 last->mdl_next = m;
2327 } else {
2328 if (iopkt->irp_mdl != NULL)
2329 panic("leaking an MDL in IoAllocateMdl()");
2330 iopkt->irp_mdl = m;
2331 }
2332 }
2333
2334 return (m);
2335 }
2336
2337 void
2338 IoFreeMdl(mdl *m)
2339 {
2340 if (m == NULL)
2341 return;
2342
2343 if (m->mdl_flags & MDL_ZONE_ALLOCED)
2344 objcache_put(mdl_cache, m);
2345 else
2346 ExFreePool(m);
2347 }
2348
2349 static void *
2350 MmAllocateContiguousMemory(uint32_t size, uint64_t highest)
2351 {
2352 void *addr;
2353 size_t pagelength = roundup(size, PAGE_SIZE);
2354
2355 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2356
2357 return (addr);
2358 }
2359
2360 #if 0 /* XXX swildner */
2361 static void *
2362 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2363 uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2364 {
2365 vm_memattr_t memattr;
2366 void *ret;
2367
2368 switch (cachetype) {
2369 case MmNonCached:
2370 memattr = VM_MEMATTR_UNCACHEABLE;
2371 break;
2372 case MmWriteCombined:
2373 memattr = VM_MEMATTR_WRITE_COMBINING;
2374 break;
2375 case MmNonCachedUnordered:
2376 memattr = VM_MEMATTR_UNCACHEABLE;
2377 break;
2378 case MmCached:
2379 case MmHardwareCoherentCached:
2380 case MmUSWCCached:
2381 default:
2382 memattr = VM_MEMATTR_DEFAULT;
2383 break;
2384 }
2385
2386 ret = (void *)kmem_alloc_contig(kernel_map, size, M_ZERO | M_NOWAIT,
2387 lowest, highest, PAGE_SIZE, boundary, memattr);
2388 if (ret != NULL)
2389 malloc_type_allocated(M_DEVBUF, round_page(size));
2390 return (ret);
2391 }
2392 #else
2393 static void *
2394 MmAllocateContiguousMemorySpecifyCache(uint32_t size, uint64_t lowest,
2395 uint64_t highest, uint64_t boundary, enum nt_caching_type cachetype)
2396 {
2397 #if 0
2398 void *addr;
2399 size_t pagelength = roundup(size, PAGE_SIZE);
2400
2401 addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
2402
2403 return(addr);
2404 #else
2405 panic("%s", __func__);
2406 #endif
2407 }
2408 #endif
2409
2410 static void
2411 MmFreeContiguousMemory(void *base)
2412 {
2413 ExFreePool(base);
2414 }
2415
2416 static void
2417 MmFreeContiguousMemorySpecifyCache(void *base, uint32_t size,
2418 enum nt_caching_type cachetype)
2419 {
2420 contigfree(base, size, M_DEVBUF);
2421 }
2422
2423 static uint32_t
2424 MmSizeOfMdl(void *vaddr, size_t len)
2425 {
2426 uint32_t l;
2427
2428 l = sizeof(struct mdl) +
2429 (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
2430
2431 return (l);
2432 }
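
/*
 * Worked example: a 5000-byte buffer starting 100 bytes into a page
 * spans SPAN_PAGES(100, 5000) = 2 pages with 4K pages (5100 bytes from
 * the start of the first page), so the MDL needs sizeof(struct mdl)
 * plus two page-array slots.
 */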
2433
2434 /*
2435 * The Microsoft documentation says this routine fills in the
2436 * page array of an MDL with the _physical_ page addresses that
2437 * comprise the buffer, but we don't really want to do that here.
2438 * Instead, we just fill in the page array with the kernel virtual
2439 * addresses of the buffers.
2440 */
2441 void
2442 MmBuildMdlForNonPagedPool(mdl *m)
2443 {
2444 vm_offset_t *mdl_pages;
2445 int pagecnt, i;
2446
2447 pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
2448
2449 if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
2450 panic("not enough pages in MDL to describe buffer");
2451
2452 mdl_pages = MmGetMdlPfnArray(m);
2453
	for (i = 0; i < pagecnt; i++)
		mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
2456
2457 m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
2458 m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
2459 }
2460
2461 static void *
2462 MmMapLockedPages(mdl *buf, uint8_t accessmode)
2463 {
2464 buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
2465 return (MmGetMdlVirtualAddress(buf));
2466 }
2467
2468 static void *
2469 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
2470 void *vaddr, uint32_t bugcheck, uint32_t prio)
2471 {
2472 return (MmMapLockedPages(buf, accessmode));
2473 }
2474
2475 static void
2476 MmUnmapLockedPages(void *vaddr, mdl *buf)
2477 {
2478 buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
2479 }
2480
/*
 * This function has a problem in that it will break if you
 * compile this module without PAE and try to use it on a PAE
 * kernel. Unfortunately, there's no way around this at the
 * moment. It's slightly less broken than using pmap_kextract().
 * You'd think the virtual memory subsystem would help us out
 * here, but it doesn't.
 */
2489
2490 static uint64_t
2491 MmGetPhysicalAddress(void *base)
2492 {
2493 return (pmap_extract(kernel_map.pmap, (vm_offset_t)base));
2494 }
2495
2496 void *
2497 MmGetSystemRoutineAddress(unicode_string *ustr)
2498 {
2499 ansi_string astr;
2500
2501 if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
2502 return (NULL);
2503 return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
2504 }
2505
2506 uint8_t
2507 MmIsAddressValid(void *vaddr)
2508 {
2509 if (pmap_extract(kernel_map.pmap, (vm_offset_t)vaddr))
2510 return (TRUE);
2511
2512 return (FALSE);
2513 }
2514
2515 void *
2516 MmMapIoSpace(uint64_t paddr, uint32_t len, uint32_t cachetype)
2517 {
2518 devclass_t nexus_class;
2519 device_t *nexus_devs, devp;
2520 int nexus_count = 0;
2521 device_t matching_dev = NULL;
2522 struct resource *res;
2523 int i;
2524 vm_offset_t v;
2525
2526 /* There will always be at least one nexus. */
2527
2528 nexus_class = devclass_find("nexus");
2529 devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
2530
2531 for (i = 0; i < nexus_count; i++) {
2532 devp = nexus_devs[i];
2533 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
2534 if (matching_dev)
2535 break;
2536 }
2537
2538 kfree(nexus_devs, M_TEMP);
2539
2540 if (matching_dev == NULL)
2541 return (NULL);
2542
2543 v = (vm_offset_t)rman_get_virtual(res);
2544 if (paddr > rman_get_start(res))
2545 v += paddr - rman_get_start(res);
2546
2547 return ((void *)v);
2548 }
2549
2550 void
2551 MmUnmapIoSpace(void *vaddr, size_t len)
2552 {
2553 }
2556 static device_t
2557 ntoskrnl_finddev(device_t dev, uint64_t paddr, struct resource **res)
2558 {
2559 device_t *children = NULL;
2560 device_t matching_dev;
2561 int childcnt;
2562 struct resource *r;
2563 struct resource_list *rl;
2564 struct resource_list_entry *rle;
2565 uint32_t flags;
2566 int i;
2567
2568 /* We only want devices that have been successfully probed. */
2569
2570 if (device_is_alive(dev) == FALSE)
2571 return (NULL);
2572
2573 rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
2574 if (rl != NULL) {
2575 SLIST_FOREACH(rle, rl, link) {
2576 r = rle->res;
2577
2578 if (r == NULL)
2579 continue;
2580
2581 flags = rman_get_flags(r);
2582
2583 if (rle->type == SYS_RES_MEMORY &&
2584 paddr >= rman_get_start(r) &&
2585 paddr <= rman_get_end(r)) {
2586 if (!(flags & RF_ACTIVE))
2587 bus_activate_resource(dev,
2588 SYS_RES_MEMORY, 0, r);
2589 *res = r;
2590 return (dev);
2591 }
2592 }
2593 }
2594
2595 /*
2596 * If this device has children, do another
2597 * level of recursion to inspect them.
2598 */
2599
2600 device_get_children(dev, &children, &childcnt);
2601
2602 for (i = 0; i < childcnt; i++) {
2603 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
2604 if (matching_dev != NULL) {
2605 kfree(children, M_TEMP);
2606 return (matching_dev);
2607 }
2608 }
2611 /* Won't somebody please think of the children! */
2612
2613 if (children != NULL)
2614 kfree(children, M_TEMP);
2615
2616 return (NULL);
2617 }
2618
2619 /*
2620 * Workitems are unlike DPCs, in that they run in a user-mode thread
2621 * context rather than at DISPATCH_LEVEL in kernel context. In our
2622 * case we run them in kernel context anyway.
2623 */
2624 static void
2625 ntoskrnl_workitem_thread(void *arg)
2626 {
2627 kdpc_queue *kq;
2628 list_entry *l;
2629 io_workitem *iw;
2630 uint8_t irql;
2631
2632 kq = arg;
2633
2634 InitializeListHead(&kq->kq_disp);
2635 kq->kq_td = curthread;
2636 kq->kq_exit = 0;
2637 KeInitializeSpinLock(&kq->kq_lock);
2638 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
2639
2640 while (1) {
2641 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
2642
2643 KeAcquireSpinLock(&kq->kq_lock, &irql);
2644
2645 if (kq->kq_exit) {
2646 kq->kq_exit = 0;
2647 KeReleaseSpinLock(&kq->kq_lock, irql);
2648 break;
2649 }
2650
2651 while (!IsListEmpty(&kq->kq_disp)) {
2652 l = RemoveHeadList(&kq->kq_disp);
2653 iw = CONTAINING_RECORD(l,
2654 io_workitem, iw_listentry);
2655 InitializeListHead((&iw->iw_listentry));
2656 if (iw->iw_func == NULL)
2657 continue;
2658 KeReleaseSpinLock(&kq->kq_lock, irql);
2659 MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
2660 KeAcquireSpinLock(&kq->kq_lock, &irql);
2661 }
2662
2663 KeReleaseSpinLock(&kq->kq_lock, irql);
2664 }
2665
2666 wakeup(curthread);
2667 kthread_exit();
2668 return; /* notreached */
2669 }
2670
2671 static ndis_status
2672 RtlCharToInteger(const char *src, uint32_t base, uint32_t *val)
2673 {
2674 int negative = 0;
2675 uint32_t res;
2676
2677 if (!src || !val)
2678 return (STATUS_ACCESS_VIOLATION);
2679 while (*src != '\0' && *src <= ' ')
2680 src++;
2681 if (*src == '+')
2682 src++;
2683 else if (*src == '-') {
2684 src++;
2685 negative = 1;
2686 }
2687 if (base == 0) {
2688 base = 10;
		if (*src == '0') {
2690 src++;
2691 if (*src == 'b') {
2692 base = 2;
2693 src++;
2694 } else if (*src == 'o') {
2695 base = 8;
2696 src++;
2697 } else if (*src == 'x') {
2698 base = 16;
2699 src++;
2700 }
2701 }
2702 } else if (!(base == 2 || base == 8 || base == 10 || base == 16))
2703 return (STATUS_INVALID_PARAMETER);
2704
2705 for (res = 0; *src; src++) {
2706 int v;
2707 if (isdigit(*src))
			v = *src - '0';
2709 else if (isxdigit(*src))
2710 v = tolower(*src) - 'a' + 10;
		else
			v = base;	/* force the range check below to fail */
2713 if (v >= base)
2714 return (STATUS_INVALID_PARAMETER);
2715 res = res * base + v;
2716 }
2717 *val = negative ? -res : res;
2718 return (STATUS_SUCCESS);
2719 }
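
/*
 * Examples of the prefix handling above: with base 0, "0x1f" parses as
 * 31, "0o17" as 15 and "0b101" as 5, while "-42" defaults to base 10
 * and stores (uint32_t)-42.
 */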
2720
2721 static void
2722 ntoskrnl_destroy_workitem_threads(void)
2723 {
2724 kdpc_queue *kq;
2725 int i;
2726
2727 for (i = 0; i < WORKITEM_THREADS; i++) {
2728 kq = wq_queues + i;
2729 kq->kq_exit = 1;
2730 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2731 while (kq->kq_exit)
2732 tsleep(kq->kq_td, 0, "waitiw", hz/10);
2733 }
2734 }
2735
2736 io_workitem *
2737 IoAllocateWorkItem(device_object *dobj)
2738 {
2739 io_workitem *iw;
2740
2741 iw = objcache_get(iw_cache, M_NOWAIT);
2742 if (iw == NULL)
2743 return (NULL);
2744
2745 InitializeListHead(&iw->iw_listentry);
2746 iw->iw_dobj = dobj;
2747
2748 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
2749 iw->iw_idx = wq_idx;
2750 WORKIDX_INC(wq_idx);
2751 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
2752
2753 return (iw);
2754 }
2755
2756 void
2757 IoFreeWorkItem(io_workitem *iw)
2758 {
2759 objcache_put(iw_cache, iw);
2760 }
2761
2762 void
2763 IoQueueWorkItem(io_workitem *iw, io_workitem_func iw_func, uint32_t qtype,
2764 void *ctx)
2765 {
2766 kdpc_queue *kq;
2767 list_entry *l;
2768 io_workitem *cur;
2769 uint8_t irql;
2770
2771 kq = wq_queues + iw->iw_idx;
2772
2773 KeAcquireSpinLock(&kq->kq_lock, &irql);
2774
2775 /*
2776 * Traverse the list and make sure this workitem hasn't
2777 * already been inserted. Queuing the same workitem
2778 * twice will hose the list but good.
2779 */
2780
2781 l = kq->kq_disp.nle_flink;
2782 while (l != &kq->kq_disp) {
2783 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2784 if (cur == iw) {
2785 /* Already queued -- do nothing. */
2786 KeReleaseSpinLock(&kq->kq_lock, irql);
2787 return;
2788 }
2789 l = l->nle_flink;
2790 }
2791
2792 iw->iw_func = iw_func;
2793 iw->iw_ctx = ctx;
2794
2795 InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
2796 KeReleaseSpinLock(&kq->kq_lock, irql);
2797
2798 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
2799 }
2800
2801 static void
2802 ntoskrnl_workitem(device_object *dobj, void *arg)
2803 {
2804 io_workitem *iw;
2805 work_queue_item *w;
2806 work_item_func f;
2807
2808 iw = arg;
2809 w = (work_queue_item *)dobj;
2810 f = (work_item_func)w->wqi_func;
2811 objcache_put(iw_cache, iw);
2812 MSCALL2(f, w, w->wqi_ctx);
2813 }
2814
2815 /*
2816 * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
2817 * warns that it's unsafe and to use IoQueueWorkItem() instead. The
2818 * problem with ExQueueWorkItem() is that it can't guard against
2819 * the condition where a driver submits a job to the work queue and
2820 * is then unloaded before the job is able to run. IoQueueWorkItem()
2821 * acquires a reference to the device's device_object via the
2822 * object manager and retains it until after the job has completed,
2823 * which prevents the driver from being unloaded before the job
2824 * runs. (We don't currently support this behavior, though hopefully
2825 * that will change once the object manager API is fleshed out a bit.)
2826 *
2827 * Having said all that, the ExQueueWorkItem() API remains, because
2828 * there are still other parts of Windows that use it, including
2829 * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
2830 * We fake up the ExQueueWorkItem() API on top of our implementation
2831 * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
2832 * for ExQueueWorkItem() jobs, and we pass a pointer to the work
2833 * queue item (provided by the caller) in to IoAllocateWorkItem()
2834 * instead of the device_object. We need to save this pointer so
2835 * we can apply a sanity check: as with the DPC queue and other
2836 * workitem queues, we can't allow the same work queue item to
 * be queued twice. If it's already pending, we silently return.
2838 */
2839
2840 void
2841 ExQueueWorkItem(work_queue_item *w, uint32_t qtype)
2842 {
2843 io_workitem *iw;
2844 io_workitem_func iwf;
2845 kdpc_queue *kq;
2846 list_entry *l;
2847 io_workitem *cur;
2848 uint8_t irql;
2851 /*
2852 * We need to do a special sanity test to make sure
2853 * the ExQueueWorkItem() API isn't used to queue
2854 * the same workitem twice. Rather than checking the
2855 * io_workitem pointer itself, we test the attached
2856 * device object, which is really a pointer to the
2857 * legacy work queue item structure.
2858 */
2859
2860 kq = wq_queues + WORKITEM_LEGACY_THREAD;
2861 KeAcquireSpinLock(&kq->kq_lock, &irql);
2862 l = kq->kq_disp.nle_flink;
2863 while (l != &kq->kq_disp) {
2864 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
2865 if (cur->iw_dobj == (device_object *)w) {
2866 /* Already queued -- do nothing. */
2867 KeReleaseSpinLock(&kq->kq_lock, irql);
2868 return;
2869 }
2870 l = l->nle_flink;
2871 }
2872 KeReleaseSpinLock(&kq->kq_lock, irql);
2873
2874 iw = IoAllocateWorkItem((device_object *)w);
2875 if (iw == NULL)
2876 return;
2877
2878 iw->iw_idx = WORKITEM_LEGACY_THREAD;
2879 iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
2880 IoQueueWorkItem(iw, iwf, qtype, iw);
2881 }
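
/*
 * For reference, a legacy driver uses this API roughly as follows
 * (ExInitializeWorkItem() is inlined on the Windows side and just
 * fills in wqi_func and wqi_ctx; my_work_func and my_ctx are
 * hypothetical, and qtype is one of the Windows work queue types):
 *
 *	static work_queue_item wqi;
 *
 *	ExInitializeWorkItem(&wqi, my_work_func, my_ctx);
 *	ExQueueWorkItem(&wqi, qtype);
 *
 * ntoskrnl_workitem() above then recovers the work_queue_item from
 * the iw_dobj slot and dispatches wqi_func via MSCALL2().
 */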
2882
2883 static void
2884 RtlZeroMemory(void *dst, size_t len)
2885 {
2886 bzero(dst, len);
2887 }
2888
2889 static void
2890 RtlSecureZeroMemory(void *dst, size_t len)
2891 {
2892 memset(dst, 0, len);
2893 }
2894
2895 static void
2896 RtlFillMemory(void *dst, size_t len, uint8_t c)
2897 {
2898 memset(dst, c, len);
2899 }
2900
2901 static void
2902 RtlMoveMemory(void *dst, const void *src, size_t len)
2903 {
2904 memmove(dst, src, len);
2905 }
2906
2907 static void
2908 RtlCopyMemory(void *dst, const void *src, size_t len)
2909 {
2910 bcopy(src, dst, len);
2911 }
2912
2913 static size_t
2914 RtlCompareMemory(const void *s1, const void *s2, size_t len)
2915 {
	size_t i;
	const uint8_t *m1, *m2;

	m1 = s1;
	m2 = s2;
2921
2922 for (i = 0; i < len && m1[i] == m2[i]; i++);
2923 return (i);
2924 }
2925
2926 void
2927 RtlInitAnsiString(ansi_string *dst, char *src)
2928 {
2929 ansi_string *a;
2930
2931 a = dst;
2932 if (a == NULL)
2933 return;
2934 if (src == NULL) {
2935 a->as_len = a->as_maxlen = 0;
2936 a->as_buf = NULL;
2937 } else {
2938 a->as_buf = src;
2939 a->as_len = a->as_maxlen = strlen(src);
2940 }
2941 }
2942
2943 void
2944 RtlInitUnicodeString(unicode_string *dst, uint16_t *src)
2945 {
2946 unicode_string *u;
2947 int i;
2948
2949 u = dst;
2950 if (u == NULL)
2951 return;
2952 if (src == NULL) {
2953 u->us_len = u->us_maxlen = 0;
2954 u->us_buf = NULL;
2955 } else {
2956 i = 0;
2957 while(src[i] != 0)
2958 i++;
2959 u->us_buf = src;
2960 u->us_len = u->us_maxlen = i * 2;
2961 }
2962 }
2963
2964 ndis_status
2965 RtlUnicodeStringToInteger(unicode_string *ustr, uint32_t base, uint32_t *val)
2966 {
2967 uint16_t *uchr;
2968 int len, neg = 0;
2969 char abuf[64];
2970 char *astr;
2971
2972 uchr = ustr->us_buf;
2973 len = ustr->us_len;
2974 bzero(abuf, sizeof(abuf));
2975
2976 if ((char)((*uchr) & 0xFF) == '-') {
2977 neg = 1;
2978 uchr++;
2979 len -= 2;
2980 } else if ((char)((*uchr) & 0xFF) == '+') {
2981 neg = 0;
2982 uchr++;
2983 len -= 2;
2984 }
2985
2986 if (base == 0) {
2987 if ((char)((*uchr) & 0xFF) == 'b') {
2988 base = 2;
2989 uchr++;
2990 len -= 2;
2991 } else if ((char)((*uchr) & 0xFF) == 'o') {
2992 base = 8;
2993 uchr++;
2994 len -= 2;
2995 } else if ((char)((*uchr) & 0xFF) == 'x') {
2996 base = 16;
2997 uchr++;
2998 len -= 2;
2999 } else
3000 base = 10;
3001 }
3002
3003 astr = abuf;
3004 if (neg) {
3005 strcpy(astr, "-");
3006 astr++;
3007 }
3008
3009 ntoskrnl_unicode_to_ascii(uchr, astr, len);
3010 *val = strtoul(abuf, NULL, base);
3011
3012 return (STATUS_SUCCESS);
3013 }
3014
3015 void
3016 RtlFreeUnicodeString(unicode_string *ustr)
3017 {
3018 if (ustr->us_buf == NULL)
3019 return;
3020 ExFreePool(ustr->us_buf);
3021 ustr->us_buf = NULL;
3022 }
3023
3024 void
3025 RtlFreeAnsiString(ansi_string *astr)
3026 {
3027 if (astr->as_buf == NULL)
3028 return;
3029 ExFreePool(astr->as_buf);
3030 astr->as_buf = NULL;
3031 }
3032
3033 static int
3034 atoi(const char *str)
3035 {
3036 return (int)strtol(str, NULL, 10);
3037 }
3038
3039 static long
3040 atol(const char *str)
3041 {
3042 return strtol(str, NULL, 10);
3043 }
3044
3045 static int
3046 rand(void)
3047 {
3048 struct timeval tv;
3049
3050 microtime(&tv);
3051 skrandom(tv.tv_usec);
3052 return ((int)krandom());
3053 }
3054
3055 static void
3056 srand(unsigned int seed)
3057 {
3058 skrandom(seed);
3059 }
3060
3061 static uint8_t
3062 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
3063 {
3064 if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
3065 return (TRUE);
3066 return (FALSE);
3067 }
3068
3069 static int32_t
3070 IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
3071 uint32_t mask, void **key)
3072 {
3073 return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
3074 }
3075
3076 static ndis_status
3077 IoGetDeviceObjectPointer(unicode_string *name, uint32_t reqaccess,
3078 void *fileobj, device_object *devobj)
3079 {
3080 return (STATUS_SUCCESS);
3081 }
3082
3083 static ndis_status
3084 IoGetDeviceProperty(device_object *devobj, uint32_t regprop, uint32_t buflen,
3085 void *prop, uint32_t *reslen)
3086 {
3087 driver_object *drv;
3088 uint16_t **name;
3089
3090 drv = devobj->do_drvobj;
3091
3092 switch (regprop) {
3093 case DEVPROP_DRIVER_KEYNAME:
3094 name = prop;
3095 *name = drv->dro_drivername.us_buf;
3096 *reslen = drv->dro_drivername.us_len;
3097 break;
3098 default:
3099 return (STATUS_INVALID_PARAMETER_2);
3100 break;
3101 }
3102
3103 return (STATUS_SUCCESS);
3104 }
3105
3106 static void
3107 KeInitializeMutex(kmutant *kmutex, uint32_t level)
3108 {
3109 InitializeListHead((&kmutex->km_header.dh_waitlisthead));
3110 kmutex->km_abandoned = FALSE;
3111 kmutex->km_apcdisable = 1;
3112 kmutex->km_header.dh_sigstate = 1;
3113 kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
3114 kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
3115 kmutex->km_ownerthread = NULL;
3116 }
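
/*
 * Note on the convention used here: dh_sigstate == 1 means the mutant
 * is free. Each satisfied wait decrements it (possibly far below zero
 * for recursive acquisitions, hence the INT32_MIN sanity check in the
 * wait code), and KeReleaseMutex() below increments it, handing the
 * mutant off to a waiter once it returns to 1.
 */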
3117
3118 static uint32_t
3119 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
3120 {
3121 uint32_t prevstate;
3122
3123 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3124 prevstate = kmutex->km_header.dh_sigstate;
3125 if (kmutex->km_ownerthread != curthread) {
3126 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3127 return (STATUS_MUTANT_NOT_OWNED);
3128 }
3129
3130 kmutex->km_header.dh_sigstate++;
3131 kmutex->km_abandoned = FALSE;
3132
3133 if (kmutex->km_header.dh_sigstate == 1) {
3134 kmutex->km_ownerthread = NULL;
3135 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
3136 }
3137
3138 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3139
3140 return (prevstate);
3141 }
3142
3143 static uint32_t
3144 KeReadStateMutex(kmutant *kmutex)
3145 {
3146 return (kmutex->km_header.dh_sigstate);
3147 }
3148
3149 void
3150 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
3151 {
3152 InitializeListHead((&kevent->k_header.dh_waitlisthead));
3153 kevent->k_header.dh_sigstate = state;
3154 if (type == EVENT_TYPE_NOTIFY)
3155 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
3156 else
3157 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
3158 kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
3159 }
3160
3161 uint32_t
3162 KeResetEvent(nt_kevent *kevent)
3163 {
3164 uint32_t prevstate;
3165
3166 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3167 prevstate = kevent->k_header.dh_sigstate;
3168 kevent->k_header.dh_sigstate = FALSE;
3169 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3170
3171 return (prevstate);
3172 }
3173
3174 uint32_t
3175 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
3176 {
3177 uint32_t prevstate;
3178 wait_block *w;
3179 nt_dispatch_header *dh;
3180 wb_ext *we;
3181
3182 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3183 prevstate = kevent->k_header.dh_sigstate;
3184 dh = &kevent->k_header;
3185
3186 if (IsListEmpty(&dh->dh_waitlisthead))
3187 /*
3188 * If there's nobody in the waitlist, just set
3189 * the state to signalled.
3190 */
3191 dh->dh_sigstate = 1;
3192 else {
3193 /*
3194 * Get the first waiter. If this is a synchronization
3195 * event, just wake up that one thread (don't bother
3196 * setting the state to signalled since we're supposed
3197 * to automatically clear synchronization events anyway).
3198 *
3199 * If it's a notification event, or the first
3200 * waiter is doing a WAITTYPE_ALL wait, go through
3201 * the full wait satisfaction process.
3202 */
3203 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
3204 wait_block, wb_waitlist);
3205 we = w->wb_ext;
3206 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
3207 w->wb_waittype == WAITTYPE_ALL) {
3208 if (prevstate == 0) {
3209 dh->dh_sigstate = 1;
3210 ntoskrnl_waittest(dh, increment);
3211 }
3212 } else {
3213 w->wb_awakened |= TRUE;
3214 cv_broadcastpri(&we->we_cv,
3215 (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
3216 w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
3217 }
3218 }
3219
3220 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3221
3222 return (prevstate);
3223 }
3224
3225 void
3226 KeClearEvent(nt_kevent *kevent)
3227 {
3228 kevent->k_header.dh_sigstate = FALSE;
3229 }
3230
3231 uint32_t
3232 KeReadStateEvent(nt_kevent *kevent)
3233 {
3234 return (kevent->k_header.dh_sigstate);
3235 }
3236
3237 /*
3238 * The object manager in Windows is responsible for managing
3239 * references and access to various types of objects, including
3240 * device_objects, events, threads, timers and so on. However,
3241 * there's a difference in the way objects are handled in user
3242 * mode versus kernel mode.
3243 *
3244 * In user mode (i.e. Win32 applications), all objects are
3245 * managed by the object manager. For example, when you create
3246 * a timer or event object, you actually end up with an
3247 * object_header (for the object manager's bookkeeping
3248 * purposes) and an object body (which contains the actual object
3249 * structure, e.g. ktimer, kevent, etc...). This allows Windows
3250 * to manage resource quotas and to enforce access restrictions
3251 * on basically every kind of system object handled by the kernel.
3252 *
3253 * However, in kernel mode, you only end up using the object
3254 * manager some of the time. For example, in a driver, you create
3255 * a timer object by simply allocating the memory for a ktimer
3256 * structure and initializing it with KeInitializeTimer(). Hence,
3257 * the timer has no object_header and no reference counting or
3258 * security/resource checks are done on it. The assumption in
3259 * this case is that if you're running in kernel mode, you know
3260 * what you're doing, and you're already at an elevated privilege
3261 * anyway.
3262 *
3263 * There are some exceptions to this. The two most important ones
3264 * for our purposes are device_objects and threads. We need to use
3265 * the object manager to do reference counting on device_objects,
3266 * and for threads, you can only get a pointer to a thread's
3267 * dispatch header by using ObReferenceObjectByHandle() on the
3268 * handle returned by PsCreateSystemThread().
3269 */
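
/*
 * A sketch of the one pattern we really need to support here, waiting
 * for a thread to exit (reqaccess, func and ctx stand in for whatever
 * the driver passes):
 *
 *	ndis_handle th;
 *	struct thread *tobj;
 *
 *	PsCreateSystemThread(&th, reqaccess, NULL, NULL, NULL, func, ctx);
 *	ObReferenceObjectByHandle(th, reqaccess, NULL, 0,
 *	    (void **)&tobj, NULL);
 *	KeWaitForSingleObject(tobj, 0, 0, FALSE, NULL);
 *	ObfDereferenceObject(tobj);
 *
 * The nt_objref we hand back begins with a dispatch header, which is
 * what makes the KeWaitForSingleObject() call work.
 */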
3270
3271 static ndis_status
3272 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
3273 uint8_t accessmode, void **object, void **handleinfo)
3274 {
3275 nt_objref *nr;
3276
3277 nr = kmalloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
3278 if (nr == NULL)
3279 return (STATUS_INSUFFICIENT_RESOURCES);
3280
3281 InitializeListHead((&nr->no_dh.dh_waitlisthead));
3282 nr->no_obj = handle;
3283 nr->no_dh.dh_type = DISP_TYPE_THREAD;
3284 nr->no_dh.dh_sigstate = 0;
3285 nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
3286 sizeof(uint32_t));
3287 TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
3288 *object = nr;
3289
3290 return (STATUS_SUCCESS);
3291 }
3292
3293 static void
3294 ObfDereferenceObject(void *object)
3295 {
3296 nt_objref *nr;
3297
3298 nr = object;
3299 TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
3300 kfree(nr, M_DEVBUF);
3301 }
3302
3303 static uint32_t
3304 ZwClose(ndis_handle handle)
3305 {
3306 return (STATUS_SUCCESS);
3307 }
3308
3309 static uint32_t
3310 WmiQueryTraceInformation(uint32_t traceclass, void *traceinfo,
3311 uint32_t infolen, uint32_t reqlen, void *buf)
3312 {
3313 return (STATUS_NOT_FOUND);
3314 }
3315
3316 static uint32_t
3317 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
3318 void *guid, uint16_t messagenum, ...)
3319 {
3320 return (STATUS_SUCCESS);
3321 }
3322
3323 static uint32_t
3324 IoWMIRegistrationControl(device_object *dobj, uint32_t action)
3325 {
3326 return (STATUS_SUCCESS);
3327 }
3328
3329 /*
3330 * This is here just in case the thread returns without calling
3331 * PsTerminateSystemThread().
3332 */
3333 static void
3334 ntoskrnl_thrfunc(void *arg)
3335 {
3336 thread_context *thrctx;
3337 uint32_t (*tfunc)(void *);
3338 void *tctx;
3339 uint32_t rval;
3340
3341 thrctx = arg;
3342 tfunc = thrctx->tc_thrfunc;
3343 tctx = thrctx->tc_thrctx;
3344 kfree(thrctx, M_TEMP);
3345
3346 rval = MSCALL1(tfunc, tctx);
3347
3348 PsTerminateSystemThread(rval);
3349 return; /* notreached */
3350 }
3351
3352 static ndis_status
3353 PsCreateSystemThread(ndis_handle *handle, uint32_t reqaccess, void *objattrs,
3354 ndis_handle phandle, void *clientid, void *thrfunc, void *thrctx)
3355 {
3356 int error;
3357 thread_context *tc;
3358 struct thread *p;
3359
3360 tc = kmalloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
3361 if (tc == NULL)
3362 return (STATUS_INSUFFICIENT_RESOURCES);
3363
3364 tc->tc_thrctx = thrctx;
3365 tc->tc_thrfunc = thrfunc;
3366
3367 error = kthread_create(ntoskrnl_thrfunc, tc, &p, "Win kthread %d",
3368 ntoskrnl_kth);
3369
3370 if (error) {
3371 kfree(tc, M_TEMP);
3372 return (STATUS_INSUFFICIENT_RESOURCES);
3373 }
3374
3375 *handle = p;
3376 ntoskrnl_kth++;
3377
3378 return (STATUS_SUCCESS);
3379 }
3380
3381 /*
3382 * In Windows, the exit of a thread is an event that you're allowed
3383 * to wait on, assuming you've obtained a reference to the thread using
3384 * ObReferenceObjectByHandle(). Unfortunately, the only way we can
3385 * simulate this behavior is to register each thread we create in a
3386 * reference list, and if someone holds a reference to us, we poke
3387 * them.
3388 */
3389 static ndis_status
3390 PsTerminateSystemThread(ndis_status status)
3391 {
3392 struct nt_objref *nr;
3393
3394 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
	TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
		/* The handles we vend are struct thread pointers. */
		if (nr->no_obj != curthread)
			continue;
3398 nr->no_dh.dh_sigstate = 1;
3399 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
3400 break;
3401 }
3402 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3403
3404 ntoskrnl_kth--;
3405
3406 wakeup(curthread);
3407 kthread_exit();
3408 return (0); /* notreached */
3409 }
3410
3411 static uint32_t
3412 DbgPrint(char *fmt, ...)
3413 {
3414 va_list ap;
3415
3416 if (bootverbose) {
3417 va_start(ap, fmt);
3418 kvprintf(fmt, ap);
3419 va_end(ap);
3420 }
3421
3422 return (STATUS_SUCCESS);
3423 }
3424
3425 static void
3426 DbgBreakPoint(void)
3427 {
3428
3429 Debugger("DbgBreakPoint(): breakpoint");
3430 }
3431
3432 static void
3433 KeBugCheckEx(uint32_t code, u_long param1, u_long param2, u_long param3,
3434 u_long param4)
3435 {
3436 panic("KeBugCheckEx: STOP 0x%X", code);
3437 }
3438
3439 static void
3440 ntoskrnl_timercall(void *arg)
3441 {
3442 ktimer *timer;
3443 struct timeval tv;
3444 kdpc *dpc;
3445
3446 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3447
3448 timer = arg;
3449
3450 #ifdef NTOSKRNL_DEBUG_TIMERS
3451 ntoskrnl_timer_fires++;
3452 #endif
3453 ntoskrnl_remove_timer(timer);
3454
3455 /*
3456 * This should never happen, but complain
3457 * if it does.
3458 */
3459
3460 if (timer->k_header.dh_inserted == FALSE) {
3461 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3462 kprintf("NTOS: timer %p fired even though "
3463 "it was canceled\n", timer);
3464 return;
3465 }
3466
3467 /* Mark the timer as no longer being on the timer queue. */
3468
3469 timer->k_header.dh_inserted = FALSE;
3470
3471 /* Now signal the object and satisfy any waits on it. */
3472
3473 timer->k_header.dh_sigstate = 1;
3474 ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
3475
3476 /*
3477 * If this is a periodic timer, re-arm it
3478 * so it will fire again. We do this before
3479 * calling any deferred procedure calls because
3480 * it's possible the DPC might cancel the timer,
3481 * in which case it would be wrong for us to
3482 * re-arm it again afterwards.
3483 */
3484
3485 if (timer->k_period) {
3486 tv.tv_sec = 0;
3487 tv.tv_usec = timer->k_period * 1000;
3488 timer->k_header.dh_inserted = TRUE;
3489 ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3490 #ifdef NTOSKRNL_DEBUG_TIMERS
3491 ntoskrnl_timer_reloads++;
3492 #endif
3493 }
3494
3495 dpc = timer->k_dpc;
3496
3497 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3498
3499 /* If there's a DPC associated with the timer, queue it up. */
3500
3501 if (dpc != NULL)
3502 KeInsertQueueDpc(dpc, NULL, NULL);
3503 }
3504
3505 #ifdef NTOSKRNL_DEBUG_TIMERS
3506 static int
3507 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
3508 {
3509 int ret;
3510
3511 ret = 0;
3512 ntoskrnl_show_timers();
3513 return (sysctl_handle_int(oidp, &ret, 0, req));
3514 }
3515
3516 static void
3517 ntoskrnl_show_timers(void)
3518 {
3519 int i = 0;
3520 list_entry *l;
3521
3522 mtx_spinlock(&ntoskrnl_calllock);
3523 l = ntoskrnl_calllist.nle_flink;
3524 while(l != &ntoskrnl_calllist) {
3525 i++;
3526 l = l->nle_flink;
3527 }
3528 mtx_spinunlock(&ntoskrnl_calllock);
3529
3530 kprintf("\n");
3531 kprintf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
3532 kprintf("timer sets: %qu\n", ntoskrnl_timer_sets);
3533 kprintf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
3534 kprintf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
3535 kprintf("timer fires: %qu\n", ntoskrnl_timer_fires);
3536 kprintf("\n");
3537 }
3538 #endif
3539
3540 /*
3541 * Must be called with dispatcher lock held.
3542 */
3543
3544 static void
3545 ntoskrnl_insert_timer(ktimer *timer, int ticks)
3546 {
3547 callout_entry *e;
3548 list_entry *l;
3549 struct callout *c;
3550
3551 /*
3552 * Try and allocate a timer.
3553 */
3554 mtx_spinlock(&ntoskrnl_calllock);
3555 if (IsListEmpty(&ntoskrnl_calllist)) {
3556 mtx_spinunlock(&ntoskrnl_calllock);
3557 #ifdef NTOSKRNL_DEBUG_TIMERS
3558 ntoskrnl_show_timers();
3559 #endif
3560 panic("out of timers!");
3561 }
3562 l = RemoveHeadList(&ntoskrnl_calllist);
3563 mtx_spinunlock(&ntoskrnl_calllock);
3564
3565 e = CONTAINING_RECORD(l, callout_entry, ce_list);
3566 c = &e->ce_callout;
3567
3568 timer->k_callout = c;
3569
3570 callout_init_mp(c);
3571 callout_reset(c, ticks, ntoskrnl_timercall, timer);
3572 }
3573
3574 static void
3575 ntoskrnl_remove_timer(ktimer *timer)
3576 {
3577 callout_entry *e;
3578
3579 e = (callout_entry *)timer->k_callout;
3580 callout_stop(timer->k_callout);
3581
3582 mtx_spinlock(&ntoskrnl_calllock);
3583 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
3584 mtx_spinunlock(&ntoskrnl_calllock);
3585 }
3586
3587 void
3588 KeInitializeTimer(ktimer *timer)
3589 {
3590 if (timer == NULL)
3591 return;
3592
3593 KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
3594 }
3595
3596 void
3597 KeInitializeTimerEx(ktimer *timer, uint32_t type)
3598 {
3599 if (timer == NULL)
3600 return;
3601
3602 bzero((char *)timer, sizeof(ktimer));
3603 InitializeListHead((&timer->k_header.dh_waitlisthead));
3604 timer->k_header.dh_sigstate = FALSE;
3605 timer->k_header.dh_inserted = FALSE;
3606 if (type == EVENT_TYPE_NOTIFY)
3607 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
3608 else
3609 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
3610 timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
3611 }
3612
3613 /*
 * DPC subsystem. A Windows Deferred Procedure Call has the following
3615 * properties:
3616 * - It runs at DISPATCH_LEVEL.
3617 * - It can have one of 3 importance values that control when it
3618 * runs relative to other DPCs in the queue.
3619 * - On SMP systems, it can be set to run on a specific processor.
3620 * In order to satisfy the last property, we create a DPC thread for
3621 * each CPU in the system and bind it to that CPU. Each thread
3622 * maintains three queues with different importance levels, which
3623 * will be processed in order from lowest to highest.
3624 *
3625 * In Windows, interrupt handlers run as DPCs. (Not to be confused
3626 * with ISRs, which run in interrupt context and can preempt DPCs.)
3627 * ISRs are given the highest importance so that they'll take
3628 * precedence over timers and other things.
3629 */
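
/*
 * The typical driver pattern this supports (my_dpc_func and the softc
 * are hypothetical): at attach time,
 *
 *	KeInitializeDpc(&sc->sc_dpc, my_dpc_func, sc);
 *
 * and later, from the interrupt handler,
 *
 *	KeInsertQueueDpc(&sc->sc_dpc, sysarg1, sysarg2);
 *
 * which wakes one of the queue threads below via its kq_proc event.
 */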
3630
3631 static void
3632 ntoskrnl_dpc_thread(void *arg)
3633 {
3634 kdpc_queue *kq;
3635 kdpc *d;
3636 list_entry *l;
3637 uint8_t irql;
3638
3639 kq = arg;
3640
3641 InitializeListHead(&kq->kq_disp);
3642 kq->kq_td = curthread;
3643 kq->kq_exit = 0;
3644 kq->kq_running = FALSE;
3645 KeInitializeSpinLock(&kq->kq_lock);
3646 KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
3647 KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
3648
3649 /*
3650 * Elevate our priority. DPCs are used to run interrupt
3651 * handlers, and they should trigger as soon as possible
3652 * once scheduled by an ISR.
3653 */
3654
3655 #ifdef NTOSKRNL_MULTIPLE_DPCS
3656 sched_bind(curthread, kq->kq_cpu);
3657 #endif
3658 lwkt_setpri_self(TDPRI_INT_HIGH);
3659
3660 while (1) {
3661 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
3662
3663 KeAcquireSpinLock(&kq->kq_lock, &irql);
3664
3665 if (kq->kq_exit) {
3666 kq->kq_exit = 0;
3667 KeReleaseSpinLock(&kq->kq_lock, irql);
3668 break;
3669 }
3670
3671 kq->kq_running = TRUE;
3672
3673 while (!IsListEmpty(&kq->kq_disp)) {
3674 l = RemoveHeadList((&kq->kq_disp));
3675 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3676 InitializeListHead((&d->k_dpclistentry));
3677 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3678 MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
3679 d->k_sysarg1, d->k_sysarg2);
3680 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3681 }
3682
3683 kq->kq_running = FALSE;
3684
3685 KeReleaseSpinLock(&kq->kq_lock, irql);
3686
3687 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
3688 }
3689
3690 wakeup(curthread);
3691 kthread_exit();
3692 return; /* notreached */
3693 }
3694
3695 static void
3696 ntoskrnl_destroy_dpc_threads(void)
3697 {
3698 kdpc_queue *kq;
3699 kdpc dpc;
3700 int i;
3701
3702 kq = kq_queues;
3703 #ifdef NTOSKRNL_MULTIPLE_DPCS
3704 for (i = 0; i < ncpus; i++) {
3705 #else
3706 for (i = 0; i < 1; i++) {
3707 #endif
3708 kq += i;
3709
3710 kq->kq_exit = 1;
3711 KeInitializeDpc(&dpc, NULL, NULL);
3712 KeSetTargetProcessorDpc(&dpc, i);
3713 KeInsertQueueDpc(&dpc, NULL, NULL);
3714 while (kq->kq_exit)
3715 tsleep(kq->kq_td, 0, "dpcw", hz/10);
3716 }
3717 }
3718
3719 static uint8_t
3720 ntoskrnl_insert_dpc(list_entry *head, kdpc *dpc)
3721 {
3722 list_entry *l;
3723 kdpc *d;
3724
3725 l = head->nle_flink;
3726 while (l != head) {
3727 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
3728 if (d == dpc)
3729 return (FALSE);
3730 l = l->nle_flink;
3731 }
3732
3733 if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
3734 InsertTailList((head), (&dpc->k_dpclistentry));
3735 else
3736 InsertHeadList((head), (&dpc->k_dpclistentry));
3737
3738 return (TRUE);
3739 }
3740
3741 void
3742 KeInitializeDpc(kdpc *dpc, void *dpcfunc, void *dpcctx)
3743 {
3744
3745 if (dpc == NULL)
3746 return;
3747
3748 dpc->k_deferedfunc = dpcfunc;
3749 dpc->k_deferredctx = dpcctx;
3750 dpc->k_num = KDPC_CPU_DEFAULT;
3751 dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
3752 InitializeListHead((&dpc->k_dpclistentry));
3753 }
3754
3755 uint8_t
3756 KeInsertQueueDpc(kdpc *dpc, void *sysarg1, void *sysarg2)
3757 {
3758 kdpc_queue *kq;
3759 uint8_t r;
3760 uint8_t irql;
3761
3762 if (dpc == NULL)
3763 return (FALSE);
3764
3765 kq = kq_queues;
3766
3767 #ifdef NTOSKRNL_MULTIPLE_DPCS
3768 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3769
3770 /*
3771 * By default, the DPC is queued to run on the same CPU
3772 * that scheduled it.
3773 */
3774
3775 if (dpc->k_num == KDPC_CPU_DEFAULT)
3776 kq += curthread->td_oncpu;
3777 else
3778 kq += dpc->k_num;
3779 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3780 #else
3781 KeAcquireSpinLock(&kq->kq_lock, &irql);
3782 #endif
3783
3784 r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
3785 if (r == TRUE) {
3786 dpc->k_sysarg1 = sysarg1;
3787 dpc->k_sysarg2 = sysarg2;
3788 }
3789 KeReleaseSpinLock(&kq->kq_lock, irql);
3790
3791 if (r == FALSE)
3792 return (r);
3793
3794 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3795
3796 return (r);
3797 }
3798
3799 uint8_t
3800 KeRemoveQueueDpc(kdpc *dpc)
3801 {
3802 kdpc_queue *kq;
3803 uint8_t irql;
3804
3805 if (dpc == NULL)
3806 return (FALSE);
3807
3808 #ifdef NTOSKRNL_MULTIPLE_DPCS
3809 KeRaiseIrql(DISPATCH_LEVEL, &irql);
3810
3811 kq = kq_queues + dpc->k_num;
3812
3813 KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
3814 #else
3815 kq = kq_queues;
3816 KeAcquireSpinLock(&kq->kq_lock, &irql);
3817 #endif
3818
3819 if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
3820 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
3821 KeLowerIrql(irql);
3822 return (FALSE);
3823 }
3824
3825 RemoveEntryList((&dpc->k_dpclistentry));
3826 InitializeListHead((&dpc->k_dpclistentry));
3827
3828 KeReleaseSpinLock(&kq->kq_lock, irql);
3829
3830 return (TRUE);
3831 }
3832
3833 void
3834 KeSetImportanceDpc(kdpc *dpc, uint32_t imp)
3835 {
3836 if (imp != KDPC_IMPORTANCE_LOW &&
3837 imp != KDPC_IMPORTANCE_MEDIUM &&
3838 imp != KDPC_IMPORTANCE_HIGH)
3839 return;
3840
3841 dpc->k_importance = (uint8_t)imp;
3842 }
3843
3844 void
3845 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
3846 {
	if (cpu >= ncpus)
3848 return;
3849
3850 dpc->k_num = cpu;
3851 }
3852
3853 void
3854 KeFlushQueuedDpcs(void)
3855 {
3856 kdpc_queue *kq;
3857 int i;
3858
3859 /*
3860 * Poke each DPC queue and wait
3861 * for them to drain.
3862 */
3863
3864 #ifdef NTOSKRNL_MULTIPLE_DPCS
3865 for (i = 0; i < ncpus; i++) {
3866 #else
3867 for (i = 0; i < 1; i++) {
3868 #endif
3869 kq = kq_queues + i;
3870 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
3871 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
3872 }
3873 }
3874
3875 uint32_t
3876 KeGetCurrentProcessorNumber(void)
3877 {
3878 return (curthread->td_gd->gd_cpuid);
3879 }
3880
3881 uint8_t
3882 KeSetTimerEx(ktimer *timer, int64_t duetime, uint32_t period, kdpc *dpc)
3883 {
3884 struct timeval tv;
3885 uint64_t curtime;
3886 uint8_t pending;
3887
3888 if (timer == NULL)
3889 return (FALSE);
3890
3891 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3892
3893 if (timer->k_header.dh_inserted == TRUE) {
3894 ntoskrnl_remove_timer(timer);
3895 #ifdef NTOSKRNL_DEBUG_TIMERS
3896 ntoskrnl_timer_cancels++;
3897 #endif
3898 timer->k_header.dh_inserted = FALSE;
3899 pending = TRUE;
3900 } else
3901 pending = FALSE;
3902
3903 timer->k_duetime = duetime;
3904 timer->k_period = period;
3905 timer->k_header.dh_sigstate = FALSE;
3906 timer->k_dpc = dpc;
3907
	if (duetime < 0) {
		/* Negative due times are relative, in 100ns units. */
		tv.tv_sec = -duetime / 10000000;
		tv.tv_usec = (-duetime / 10) -
		    (tv.tv_sec * 1000000);
3912 } else {
3913 ntoskrnl_time(&curtime);
3914 if (duetime < curtime)
3915 tv.tv_sec = tv.tv_usec = 0;
3916 else {
3917 tv.tv_sec = ((duetime) - curtime) / 10000000;
3918 tv.tv_usec = ((duetime) - curtime) / 10 -
3919 (tv.tv_sec * 1000000);
3920 }
3921 }
3922
3923 timer->k_header.dh_inserted = TRUE;
3924 ntoskrnl_insert_timer(timer, tvtohz_high(&tv));
3925 #ifdef NTOSKRNL_DEBUG_TIMERS
3926 ntoskrnl_timer_sets++;
3927 #endif
3928
3929 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3930
3931 return (pending);
3932 }
3933
3934 uint8_t
3935 KeSetTimer(ktimer *timer, int64_t duetime, kdpc *dpc)
3936 {
3937 return (KeSetTimerEx(timer, duetime, 0, dpc));
3938 }
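
/*
 * Due times follow the Windows convention: negative values are
 * relative, positive values are absolute, both in 100-nanosecond
 * units. For example, a one-shot timer firing roughly one second
 * from now would be set with:
 *
 *	KeSetTimer(&timer, -10000000LL, NULL);
 */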
3939
3940 /*
3941 * The Windows DDK documentation seems to say that cancelling
3942 * a timer that has a DPC will result in the DPC also being
3943 * cancelled, but this isn't really the case.
3944 */
3945
3946 uint8_t
3947 KeCancelTimer(ktimer *timer)
3948 {
3949 uint8_t pending;
3950
3951 if (timer == NULL)
3952 return (FALSE);
3953
3954 lockmgr(&ntoskrnl_dispatchlock, LK_EXCLUSIVE);
3955
3956 pending = timer->k_header.dh_inserted;
3957
3958 if (timer->k_header.dh_inserted == TRUE) {
3959 timer->k_header.dh_inserted = FALSE;
3960 ntoskrnl_remove_timer(timer);
3961 #ifdef NTOSKRNL_DEBUG_TIMERS
3962 ntoskrnl_timer_cancels++;
3963 #endif
3964 }
3965
3966 lockmgr(&ntoskrnl_dispatchlock, LK_RELEASE);
3967
3968 return (pending);
3969 }
3970
3971 uint8_t
3972 KeReadStateTimer(ktimer *timer)
3973 {
3974 return (timer->k_header.dh_sigstate);
3975 }
3976
3977 static int32_t
3978 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
3979 {
3980 ktimer timer;
3981
3982 if (wait_mode != 0)
3983 panic("invalid wait_mode %d", wait_mode);
3984
3985 KeInitializeTimer(&timer);
3986 KeSetTimer(&timer, *interval, NULL);
3987 KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
3988
	return (STATUS_SUCCESS);
3990 }
3991
3992 static uint64_t
3993 KeQueryInterruptTime(void)
3994 {
3995 int ticks;
3996 struct timeval tv;
3997
3998 getmicrouptime(&tv);
3999
4000 ticks = tvtohz_high(&tv);
4001
	return (ticks * ((10000000 + hz - 1) / hz));
4003 }
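
/*
 * (10000000 + hz - 1) / hz is the number of 100-nanosecond units per
 * tick, rounded up; with hz = 100, each tick counts as 100000 units
 * (10ms).
 */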
4004
4005 static struct thread *
4006 KeGetCurrentThread(void)
4007 {
4008
	return (curthread);
4010 }
4011
4012 static int32_t
4013 KeSetPriorityThread(struct thread *td, int32_t pri)
4014 {
4015 int32_t old;
4016
4017 if (td == NULL)
		return (LOW_REALTIME_PRIORITY);
4019
4020 if (td->td_pri >= TDPRI_INT_HIGH)
4021 old = HIGH_PRIORITY;
4022 else if (td->td_pri <= TDPRI_IDLE_WORK)
4023 old = LOW_PRIORITY;
4024 else
4025 old = LOW_REALTIME_PRIORITY;
4026
4027 if (pri == HIGH_PRIORITY)
4028 lwkt_setpri(td, TDPRI_INT_HIGH);
4029 if (pri == LOW_REALTIME_PRIORITY)
4030 lwkt_setpri(td, TDPRI_SOFT_TIMER);
4031 if (pri == LOW_PRIORITY)
4032 lwkt_setpri(td, TDPRI_IDLE_WORK);
4033
	return (old);
4035 }
4036
4037 static void
4038 dummy(void)
4039 {
4040 kprintf("ntoskrnl dummy called...\n");
4041 }
4044 image_patch_table ntoskrnl_functbl[] = {
4045 IMPORT_SFUNC(RtlZeroMemory, 2),
4046 IMPORT_SFUNC(RtlSecureZeroMemory, 2),
4047 IMPORT_SFUNC(RtlFillMemory, 3),
4048 IMPORT_SFUNC(RtlMoveMemory, 3),
4049 IMPORT_SFUNC(RtlCharToInteger, 3),
4050 IMPORT_SFUNC(RtlCopyMemory, 3),
4051 IMPORT_SFUNC(RtlCopyString, 2),
4052 IMPORT_SFUNC(RtlCompareMemory, 3),
4053 IMPORT_SFUNC(RtlEqualUnicodeString, 3),
4054 IMPORT_SFUNC(RtlCopyUnicodeString, 2),
4055 IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
4056 IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
4057 IMPORT_SFUNC(RtlInitAnsiString, 2),
4058 IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
4059 IMPORT_SFUNC(RtlInitUnicodeString, 2),
4060 IMPORT_SFUNC(RtlFreeAnsiString, 1),
4061 IMPORT_SFUNC(RtlFreeUnicodeString, 1),
4062 IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
4063 IMPORT_CFUNC_MAP(sprintf, ksprintf, 0),
	IMPORT_CFUNC_MAP(vsprintf, kvsprintf, 0),
	IMPORT_CFUNC_MAP(_snprintf, ksnprintf, 0),
	IMPORT_CFUNC_MAP(_vsnprintf, kvsnprintf, 0),
	IMPORT_CFUNC(DbgPrint, 0),
	IMPORT_SFUNC(DbgBreakPoint, 0),
	IMPORT_SFUNC(KeBugCheckEx, 5),
	IMPORT_CFUNC(strncmp, 0),
	IMPORT_CFUNC(strcmp, 0),
	IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
	IMPORT_CFUNC(strncpy, 0),
	IMPORT_CFUNC(strcpy, 0),
	IMPORT_CFUNC(strlen, 0),
	IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
	IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
	IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
	IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
	IMPORT_CFUNC_MAP(strchr, index, 0),
	IMPORT_CFUNC_MAP(strrchr, rindex, 0),
	IMPORT_CFUNC(memcpy, 0),
	IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
	IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
	IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
	IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
	IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
	IMPORT_FFUNC(IofCallDriver, 2),
	IMPORT_FFUNC(IofCompleteRequest, 2),
	IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
	IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
	IMPORT_SFUNC(IoCancelIrp, 1),
	IMPORT_SFUNC(IoConnectInterrupt, 11),
	IMPORT_SFUNC(IoDisconnectInterrupt, 1),
	IMPORT_SFUNC(IoCreateDevice, 7),
	IMPORT_SFUNC(IoDeleteDevice, 1),
	IMPORT_SFUNC(IoGetAttachedDevice, 1),
	IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
	IMPORT_SFUNC(IoDetachDevice, 1),
	IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
	IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
	IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
	IMPORT_SFUNC(IoAllocateIrp, 2),
	IMPORT_SFUNC(IoReuseIrp, 2),
	IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
	IMPORT_SFUNC(IoFreeIrp, 1),
	IMPORT_SFUNC(IoInitializeIrp, 3),
	IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
	IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
	IMPORT_SFUNC(KeSynchronizeExecution, 3),
	IMPORT_SFUNC(KeWaitForSingleObject, 5),
	IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
	IMPORT_SFUNC(_allmul, 4),
	IMPORT_SFUNC(_alldiv, 4),
	IMPORT_SFUNC(_allrem, 4),
	IMPORT_RFUNC(_allshr, 0),
	IMPORT_RFUNC(_allshl, 0),
	IMPORT_SFUNC(_aullmul, 4),
	IMPORT_SFUNC(_aulldiv, 4),
	IMPORT_SFUNC(_aullrem, 4),
	IMPORT_RFUNC(_aullshr, 0),
	IMPORT_RFUNC(_aullshl, 0),
	IMPORT_CFUNC(atoi, 0),
	IMPORT_CFUNC(atol, 0),
	IMPORT_CFUNC(rand, 0),
	IMPORT_CFUNC(srand, 0),
	IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
	IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
	IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
	IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
	IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
	IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
	IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
	IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
	IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
	IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
	IMPORT_FFUNC(InterlockedPopEntrySList, 1),
	IMPORT_FFUNC(InitializeSListHead, 1),
	IMPORT_FFUNC(InterlockedPushEntrySList, 2),
	IMPORT_SFUNC(ExQueryDepthSList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
	    InterlockedPopEntrySList, 1),
	IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
	    InterlockedPushEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
	IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
	IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
	IMPORT_SFUNC(ExFreePoolWithTag, 2),
	IMPORT_SFUNC(ExFreePool, 1),
#ifdef __i386__
	IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
	IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel, 1),
	IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
#else
	/*
	 * For AMD64, we can get away with mapping
	 * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock(),
	 * because the calling conventions end up being the same.
	 * On i386 we have to be careful, because KfAcquireSpinLock()
	 * is _fastcall while KeAcquireSpinLockRaiseToDpc() isn't;
	 * a sketch of the distinction follows this table.
	 */
	IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
	IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
	IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
#endif
	IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
	IMPORT_FFUNC(InterlockedIncrement, 1),
	IMPORT_FFUNC(InterlockedDecrement, 1),
	IMPORT_FFUNC(InterlockedExchange, 2),
	IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
	IMPORT_SFUNC(IoAllocateMdl, 5),
	IMPORT_SFUNC(IoFreeMdl, 1),
	IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
	IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
	IMPORT_SFUNC(MmFreeContiguousMemory, 1),
	IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
	IMPORT_SFUNC(MmSizeOfMdl, 1),
	IMPORT_SFUNC(MmMapLockedPages, 2),
	IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
	IMPORT_SFUNC(MmUnmapLockedPages, 2),
	IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
	IMPORT_SFUNC(MmGetPhysicalAddress, 1),
	IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
	IMPORT_SFUNC(MmIsAddressValid, 1),
	IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
	IMPORT_SFUNC(MmUnmapIoSpace, 2),
	IMPORT_SFUNC(KeInitializeSpinLock, 1),
	IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
	IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
	IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
	IMPORT_SFUNC(IoGetDeviceProperty, 5),
	IMPORT_SFUNC(IoAllocateWorkItem, 1),
	IMPORT_SFUNC(IoFreeWorkItem, 1),
	IMPORT_SFUNC(IoQueueWorkItem, 4),
	IMPORT_SFUNC(ExQueueWorkItem, 2),
	IMPORT_SFUNC(ntoskrnl_workitem, 2),
	IMPORT_SFUNC(KeInitializeMutex, 2),
	IMPORT_SFUNC(KeReleaseMutex, 2),
	IMPORT_SFUNC(KeReadStateMutex, 1),
	IMPORT_SFUNC(KeInitializeEvent, 3),
	IMPORT_SFUNC(KeSetEvent, 3),
	IMPORT_SFUNC(KeResetEvent, 1),
	IMPORT_SFUNC(KeClearEvent, 1),
	IMPORT_SFUNC(KeReadStateEvent, 1),
	IMPORT_SFUNC(KeInitializeTimer, 1),
	IMPORT_SFUNC(KeInitializeTimerEx, 2),
	IMPORT_SFUNC(KeSetTimer, 3),
	IMPORT_SFUNC(KeSetTimerEx, 4),
	IMPORT_SFUNC(KeCancelTimer, 1),
	IMPORT_SFUNC(KeReadStateTimer, 1),
	IMPORT_SFUNC(KeInitializeDpc, 3),
	IMPORT_SFUNC(KeInsertQueueDpc, 3),
	IMPORT_SFUNC(KeRemoveQueueDpc, 1),
	IMPORT_SFUNC(KeSetImportanceDpc, 2),
	IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
	IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
	IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
	IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
	IMPORT_FFUNC(ObfDereferenceObject, 1),
	IMPORT_SFUNC(ZwClose, 1),
	IMPORT_SFUNC(PsCreateSystemThread, 7),
	IMPORT_SFUNC(PsTerminateSystemThread, 1),
	IMPORT_SFUNC(IoWMIRegistrationControl, 2),
	IMPORT_SFUNC(WmiQueryTraceInformation, 5),
	IMPORT_CFUNC(WmiTraceMessage, 0),
	IMPORT_SFUNC(KeQuerySystemTime, 1),
	IMPORT_CFUNC(KeTickCount, 0),
	IMPORT_SFUNC(KeDelayExecutionThread, 3),
	IMPORT_SFUNC(KeQueryInterruptTime, 0),
	IMPORT_SFUNC(KeGetCurrentThread, 0),
	IMPORT_SFUNC(KeSetPriorityThread, 2),

	/*
	 * This last entry is a catch-all for any function we haven't
	 * implemented yet.  The PE import list patching routine will
	 * bind any import that has no explicit match in this table
	 * to it; a sketch of such a lookup follows the table.
	 */

	{ NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },

	/* End of list. */

	{ NULL, NULL, NULL }
};
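
/*
 * Illustrative sketches (not part of the original source), kept under
 * #if 0 so they are never compiled.
 *
 * First, a minimal sketch of how a PE import patching pass might walk
 * a table shaped like the one above.  The image_patch_table fields are
 * the ones the IMPORT_*FUNC() macros initialize (pe_var.h); the name
 * ntoskrnl_find_import() is made up for illustration.  The (FUNC)dummy
 * catch-all entry has a NULL ipt_name, so any import that reaches it
 * matches, while the all-NULL sentinel (NULL ipt_func) ends the walk.
 */
#if 0
static image_patch_table *
ntoskrnl_find_import(image_patch_table *functbl, const char *name)
{
	image_patch_table	*patch;

	for (patch = functbl; patch->ipt_func != NULL; patch++) {
		/* A NULL name is the wildcard catch-all entry. */
		if (patch->ipt_name == NULL ||
		    strcmp(patch->ipt_name, name) == 0)
			return (patch);
	}
	return (NULL);		/* hit the terminating sentinel */
}

/*
 * Second, the i386 calling convention distinction referred to above,
 * in GCC attribute spelling (hypothetical declarations, i386 only).
 * _fastcall passes its first two arguments in %ecx/%edx, so a fastcall
 * import needs an IMPORT_FFUNC() wrapper, while a plain _stdcall import
 * uses IMPORT_SFUNC().  On AMD64 there is only one calling convention,
 * which is why the #else branch above can map one routine to the other.
 */
uint8_t __attribute__((fastcall)) KfAcquireSpinLock(kspin_lock *lock);
uint8_t __attribute__((stdcall)) KeAcquireSpinLockRaiseToDpc(kspin_lock *lock);
#endif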