FreeBSD/Linux Kernel Cross Reference
sys/compat/ndis/subr_ntoskrnl.c

    1 /*-
    2  * Copyright (c) 2003
    3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD: releng/10.4/sys/compat/ndis/subr_ntoskrnl.c 314667 2017-03-04 13:03:31Z avg $");
   35 
   36 #include <sys/ctype.h>
   37 #include <sys/unistd.h>
   38 #include <sys/param.h>
   39 #include <sys/types.h>
   40 #include <sys/errno.h>
   41 #include <sys/systm.h>
   42 #include <sys/malloc.h>
   43 #include <sys/lock.h>
   44 #include <sys/mutex.h>
   45 
   46 #include <sys/callout.h>
   47 #include <sys/kdb.h>
   48 #include <sys/kernel.h>
   49 #include <sys/proc.h>
   50 #include <sys/condvar.h>
   51 #include <sys/kthread.h>
   52 #include <sys/module.h>
   53 #include <sys/smp.h>
   54 #include <sys/sched.h>
   55 #include <sys/sysctl.h>
   56 
   57 #include <machine/atomic.h>
   58 #include <machine/bus.h>
   59 #include <machine/stdarg.h>
   60 #include <machine/resource.h>
   61 
   62 #include <sys/bus.h>
   63 #include <sys/rman.h>
   64 
   65 #include <vm/vm.h>
   66 #include <vm/vm_param.h>
   67 #include <vm/pmap.h>
   68 #include <vm/uma.h>
   69 #include <vm/vm_kern.h>
   70 #include <vm/vm_map.h>
   71 #include <vm/vm_extern.h>
   72 
   73 #include <compat/ndis/pe_var.h>
   74 #include <compat/ndis/cfg_var.h>
   75 #include <compat/ndis/resource_var.h>
   76 #include <compat/ndis/ntoskrnl_var.h>
   77 #include <compat/ndis/hal_var.h>
   78 #include <compat/ndis/ndis_var.h>
   79 
   80 #ifdef NTOSKRNL_DEBUG_TIMERS
   81 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
   82 
   83 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLTYPE_INT | CTLFLAG_RW,
   84     NULL, 0, sysctl_show_timers, "I",
   85     "Show ntoskrnl timer stats");
   86 #endif
   87 
   88 struct kdpc_queue {
   89         list_entry              kq_disp;
   90         struct thread           *kq_td;
   91         int                     kq_cpu;
   92         int                     kq_exit;
   93         int                     kq_running;
   94         kspin_lock              kq_lock;
   95         nt_kevent               kq_proc;
   96         nt_kevent               kq_done;
   97 };
   98 
   99 typedef struct kdpc_queue kdpc_queue;
  100 
  101 struct wb_ext {
  102         struct cv               we_cv;
  103         struct thread           *we_td;
  104 };
  105 
  106 typedef struct wb_ext wb_ext;
  107 
  108 #define NTOSKRNL_TIMEOUTS       256
  109 #ifdef NTOSKRNL_DEBUG_TIMERS
  110 static uint64_t ntoskrnl_timer_fires;
  111 static uint64_t ntoskrnl_timer_sets;
  112 static uint64_t ntoskrnl_timer_reloads;
  113 static uint64_t ntoskrnl_timer_cancels;
  114 #endif
  115 
  116 struct callout_entry {
  117         struct callout          ce_callout;
  118         list_entry              ce_list;
  119 };
  120 
  121 typedef struct callout_entry callout_entry;
  122 
  123 static struct list_entry ntoskrnl_calllist;
  124 static struct mtx ntoskrnl_calllock;
  125 struct kuser_shared_data kuser_shared_data;
  126 
  127 static struct list_entry ntoskrnl_intlist;
  128 static kspin_lock ntoskrnl_intlock;
  129 
  130 static uint8_t RtlEqualUnicodeString(unicode_string *,
  131         unicode_string *, uint8_t);
  132 static void RtlCopyString(ansi_string *, const ansi_string *);
  133 static void RtlCopyUnicodeString(unicode_string *,
  134         unicode_string *);
  135 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
  136          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
  137 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
  138         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
  139 static irp *IoBuildDeviceIoControlRequest(uint32_t,
  140         device_object *, void *, uint32_t, void *, uint32_t,
  141         uint8_t, nt_kevent *, io_status_block *);
  142 static irp *IoAllocateIrp(uint8_t, uint8_t);
  143 static void IoReuseIrp(irp *, uint32_t);
  144 static void IoFreeIrp(irp *);
  145 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
  146 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
  147 static uint32_t KeWaitForMultipleObjects(uint32_t,
  148         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
  149         int64_t *, wait_block *);
  150 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
  151 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
  152 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
  153 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
  154 static void ntoskrnl_insert_timer(ktimer *, int);
  155 static void ntoskrnl_remove_timer(ktimer *);
  156 #ifdef NTOSKRNL_DEBUG_TIMERS
  157 static void ntoskrnl_show_timers(void);
  158 #endif
  159 static void ntoskrnl_timercall(void *);
  160 static void ntoskrnl_dpc_thread(void *);
  161 static void ntoskrnl_destroy_dpc_threads(void);
  162 static void ntoskrnl_destroy_workitem_threads(void);
  163 static void ntoskrnl_workitem_thread(void *);
  164 static void ntoskrnl_workitem(device_object *, void *);
  165 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
  166 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
  167 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
  168 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
  169 static uint16_t READ_REGISTER_USHORT(uint16_t *);
  170 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
  171 static uint32_t READ_REGISTER_ULONG(uint32_t *);
  172 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
  173 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
  174 static int64_t _allmul(int64_t, int64_t);
  175 static int64_t _alldiv(int64_t, int64_t);
  176 static int64_t _allrem(int64_t, int64_t);
  177 static int64_t _allshr(int64_t, uint8_t);
  178 static int64_t _allshl(int64_t, uint8_t);
  179 static uint64_t _aullmul(uint64_t, uint64_t);
  180 static uint64_t _aulldiv(uint64_t, uint64_t);
  181 static uint64_t _aullrem(uint64_t, uint64_t);
  182 static uint64_t _aullshr(uint64_t, uint8_t);
  183 static uint64_t _aullshl(uint64_t, uint8_t);
  184 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
  185 static void InitializeSListHead(slist_header *);
  186 static slist_entry *ntoskrnl_popsl(slist_header *);
  187 static void ExFreePoolWithTag(void *, uint32_t);
  188 static void ExInitializePagedLookasideList(paged_lookaside_list *,
  189         lookaside_alloc_func *, lookaside_free_func *,
  190         uint32_t, size_t, uint32_t, uint16_t);
  191 static void ExDeletePagedLookasideList(paged_lookaside_list *);
  192 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
  193         lookaside_alloc_func *, lookaside_free_func *,
  194         uint32_t, size_t, uint32_t, uint16_t);
  195 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
  196 static slist_entry
  197         *ExInterlockedPushEntrySList(slist_header *,
  198         slist_entry *, kspin_lock *);
  199 static slist_entry
  200         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
  201 static uint32_t InterlockedIncrement(volatile uint32_t *);
  202 static uint32_t InterlockedDecrement(volatile uint32_t *);
  203 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
  204 static void *MmAllocateContiguousMemory(uint32_t, uint64_t);
  205 static void *MmAllocateContiguousMemorySpecifyCache(uint32_t,
  206         uint64_t, uint64_t, uint64_t, enum nt_caching_type);
  207 static void MmFreeContiguousMemory(void *);
  208 static void MmFreeContiguousMemorySpecifyCache(void *, uint32_t,
  209         enum nt_caching_type);
  210 static uint32_t MmSizeOfMdl(void *, size_t);
  211 static void *MmMapLockedPages(mdl *, uint8_t);
  212 static void *MmMapLockedPagesSpecifyCache(mdl *,
  213         uint8_t, uint32_t, void *, uint32_t, uint32_t);
  214 static void MmUnmapLockedPages(void *, mdl *);
  215 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
  216 static void RtlZeroMemory(void *, size_t);
  217 static void RtlSecureZeroMemory(void *, size_t);
  218 static void RtlFillMemory(void *, size_t, uint8_t);
  219 static void RtlMoveMemory(void *, const void *, size_t);
  220 static ndis_status RtlCharToInteger(const char *, uint32_t, uint32_t *);
  221 static void RtlCopyMemory(void *, const void *, size_t);
  222 static size_t RtlCompareMemory(const void *, const void *, size_t);
  223 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
  224         uint32_t, uint32_t *);
  225 static int atoi (const char *);
  226 static long atol (const char *);
  227 static int rand(void);
  228 static void srand(unsigned int);
  229 static void KeQuerySystemTime(uint64_t *);
  230 static uint32_t KeTickCount(void);
  231 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
  232 static int32_t IoOpenDeviceRegistryKey(struct device_object *, uint32_t,
  233     uint32_t, void **);
  234 static void ntoskrnl_thrfunc(void *);
  235 static ndis_status PsCreateSystemThread(ndis_handle *,
  236         uint32_t, void *, ndis_handle, void *, void *, void *);
  237 static ndis_status PsTerminateSystemThread(ndis_status);
  238 static ndis_status IoGetDeviceObjectPointer(unicode_string *,
  239         uint32_t, void *, device_object *);
  240 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
  241         uint32_t, void *, uint32_t *);
  242 static void KeInitializeMutex(kmutant *, uint32_t);
  243 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
  244 static uint32_t KeReadStateMutex(kmutant *);
  245 static ndis_status ObReferenceObjectByHandle(ndis_handle,
  246         uint32_t, void *, uint8_t, void **, void **);
  247 static void ObfDereferenceObject(void *);
  248 static uint32_t ZwClose(ndis_handle);
  249 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
  250         uint32_t, void *);
  251 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
  252 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
  253 static void *ntoskrnl_memset(void *, int, size_t);
  254 static void *ntoskrnl_memmove(void *, void *, size_t);
  255 static void *ntoskrnl_memchr(void *, unsigned char, size_t);
  256 static char *ntoskrnl_strstr(char *, char *);
  257 static char *ntoskrnl_strncat(char *, char *, size_t);
  258 static int ntoskrnl_toupper(int);
  259 static int ntoskrnl_tolower(int);
  260 static funcptr ntoskrnl_findwrap(funcptr);
  261 static uint32_t DbgPrint(char *, ...);
  262 static void DbgBreakPoint(void);
  263 static void KeBugCheckEx(uint32_t, u_long, u_long, u_long, u_long);
  264 static int32_t KeDelayExecutionThread(uint8_t, uint8_t, int64_t *);
  265 static int32_t KeSetPriorityThread(struct thread *, int32_t);
  266 static void dummy(void);
  267 
  268 static struct mtx ntoskrnl_dispatchlock;
  269 static struct mtx ntoskrnl_interlock;
  270 static kspin_lock ntoskrnl_cancellock;
  271 static int ntoskrnl_kth = 0;
  272 static struct nt_objref_head ntoskrnl_reflist;
  273 static uma_zone_t mdl_zone;
  274 static uma_zone_t iw_zone;
  275 static struct kdpc_queue *kq_queues;
  276 static struct kdpc_queue *wq_queues;
  277 static int wq_idx = 0;
  278 
  279 int
  280 ntoskrnl_libinit()
  281 {
  282         image_patch_table       *patch;
  283         int                     error;
  284         struct proc             *p;
  285         kdpc_queue              *kq;
  286         callout_entry           *e;
  287         int                     i;
  288 
  289         mtx_init(&ntoskrnl_dispatchlock,
  290             "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
  291         mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
  292         KeInitializeSpinLock(&ntoskrnl_cancellock);
  293         KeInitializeSpinLock(&ntoskrnl_intlock);
  294         TAILQ_INIT(&ntoskrnl_reflist);
  295 
  296         InitializeListHead(&ntoskrnl_calllist);
  297         InitializeListHead(&ntoskrnl_intlist);
  298         mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
  299 
  300         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
  301 #ifdef NTOSKRNL_MULTIPLE_DPCS
  302             sizeof(kdpc_queue) * mp_ncpus, 0);
  303 #else
  304             sizeof(kdpc_queue), 0);
  305 #endif
  306 
  307         if (kq_queues == NULL)
  308                 return (ENOMEM);
  309 
  310         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
  311             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
  312 
  313         if (wq_queues == NULL)
   314                 return (ExFreePool(kq_queues), ENOMEM);
  315 
  316 #ifdef NTOSKRNL_MULTIPLE_DPCS
  317         bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
  318 #else
  319         bzero((char *)kq_queues, sizeof(kdpc_queue));
  320 #endif
  321         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
  322 
  323         /*
  324          * Launch the DPC threads.
  325          */
  326 
  327 #ifdef NTOSKRNL_MULTIPLE_DPCS
  328         for (i = 0; i < mp_ncpus; i++) {
  329 #else
  330         for (i = 0; i < 1; i++) {
  331 #endif
  332                 kq = kq_queues + i;
  333                 kq->kq_cpu = i;
  334                 error = kproc_create(ntoskrnl_dpc_thread, kq, &p,
  335                     RFHIGHPID, NDIS_KSTACK_PAGES, "Windows DPC %d", i);
  336                 if (error)
  337                         panic("failed to launch DPC thread");
  338         }
  339 
  340         /*
  341          * Launch the workitem threads.
  342          */
  343 
  344         for (i = 0; i < WORKITEM_THREADS; i++) {
  345                 kq = wq_queues + i;
  346                 error = kproc_create(ntoskrnl_workitem_thread, kq, &p,
  347                     RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Workitem %d", i);
  348                 if (error)
  349                         panic("failed to launch workitem thread");
  350         }
  351 
  352         patch = ntoskrnl_functbl;
  353         while (patch->ipt_func != NULL) {
  354                 windrv_wrap((funcptr)patch->ipt_func,
  355                     (funcptr *)&patch->ipt_wrap,
  356                     patch->ipt_argcnt, patch->ipt_ftype);
  357                 patch++;
  358         }
  359 
  360         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
  361                 e = ExAllocatePoolWithTag(NonPagedPool,
  362                     sizeof(callout_entry), 0);
  363                 if (e == NULL)
  364                         panic("failed to allocate timeouts");
  365                 mtx_lock_spin(&ntoskrnl_calllock);
  366                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
  367                 mtx_unlock_spin(&ntoskrnl_calllock);
  368         }
  369 
  370         /*
  371          * MDLs are supposed to be variable size (they describe
  372          * buffers containing some number of pages, but we don't
  373          * know ahead of time how many pages that will be). But
  374          * always allocating them off the heap is very slow. As
  375          * a compromise, we create an MDL UMA zone big enough to
  376          * handle any buffer requiring up to 16 pages, and we
  377          * use those for any MDLs for buffers of 16 pages or less
  378          * in size. For buffers larger than that (which we assume
   379  * will be few and far between), we allocate the MDLs off
  380          * the heap.
  381          */
  382 
  383         mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
  384             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  385 
  386         iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
  387             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  388 
  389         return (0);
  390 }
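
/*
 * Illustrative sketch (not part of the original file): the zone-vs-heap
 * compromise described in the comment above, as a hypothetical MDL
 * allocator might apply it. MDL_ZONE_SIZE, MmSizeOfMdl() and mdl_zone
 * are the real definitions from this file; the function name and the
 * NTOSKRNL_EXAMPLES guard are invented for illustration.
 */
#ifdef NTOSKRNL_EXAMPLES
static mdl *
example_mdl_alloc(void *vaddr, uint32_t len)
{
	mdl			*m;

	/* MDLs too big for the fixed-size zone fall back to the heap. */
	if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
		m = ExAllocatePoolWithTag(NonPagedPool,
		    MmSizeOfMdl(vaddr, len), 0);
	else
		m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
	return (m);
}
#endif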
  391 
  392 int
  393 ntoskrnl_libfini()
  394 {
  395         image_patch_table       *patch;
  396         callout_entry           *e;
  397         list_entry              *l;
  398 
  399         patch = ntoskrnl_functbl;
  400         while (patch->ipt_func != NULL) {
  401                 windrv_unwrap(patch->ipt_wrap);
  402                 patch++;
  403         }
  404 
  405         /* Stop the workitem queues. */
  406         ntoskrnl_destroy_workitem_threads();
  407         /* Stop the DPC queues. */
  408         ntoskrnl_destroy_dpc_threads();
  409 
  410         ExFreePool(kq_queues);
  411         ExFreePool(wq_queues);
  412 
  413         uma_zdestroy(mdl_zone);
  414         uma_zdestroy(iw_zone);
  415 
  416         mtx_lock_spin(&ntoskrnl_calllock);
   417         while (!IsListEmpty(&ntoskrnl_calllist)) {
  418                 l = RemoveHeadList(&ntoskrnl_calllist);
  419                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
  420                 mtx_unlock_spin(&ntoskrnl_calllock);
  421                 ExFreePool(e);
  422                 mtx_lock_spin(&ntoskrnl_calllock);
  423         }
  424         mtx_unlock_spin(&ntoskrnl_calllock);
  425 
  426         mtx_destroy(&ntoskrnl_dispatchlock);
  427         mtx_destroy(&ntoskrnl_interlock);
  428         mtx_destroy(&ntoskrnl_calllock);
  429 
  430         return (0);
  431 }
  432 
  433 /*
  434  * We need to be able to reference this externally from the wrapper;
  435  * GCC only generates a local implementation of memset.
  436  */
  437 static void *
  438 ntoskrnl_memset(buf, ch, size)
  439         void                    *buf;
  440         int                     ch;
  441         size_t                  size;
  442 {
  443         return (memset(buf, ch, size));
  444 }
  445 
  446 static void *
  447 ntoskrnl_memmove(dst, src, size)
   448         void                    *dst;
   449         void                    *src;
  450         size_t                  size;
  451 {
  452         bcopy(src, dst, size);
  453         return (dst);
  454 }
  455 
  456 static void *
  457 ntoskrnl_memchr(void *buf, unsigned char ch, size_t len)
  458 {
  459         if (len != 0) {
  460                 unsigned char *p = buf;
  461 
  462                 do {
  463                         if (*p++ == ch)
  464                                 return (p - 1);
  465                 } while (--len != 0);
  466         }
  467         return (NULL);
  468 }
  469 
  470 static char *
  471 ntoskrnl_strstr(s, find)
  472         char *s, *find;
  473 {
  474         char c, sc;
  475         size_t len;
  476 
  477         if ((c = *find++) != 0) {
  478                 len = strlen(find);
  479                 do {
  480                         do {
  481                                 if ((sc = *s++) == 0)
  482                                         return (NULL);
  483                         } while (sc != c);
  484                 } while (strncmp(s, find, len) != 0);
  485                 s--;
  486         }
  487         return ((char *)s);
  488 }
  489 
  490 /* Taken from libc */
  491 static char *
  492 ntoskrnl_strncat(dst, src, n)
  493         char            *dst;
  494         char            *src;
  495         size_t          n;
  496 {
  497         if (n != 0) {
  498                 char *d = dst;
  499                 const char *s = src;
  500 
  501                 while (*d != 0)
  502                         d++;
  503                 do {
  504                         if ((*d = *s++) == 0)
  505                                 break;
  506                         d++;
  507                 } while (--n != 0);
  508                 *d = 0;
  509         }
  510         return (dst);
  511 }
  512 
  513 static int
  514 ntoskrnl_toupper(c)
  515         int                     c;
  516 {
  517         return (toupper(c));
  518 }
  519 
  520 static int
  521 ntoskrnl_tolower(c)
  522         int                     c;
  523 {
  524         return (tolower(c));
  525 }
  526 
  527 static uint8_t
  528 RtlEqualUnicodeString(unicode_string *str1, unicode_string *str2,
  529         uint8_t caseinsensitive)
  530 {
  531         int                     i;
  532 
  533         if (str1->us_len != str2->us_len)
  534                 return (FALSE);
  535 
  536         for (i = 0; i < str1->us_len; i++) {
  537                 if (caseinsensitive == TRUE) {
  538                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
  539                             toupper((char)(str2->us_buf[i] & 0xFF)))
  540                                 return (FALSE);
  541                 } else {
  542                         if (str1->us_buf[i] != str2->us_buf[i])
  543                                 return (FALSE);
  544                 }
  545         }
  546 
  547         return (TRUE);
  548 }
  549 
  550 static void
  551 RtlCopyString(dst, src)
  552         ansi_string             *dst;
  553         const ansi_string       *src;
  554 {
  555         if (src != NULL && src->as_buf != NULL && dst->as_buf != NULL) {
  556                 dst->as_len = min(src->as_len, dst->as_maxlen);
  557                 memcpy(dst->as_buf, src->as_buf, dst->as_len);
  558                 if (dst->as_len < dst->as_maxlen)
  559                         dst->as_buf[dst->as_len] = 0;
  560         } else
  561                 dst->as_len = 0;
  562 }
  563 
  564 static void
  565 RtlCopyUnicodeString(dest, src)
  566         unicode_string          *dest;
  567         unicode_string          *src;
  568 {
  569 
  570         if (dest->us_maxlen >= src->us_len)
  571                 dest->us_len = src->us_len;
  572         else
  573                 dest->us_len = dest->us_maxlen;
  574         memcpy(dest->us_buf, src->us_buf, dest->us_len);
  575 }
  576 
  577 static void
  578 ntoskrnl_ascii_to_unicode(ascii, unicode, len)
  579         char                    *ascii;
  580         uint16_t                *unicode;
  581         int                     len;
  582 {
  583         int                     i;
  584         uint16_t                *ustr;
  585 
  586         ustr = unicode;
  587         for (i = 0; i < len; i++) {
  588                 *ustr = (uint16_t)ascii[i];
  589                 ustr++;
  590         }
  591 }
  592 
  593 static void
  594 ntoskrnl_unicode_to_ascii(unicode, ascii, len)
  595         uint16_t                *unicode;
  596         char                    *ascii;
  597         int                     len;
  598 {
  599         int                     i;
  600         uint8_t                 *astr;
  601 
  602         astr = ascii;
  603         for (i = 0; i < len / 2; i++) {
  604                 *astr = (uint8_t)unicode[i];
  605                 astr++;
  606         }
  607 }
  608 
  609 uint32_t
  610 RtlUnicodeStringToAnsiString(ansi_string *dest, unicode_string *src, uint8_t allocate)
  611 {
  612         if (dest == NULL || src == NULL)
  613                 return (STATUS_INVALID_PARAMETER);
  614 
  615         dest->as_len = src->us_len / 2;
  616         if (dest->as_maxlen < dest->as_len)
  617                 dest->as_len = dest->as_maxlen;
  618 
  619         if (allocate == TRUE) {
  620                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
  621                     (src->us_len / 2) + 1, 0);
  622                 if (dest->as_buf == NULL)
  623                         return (STATUS_INSUFFICIENT_RESOURCES);
  624                 dest->as_len = dest->as_maxlen = src->us_len / 2;
  625         } else {
  626                 dest->as_len = src->us_len / 2; /* XXX */
  627                 if (dest->as_maxlen < dest->as_len)
  628                         dest->as_len = dest->as_maxlen;
  629         }
  630 
  631         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
  632             dest->as_len * 2);
  633 
  634         return (STATUS_SUCCESS);
  635 }
  636 
  637 uint32_t
  638 RtlAnsiStringToUnicodeString(unicode_string *dest, ansi_string *src,
  639         uint8_t allocate)
  640 {
  641         if (dest == NULL || src == NULL)
  642                 return (STATUS_INVALID_PARAMETER);
  643 
  644         if (allocate == TRUE) {
  645                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
  646                     src->as_len * 2, 0);
  647                 if (dest->us_buf == NULL)
  648                         return (STATUS_INSUFFICIENT_RESOURCES);
  649                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
  650         } else {
  651                 dest->us_len = src->as_len * 2; /* XXX */
  652                 if (dest->us_maxlen < dest->us_len)
  653                         dest->us_len = dest->us_maxlen;
  654         }
  655 
  656         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
  657             dest->us_len / 2);
  658 
  659         return (STATUS_SUCCESS);
  660 }
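
/*
 * Illustrative sketch (not part of the original file): one conversion
 * through RtlAnsiStringToUnicodeString() above, with the routine
 * allocating the Unicode buffer. Field names match the ansi_string and
 * unicode_string layouts used throughout this file; the function name
 * and NTOSKRNL_EXAMPLES guard are invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static void
example_string_conv(void)
{
	static char		name[] = "ndis0";
	ansi_string		as;
	unicode_string		us;

	as.as_buf = name;
	as.as_len = as.as_maxlen = sizeof(name) - 1;

	/* TRUE: the routine allocates us.us_buf itself. */
	if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) != STATUS_SUCCESS)
		return;
	/* ... use us.us_buf ... */
	ExFreePool(us.us_buf);
}
#endif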
  661 
  662 void *
  663 ExAllocatePoolWithTag(pooltype, len, tag)
  664         uint32_t                pooltype;
  665         size_t                  len;
  666         uint32_t                tag;
  667 {
  668         void                    *buf;
  669 
  670         buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
  671         if (buf == NULL)
  672                 return (NULL);
  673 
  674         return (buf);
  675 }
  676 
  677 static void
  678 ExFreePoolWithTag(buf, tag)
  679         void            *buf;
  680         uint32_t        tag;
  681 {
  682         ExFreePool(buf);
  683 }
  684 
  685 void
  686 ExFreePool(buf)
  687         void                    *buf;
  688 {
  689         free(buf, M_DEVBUF);
  690 }
  691 
  692 uint32_t
  693 IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
  694         driver_object           *drv;
  695         void                    *clid;
  696         uint32_t                extlen;
  697         void                    **ext;
  698 {
  699         custom_extension        *ce;
  700 
  701         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
  702             + extlen, 0);
  703 
  704         if (ce == NULL)
  705                 return (STATUS_INSUFFICIENT_RESOURCES);
  706 
  707         ce->ce_clid = clid;
  708         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
  709 
  710         *ext = (void *)(ce + 1);
  711 
  712         return (STATUS_SUCCESS);
  713 }
  714 
  715 void *
  716 IoGetDriverObjectExtension(drv, clid)
  717         driver_object           *drv;
  718         void                    *clid;
  719 {
  720         list_entry              *e;
  721         custom_extension        *ce;
  722 
  723         /*
  724          * Sanity check. Our dummy bus drivers don't have
   725          * any driver extensions.
  726          */
  727 
  728         if (drv->dro_driverext == NULL)
  729                 return (NULL);
  730 
  731         e = drv->dro_driverext->dre_usrext.nle_flink;
  732         while (e != &drv->dro_driverext->dre_usrext) {
  733                 ce = (custom_extension *)e;
  734                 if (ce->ce_clid == clid)
  735                         return ((void *)(ce + 1));
  736                 e = e->nle_flink;
  737         }
  738 
  739         return (NULL);
  740 }
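
/*
 * Illustrative sketch (not part of the original file): pairing the two
 * routines above. The client id is just a pointer value used as a lookup
 * key; "example_clid", the function and the NTOSKRNL_EXAMPLES guard are
 * invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static int example_clid;

static void
example_driver_ext(driver_object *drv)
{
	void			*ext;

	if (IoAllocateDriverObjectExtension(drv, &example_clid,
	    sizeof(uint32_t), &ext) != STATUS_SUCCESS)
		return;
	*(uint32_t *)ext = 0xcafe;

	/* A later lookup with the same client id finds the same region. */
	ext = IoGetDriverObjectExtension(drv, &example_clid);
}
#endif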
  741 
  742 
  743 uint32_t
  744 IoCreateDevice(driver_object *drv, uint32_t devextlen, unicode_string *devname,
  745         uint32_t devtype, uint32_t devchars, uint8_t exclusive,
  746         device_object **newdev)
  747 {
  748         device_object           *dev;
  749 
  750         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
  751         if (dev == NULL)
  752                 return (STATUS_INSUFFICIENT_RESOURCES);
  753 
  754         dev->do_type = devtype;
  755         dev->do_drvobj = drv;
  756         dev->do_currirp = NULL;
  757         dev->do_flags = 0;
  758 
  759         if (devextlen) {
  760                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
  761                     devextlen, 0);
  762 
  763                 if (dev->do_devext == NULL) {
  764                         ExFreePool(dev);
  765                         return (STATUS_INSUFFICIENT_RESOURCES);
  766                 }
  767 
  768                 bzero(dev->do_devext, devextlen);
  769         } else
  770                 dev->do_devext = NULL;
  771 
  772         dev->do_size = sizeof(device_object) + devextlen;
  773         dev->do_refcnt = 1;
  774         dev->do_attacheddev = NULL;
  775         dev->do_nextdev = NULL;
  776         dev->do_devtype = devtype;
  777         dev->do_stacksize = 1;
  778         dev->do_alignreq = 1;
  779         dev->do_characteristics = devchars;
  780         dev->do_iotimer = NULL;
  781         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
  782 
  783         /*
  784          * Vpd is used for disk/tape devices,
  785          * but we don't support those. (Yet.)
  786          */
  787         dev->do_vpb = NULL;
  788 
  789         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
  790             sizeof(devobj_extension), 0);
  791 
  792         if (dev->do_devobj_ext == NULL) {
  793                 if (dev->do_devext != NULL)
  794                         ExFreePool(dev->do_devext);
  795                 ExFreePool(dev);
  796                 return (STATUS_INSUFFICIENT_RESOURCES);
  797         }
  798 
  799         dev->do_devobj_ext->dve_type = 0;
  800         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
  801         dev->do_devobj_ext->dve_devobj = dev;
  802 
  803         /*
  804          * Attach this device to the driver object's list
  805          * of devices. Note: this is not the same as attaching
  806          * the device to the device stack. The driver's AddDevice
   807          * routine must explicitly call IoAttachDeviceToDeviceStack()
  808          * to do that.
  809          */
  810 
  811         if (drv->dro_devobj == NULL) {
  812                 drv->dro_devobj = dev;
  813                 dev->do_nextdev = NULL;
  814         } else {
  815                 dev->do_nextdev = drv->dro_devobj;
  816                 drv->dro_devobj = dev;
  817         }
  818 
  819         *newdev = dev;
  820 
  821         return (STATUS_SUCCESS);
  822 }
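
/*
 * Illustrative sketch (not part of the original file): the two-step
 * sequence the comment above describes. A hypothetical AddDevice routine
 * creates its own device object, then must attach it to the target
 * device's stack explicitly; IoCreateDevice() alone only links it into
 * the driver's device list. Names and the NTOSKRNL_EXAMPLES guard are
 * invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static uint32_t
example_add_device(driver_object *drv, device_object *pdo)
{
	device_object		*fdo;
	uint32_t		status;

	status = IoCreateDevice(drv, 0, NULL, 0, 0, FALSE, &fdo);
	if (status != STATUS_SUCCESS)
		return (status);
	IoAttachDeviceToDeviceStack(fdo, pdo);
	return (STATUS_SUCCESS);
}
#endif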
  823 
  824 void
  825 IoDeleteDevice(dev)
  826         device_object           *dev;
  827 {
  828         device_object           *prev;
  829 
  830         if (dev == NULL)
  831                 return;
  832 
  833         if (dev->do_devobj_ext != NULL)
  834                 ExFreePool(dev->do_devobj_ext);
  835 
  836         if (dev->do_devext != NULL)
  837                 ExFreePool(dev->do_devext);
  838 
  839         /* Unlink the device from the driver's device list. */
  840 
  841         prev = dev->do_drvobj->dro_devobj;
  842         if (prev == dev)
  843                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
  844         else {
  845                 while (prev->do_nextdev != dev)
  846                         prev = prev->do_nextdev;
  847                 prev->do_nextdev = dev->do_nextdev;
  848         }
  849 
  850         ExFreePool(dev);
  851 }
  852 
  853 device_object *
  854 IoGetAttachedDevice(dev)
  855         device_object           *dev;
  856 {
  857         device_object           *d;
  858 
  859         if (dev == NULL)
  860                 return (NULL);
  861 
  862         d = dev;
  863 
  864         while (d->do_attacheddev != NULL)
  865                 d = d->do_attacheddev;
  866 
  867         return (d);
  868 }
  869 
  870 static irp *
  871 IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
  872         uint32_t                func;
  873         device_object           *dobj;
  874         void                    *buf;
  875         uint32_t                len;
  876         uint64_t                *off;
  877         nt_kevent               *event;
  878         io_status_block         *status;
  879 {
  880         irp                     *ip;
  881 
  882         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
  883         if (ip == NULL)
  884                 return (NULL);
  885         ip->irp_usrevent = event;
  886 
  887         return (ip);
  888 }
  889 
  890 static irp *
  891 IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
  892         uint32_t                func;
  893         device_object           *dobj;
  894         void                    *buf;
  895         uint32_t                len;
  896         uint64_t                *off;
  897         io_status_block         *status;
  898 {
  899         irp                     *ip;
  900         io_stack_location       *sl;
  901 
  902         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
  903         if (ip == NULL)
  904                 return (NULL);
  905 
  906         ip->irp_usriostat = status;
  907         ip->irp_tail.irp_overlay.irp_thread = NULL;
  908 
  909         sl = IoGetNextIrpStackLocation(ip);
  910         sl->isl_major = func;
  911         sl->isl_minor = 0;
  912         sl->isl_flags = 0;
  913         sl->isl_ctl = 0;
  914         sl->isl_devobj = dobj;
  915         sl->isl_fileobj = NULL;
  916         sl->isl_completionfunc = NULL;
  917 
  918         ip->irp_userbuf = buf;
  919 
  920         if (dobj->do_flags & DO_BUFFERED_IO) {
  921                 ip->irp_assoc.irp_sysbuf =
  922                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
  923                 if (ip->irp_assoc.irp_sysbuf == NULL) {
  924                         IoFreeIrp(ip);
  925                         return (NULL);
  926                 }
  927                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
  928         }
  929 
  930         if (dobj->do_flags & DO_DIRECT_IO) {
  931                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
  932                 if (ip->irp_mdl == NULL) {
  933                         if (ip->irp_assoc.irp_sysbuf != NULL)
  934                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
  935                         IoFreeIrp(ip);
  936                         return (NULL);
  937                 }
  938                 ip->irp_userbuf = NULL;
  939                 ip->irp_assoc.irp_sysbuf = NULL;
  940         }
  941 
  942         if (func == IRP_MJ_READ) {
  943                 sl->isl_parameters.isl_read.isl_len = len;
  944                 if (off != NULL)
  945                         sl->isl_parameters.isl_read.isl_byteoff = *off;
  946                 else
  947                         sl->isl_parameters.isl_read.isl_byteoff = 0;
  948         }
  949 
  950         if (func == IRP_MJ_WRITE) {
  951                 sl->isl_parameters.isl_write.isl_len = len;
  952                 if (off != NULL)
  953                         sl->isl_parameters.isl_write.isl_byteoff = *off;
  954                 else
  955                         sl->isl_parameters.isl_write.isl_byteoff = 0;
  956         }
  957 
  958         return (ip);
  959 }
  960 
  961 static irp *
  962 IoBuildDeviceIoControlRequest(uint32_t iocode, device_object *dobj, void *ibuf,
  963         uint32_t ilen, void *obuf, uint32_t olen, uint8_t isinternal,
  964         nt_kevent *event, io_status_block *status)
  965 {
  966         irp                     *ip;
  967         io_stack_location       *sl;
  968         uint32_t                buflen;
  969 
  970         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
  971         if (ip == NULL)
  972                 return (NULL);
  973         ip->irp_usrevent = event;
  974         ip->irp_usriostat = status;
  975         ip->irp_tail.irp_overlay.irp_thread = NULL;
  976 
  977         sl = IoGetNextIrpStackLocation(ip);
  978         sl->isl_major = isinternal == TRUE ?
  979             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
  980         sl->isl_minor = 0;
  981         sl->isl_flags = 0;
  982         sl->isl_ctl = 0;
  983         sl->isl_devobj = dobj;
  984         sl->isl_fileobj = NULL;
  985         sl->isl_completionfunc = NULL;
  986         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
  987         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
  988         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
  989 
   990         switch (IO_METHOD(iocode)) {
  991         case METHOD_BUFFERED:
  992                 if (ilen > olen)
  993                         buflen = ilen;
  994                 else
  995                         buflen = olen;
  996                 if (buflen) {
  997                         ip->irp_assoc.irp_sysbuf =
  998                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
  999                         if (ip->irp_assoc.irp_sysbuf == NULL) {
 1000                                 IoFreeIrp(ip);
 1001                                 return (NULL);
 1002                         }
 1003                 }
 1004                 if (ilen && ibuf != NULL) {
 1005                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
 1006                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
 1007                             buflen - ilen);
 1008                 } else
 1009                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
 1010                 ip->irp_userbuf = obuf;
 1011                 break;
 1012         case METHOD_IN_DIRECT:
 1013         case METHOD_OUT_DIRECT:
 1014                 if (ilen && ibuf != NULL) {
 1015                         ip->irp_assoc.irp_sysbuf =
 1016                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
 1017                         if (ip->irp_assoc.irp_sysbuf == NULL) {
 1018                                 IoFreeIrp(ip);
 1019                                 return (NULL);
 1020                         }
 1021                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
 1022                 }
 1023                 if (olen && obuf != NULL) {
 1024                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
 1025                             FALSE, FALSE, ip);
 1026                         /*
 1027                          * Normally we would MmProbeAndLockPages()
 1028                          * here, but we don't have to in our
  1029                          * implementation.
 1030                          */
 1031                 }
 1032                 break;
 1033         case METHOD_NEITHER:
 1034                 ip->irp_userbuf = obuf;
 1035                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
 1036                 break;
 1037         default:
 1038                 break;
 1039         }
 1040 
 1041         /*
 1042          * Ideally, we should associate this IRP with the calling
 1043          * thread here.
 1044          */
 1045 
 1046         return (ip);
 1047 }
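
/*
 * Illustrative sketch (not part of the original file): a METHOD_BUFFERED
 * style call into the routine above, using the same buffer for input and
 * output. The wrapper name and NTOSKRNL_EXAMPLES guard are invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static irp *
example_build_ioctl(device_object *dobj, uint32_t iocode, void *buf,
    uint32_t len, nt_kevent *ev, io_status_block *iosb)
{
	/* FALSE: build IRP_MJ_DEVICE_CONTROL, not the internal variant. */
	return (IoBuildDeviceIoControlRequest(iocode, dobj, buf, len,
	    buf, len, FALSE, ev, iosb));
}
#endif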
 1048 
 1049 static irp *
 1050 IoAllocateIrp(uint8_t stsize, uint8_t chargequota)
 1051 {
 1052         irp                     *i;
 1053 
 1054         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
 1055         if (i == NULL)
 1056                 return (NULL);
 1057 
 1058         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
 1059 
 1060         return (i);
 1061 }
 1062 
 1063 static irp *
 1064 IoMakeAssociatedIrp(irp *ip, uint8_t stsize)
 1065 {
 1066         irp                     *associrp;
 1067 
 1068         associrp = IoAllocateIrp(stsize, FALSE);
 1069         if (associrp == NULL)
 1070                 return (NULL);
 1071 
 1072         mtx_lock(&ntoskrnl_dispatchlock);
 1073         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
 1074         associrp->irp_tail.irp_overlay.irp_thread =
 1075             ip->irp_tail.irp_overlay.irp_thread;
 1076         associrp->irp_assoc.irp_master = ip;
 1077         mtx_unlock(&ntoskrnl_dispatchlock);
 1078 
 1079         return (associrp);
 1080 }
 1081 
 1082 static void
 1083 IoFreeIrp(ip)
 1084         irp                     *ip;
 1085 {
 1086         ExFreePool(ip);
 1087 }
 1088 
 1089 static void
 1090 IoInitializeIrp(irp *io, uint16_t psize, uint8_t ssize)
 1091 {
 1092         bzero((char *)io, IoSizeOfIrp(ssize));
 1093         io->irp_size = psize;
 1094         io->irp_stackcnt = ssize;
 1095         io->irp_currentstackloc = ssize;
 1096         InitializeListHead(&io->irp_thlist);
 1097         io->irp_tail.irp_overlay.irp_csl =
 1098             (io_stack_location *)(io + 1) + ssize;
 1099 }
 1100 
 1101 static void
 1102 IoReuseIrp(ip, status)
 1103         irp                     *ip;
 1104         uint32_t                status;
 1105 {
 1106         uint8_t                 allocflags;
 1107 
 1108         allocflags = ip->irp_allocflags;
 1109         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
 1110         ip->irp_iostat.isb_status = status;
 1111         ip->irp_allocflags = allocflags;
 1112 }
 1113 
 1114 void
 1115 IoAcquireCancelSpinLock(uint8_t *irql)
 1116 {
 1117         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
 1118 }
 1119 
 1120 void
 1121 IoReleaseCancelSpinLock(uint8_t irql)
 1122 {
 1123         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
 1124 }
 1125 
 1126 uint8_t
 1127 IoCancelIrp(irp *ip)
 1128 {
 1129         cancel_func             cfunc;
 1130         uint8_t                 cancelirql;
 1131 
 1132         IoAcquireCancelSpinLock(&cancelirql);
 1133         cfunc = IoSetCancelRoutine(ip, NULL);
 1134         ip->irp_cancel = TRUE;
 1135         if (cfunc == NULL) {
 1136                 IoReleaseCancelSpinLock(cancelirql);
 1137                 return (FALSE);
 1138         }
 1139         ip->irp_cancelirql = cancelirql;
 1140         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
 1141         return (uint8_t)IoSetCancelValue(ip, TRUE);
 1142 }
 1143 
 1144 uint32_t
 1145 IofCallDriver(dobj, ip)
 1146         device_object           *dobj;
 1147         irp                     *ip;
 1148 {
 1149         driver_object           *drvobj;
 1150         io_stack_location       *sl;
 1151         uint32_t                status;
 1152         driver_dispatch         disp;
 1153 
 1154         drvobj = dobj->do_drvobj;
 1155 
 1156         if (ip->irp_currentstackloc <= 0)
 1157                 panic("IoCallDriver(): out of stack locations");
 1158 
 1159         IoSetNextIrpStackLocation(ip);
 1160         sl = IoGetCurrentIrpStackLocation(ip);
 1161 
 1162         sl->isl_devobj = dobj;
 1163 
 1164         disp = drvobj->dro_dispatch[sl->isl_major];
 1165         status = MSCALL2(disp, dobj, ip);
 1166 
 1167         return (status);
 1168 }
 1169 
 1170 void
 1171 IofCompleteRequest(irp *ip, uint8_t prioboost)
 1172 {
 1173         uint32_t                status;
 1174         device_object           *dobj;
 1175         io_stack_location       *sl;
 1176         completion_func         cf;
 1177 
 1178         KASSERT(ip->irp_iostat.isb_status != STATUS_PENDING,
 1179             ("incorrect IRP(%p) status (STATUS_PENDING)", ip));
 1180 
 1181         sl = IoGetCurrentIrpStackLocation(ip);
 1182         IoSkipCurrentIrpStackLocation(ip);
 1183 
 1184         do {
 1185                 if (sl->isl_ctl & SL_PENDING_RETURNED)
 1186                         ip->irp_pendingreturned = TRUE;
 1187 
 1188                 if (ip->irp_currentstackloc != (ip->irp_stackcnt + 1))
 1189                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
 1190                 else
 1191                         dobj = NULL;
 1192 
 1193                 if (sl->isl_completionfunc != NULL &&
 1194                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
 1195                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
 1196                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
 1197                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
 1198                     (ip->irp_cancel == TRUE &&
 1199                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
 1200                         cf = sl->isl_completionfunc;
 1201                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
 1202                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
 1203                                 return;
 1204                 } else {
 1205                         if ((ip->irp_currentstackloc <= ip->irp_stackcnt) &&
 1206                             (ip->irp_pendingreturned == TRUE))
 1207                                 IoMarkIrpPending(ip);
 1208                 }
 1209 
  1210                 /* Move to the next stack location. */
 1211                 IoSkipCurrentIrpStackLocation(ip);
 1212                 sl++;
 1213         } while (ip->irp_currentstackloc <= (ip->irp_stackcnt + 1));
 1214 
 1215         if (ip->irp_usriostat != NULL)
 1216                 *ip->irp_usriostat = ip->irp_iostat;
 1217         if (ip->irp_usrevent != NULL)
 1218                 KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
 1219 
 1220         /* Handle any associated IRPs. */
 1221 
 1222         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
 1223                 uint32_t                masterirpcnt;
 1224                 irp                     *masterirp;
 1225                 mdl                     *m;
 1226 
 1227                 masterirp = ip->irp_assoc.irp_master;
 1228                 masterirpcnt =
 1229                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
 1230 
 1231                 while ((m = ip->irp_mdl) != NULL) {
 1232                         ip->irp_mdl = m->mdl_next;
 1233                         IoFreeMdl(m);
 1234                 }
 1235                 IoFreeIrp(ip);
 1236                 if (masterirpcnt == 0)
 1237                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
 1238                 return;
 1239         }
 1240 
 1241         /* With any luck, these conditions will never arise. */
 1242 
 1243         if (ip->irp_flags & IRP_PAGING_IO) {
 1244                 if (ip->irp_mdl != NULL)
 1245                         IoFreeMdl(ip->irp_mdl);
 1246                 IoFreeIrp(ip);
 1247         }
 1248 }
 1249 
 1250 void
 1251 ntoskrnl_intr(arg)
 1252         void                    *arg;
 1253 {
 1254         kinterrupt              *iobj;
 1255         uint8_t                 irql;
 1256         uint8_t                 claimed;
 1257         list_entry              *l;
 1258 
 1259         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1260         l = ntoskrnl_intlist.nle_flink;
 1261         while (l != &ntoskrnl_intlist) {
 1262                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
 1263                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
 1264                 if (claimed == TRUE)
 1265                         break;
 1266                 l = l->nle_flink;
 1267         }
 1268         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1269 }
 1270 
 1271 uint8_t
 1272 KeAcquireInterruptSpinLock(iobj)
 1273         kinterrupt              *iobj;
 1274 {
 1275         uint8_t                 irql;
 1276         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1277         return (irql);
 1278 }
 1279 
 1280 void
 1281 KeReleaseInterruptSpinLock(kinterrupt *iobj, uint8_t irql)
 1282 {
 1283         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1284 }
 1285 
 1286 uint8_t
 1287 KeSynchronizeExecution(iobj, syncfunc, syncctx)
 1288         kinterrupt              *iobj;
 1289         void                    *syncfunc;
 1290         void                    *syncctx;
 1291 {
 1292         uint8_t                 irql;
 1293 
 1294         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1295         MSCALL1(syncfunc, syncctx);
 1296         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1297 
 1298         return (TRUE);
 1299 }
 1300 
 1301 /*
 1302  * IoConnectInterrupt() is passed only the interrupt vector and
 1303  * irql that a device wants to use, but no device-specific tag
 1304  * of any kind. This conflicts rather badly with FreeBSD's
 1305  * bus_setup_intr(), which needs the device_t for the device
 1306  * requesting interrupt delivery. In order to bypass this
 1307  * inconsistency, we implement a second level of interrupt
 1308  * dispatching on top of bus_setup_intr(). All devices use
 1309  * ntoskrnl_intr() as their ISR, and any device requesting
 1310  * interrupts will be registered with ntoskrnl_intr()'s interrupt
 1311  * dispatch list. When an interrupt arrives, we walk the list
 1312  * and invoke all the registered ISRs. This effectively makes all
 1313  * interrupts shared, but it's the only way to duplicate the
 1314  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 1315  */
 1316 
 1317 uint32_t
 1318 IoConnectInterrupt(kinterrupt **iobj, void *svcfunc, void *svcctx,
 1319         kspin_lock *lock, uint32_t vector, uint8_t irql, uint8_t syncirql,
 1320         uint8_t imode, uint8_t shared, uint32_t affinity, uint8_t savefloat)
 1321 {
 1322         uint8_t                 curirql;
 1323 
 1324         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
 1325         if (*iobj == NULL)
 1326                 return (STATUS_INSUFFICIENT_RESOURCES);
 1327 
 1328         (*iobj)->ki_svcfunc = svcfunc;
 1329         (*iobj)->ki_svcctx = svcctx;
 1330 
 1331         if (lock == NULL) {
 1332                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
 1333                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
 1334         } else
 1335                 (*iobj)->ki_lock = lock;
 1336 
 1337         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
 1338         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
 1339         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
 1340 
 1341         return (STATUS_SUCCESS);
 1342 }
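
/*
 * Illustrative sketch (not part of the original file): registering an
 * ISR through the shared dispatch scheme described above. The vector,
 * irql, affinity and related arguments are accepted only for API
 * compatibility; every registration simply joins ntoskrnl_intr()'s
 * dispatch list. "wrapped_isr" stands in for an ISR already wrapped for
 * the Windows calling convention; the function name and the
 * NTOSKRNL_EXAMPLES guard are invented.
 */
#ifdef NTOSKRNL_EXAMPLES
static kinterrupt	*example_iobj;

static uint32_t
example_connect(void *wrapped_isr, void *isr_ctx)
{
	/* NULL lock: IoConnectInterrupt() falls back to its private one. */
	return (IoConnectInterrupt(&example_iobj, wrapped_isr, isr_ctx,
	    NULL, 0, 0, 0, 0, TRUE, 0, FALSE));
}
#endif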
 1343 
 1344 void
 1345 IoDisconnectInterrupt(iobj)
 1346         kinterrupt              *iobj;
 1347 {
 1348         uint8_t                 irql;
 1349 
 1350         if (iobj == NULL)
 1351                 return;
 1352 
 1353         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1354         RemoveEntryList((&iobj->ki_list));
 1355         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1356 
 1357         ExFreePool(iobj);
 1358 }
 1359 
 1360 device_object *
 1361 IoAttachDeviceToDeviceStack(src, dst)
 1362         device_object           *src;
 1363         device_object           *dst;
 1364 {
 1365         device_object           *attached;
 1366 
 1367         mtx_lock(&ntoskrnl_dispatchlock);
 1368         attached = IoGetAttachedDevice(dst);
 1369         attached->do_attacheddev = src;
 1370         src->do_attacheddev = NULL;
 1371         src->do_stacksize = attached->do_stacksize + 1;
 1372         mtx_unlock(&ntoskrnl_dispatchlock);
 1373 
 1374         return (attached);
 1375 }
 1376 
 1377 void
 1378 IoDetachDevice(topdev)
 1379         device_object           *topdev;
 1380 {
 1381         device_object           *tail;
 1382 
 1383         mtx_lock(&ntoskrnl_dispatchlock);
 1384 
 1385         /* First, break the chain. */
 1386         tail = topdev->do_attacheddev;
 1387         if (tail == NULL) {
 1388                 mtx_unlock(&ntoskrnl_dispatchlock);
 1389                 return;
 1390         }
 1391         topdev->do_attacheddev = tail->do_attacheddev;
 1392         topdev->do_refcnt--;
 1393 
  1394         /* Now reduce the stacksize count for the remaining (tail) objects. */
 1395 
 1396         tail = topdev->do_attacheddev;
 1397         while (tail != NULL) {
 1398                 tail->do_stacksize--;
 1399                 tail = tail->do_attacheddev;
 1400         }
 1401 
 1402         mtx_unlock(&ntoskrnl_dispatchlock);
 1403 }
 1404 
 1405 /*
 1406  * For the most part, an object is considered signalled if
 1407  * dh_sigstate == TRUE. The exception is for mutant objects
 1408  * (mutexes), where the logic works like this:
 1409  *
 1410  * - If the thread already owns the object and sigstate is
 1411  *   less than or equal to 0, then the object is considered
 1412  *   signalled (recursive acquisition).
 1413  * - If dh_sigstate == 1, the object is also considered
 1414  *   signalled.
 1415  */
 1416 
 1417 static int
 1418 ntoskrnl_is_signalled(obj, td)
 1419         nt_dispatch_header      *obj;
 1420         struct thread           *td;
 1421 {
 1422         kmutant                 *km;
 1423 
 1424         if (obj->dh_type == DISP_TYPE_MUTANT) {
 1425                 km = (kmutant *)obj;
 1426                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
 1427                     obj->dh_sigstate == 1)
 1428                         return (TRUE);
 1429                 return (FALSE);
 1430         }
 1431 
 1432         if (obj->dh_sigstate > 0)
 1433                 return (TRUE);
 1434         return (FALSE);
 1435 }
 1436 
 1437 static void
 1438 ntoskrnl_satisfy_wait(obj, td)
 1439         nt_dispatch_header      *obj;
 1440         struct thread           *td;
 1441 {
 1442         kmutant                 *km;
 1443 
 1444         switch (obj->dh_type) {
 1445         case DISP_TYPE_MUTANT:
 1446                 km = (struct kmutant *)obj;
 1447                 obj->dh_sigstate--;
 1448                 /*
 1449                  * If sigstate reaches 0, the mutex is now
 1450                  * non-signalled (the new thread owns it).
 1451                  */
 1452                 if (obj->dh_sigstate == 0) {
 1453                         km->km_ownerthread = td;
 1454                         if (km->km_abandoned == TRUE)
 1455                                 km->km_abandoned = FALSE;
 1456                 }
 1457                 break;
 1458         /* Synchronization objects get reset to unsignalled. */
 1459         case DISP_TYPE_SYNCHRONIZATION_EVENT:
 1460         case DISP_TYPE_SYNCHRONIZATION_TIMER:
 1461                 obj->dh_sigstate = 0;
 1462                 break;
 1463         case DISP_TYPE_SEMAPHORE:
 1464                 obj->dh_sigstate--;
 1465                 break;
 1466         default:
 1467                 break;
 1468         }
 1469 }
 1470 
 1471 static void
 1472 ntoskrnl_satisfy_multiple_waits(wb)
 1473         wait_block              *wb;
 1474 {
 1475         wait_block              *cur;
 1476         struct thread           *td;
 1477 
 1478         cur = wb;
 1479         td = wb->wb_kthread;
 1480 
 1481         do {
 1482                 ntoskrnl_satisfy_wait(wb->wb_object, td);
 1483                 cur->wb_awakened = TRUE;
 1484                 cur = cur->wb_next;
 1485         } while (cur != wb);
 1486 }
 1487 
 1488 /* Always called with dispatcher lock held. */
 1489 static void
 1490 ntoskrnl_waittest(obj, increment)
 1491         nt_dispatch_header      *obj;
 1492         uint32_t                increment;
 1493 {
 1494         wait_block              *w, *next;
 1495         list_entry              *e;
 1496         struct thread           *td;
 1497         wb_ext                  *we;
 1498         int                     satisfied;
 1499 
 1500         /*
 1501          * Once an object has been signalled, we walk its list of
 1502          * wait blocks. If a wait block can be awakened, then satisfy
 1503          * waits as necessary and wake the thread.
 1504          *
 1505          * The rules work like this:
 1506          *
 1507          * If a wait block is marked as WAITTYPE_ANY, then
 1508          * we can satisfy the wait conditions on the current
 1509          * object and wake the thread right away. Satisfying
 1510          * the wait also has the effect of breaking us out
 1511          * of the search loop.
 1512          *
 1513          * If the object is marked as WAITTYPE_ALL, then the
 1514          * wait block will be part of a circularly linked
 1515          * list of wait blocks belonging to a waiting thread
 1516          * that's sleeping in KeWaitForMultipleObjects(). In
 1517          * order to wake the thread, all the objects in the
 1518          * wait list must be in the signalled state. If they
 1519          * are, we then satisfy all of them and wake the
 1520          * thread.
 1521          *
 1522          */
 1523 
 1524         e = obj->dh_waitlisthead.nle_flink;
 1525 
 1526         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
 1527                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
 1528                 we = w->wb_ext;
 1529                 td = we->we_td;
 1530                 satisfied = FALSE;
 1531                 if (w->wb_waittype == WAITTYPE_ANY) {
 1532                         /*
 1533                          * Thread can be awakened if
 1534                          * any wait is satisfied.
 1535                          */
 1536                         ntoskrnl_satisfy_wait(obj, td);
 1537                         satisfied = TRUE;
 1538                         w->wb_awakened = TRUE;
 1539                 } else {
 1540                         /*
 1541                          * Thread can only be woken up
 1542                          * if all waits are satisfied.
 1543                          * If the thread is waiting on multiple
 1544                          * objects, they should all be linked
 1545                          * through the wb_next pointers in the
 1546                          * wait blocks.
 1547                          */
 1548                         satisfied = TRUE;
 1549                         next = w->wb_next;
 1550                         while (next != w) {
 1551                                 if (ntoskrnl_is_signalled(next->wb_object, td) == FALSE) {
 1552                                         satisfied = FALSE;
 1553                                         break;
 1554                                 }
 1555                                 next = next->wb_next;
 1556                         }
 1557                         if (satisfied == TRUE)
                                 ntoskrnl_satisfy_multiple_waits(w);
 1558                 }
 1559 
 1560                 if (satisfied == TRUE)
 1561                         cv_broadcastpri(&we->we_cv,
 1562                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
 1563                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
 1564 
 1565                 e = e->nle_flink;
 1566         }
 1567 }
 1568 
 1569 /*
 1570  * Return the number of 100 nanosecond intervals since
 1571  * January 1, 1601. (?!?!)
 1572  */
 1573 void
 1574 ntoskrnl_time(tval)
 1575         uint64_t                *tval;
 1576 {
 1577         struct timespec         ts;
 1578 
 1579         nanotime(&ts);
 1580         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
 1581             11644473600 * 10000000; /* 100ns ticks from 1601 to 1970 */
 1582 }
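
/*
 * Worked example of the conversion above (it follows directly from
 * the arithmetic): at the UNIX epoch (tv_sec = 0, tv_nsec = 0) the
 * result is just the 1601-to-1970 gap, 11644473600 * 10000000 =
 * 116444736000000000 ticks; each further second adds 10000000.
 */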
 1583 
 1584 static void
 1585 KeQuerySystemTime(current_time)
 1586         uint64_t                *current_time;
 1587 {
 1588         ntoskrnl_time(current_time);
 1589 }
 1590 
 1591 static uint32_t
 1592 KeTickCount(void)
 1593 {
 1594         struct timeval tv;
 1595         getmicrouptime(&tv);
 1596         return tvtohz(&tv);
 1597 }
 1598 
 1599 
 1600 /*
 1601  * KeWaitForSingleObject() is a tricky beast, because it can be used
 1602  * with several different object types: semaphores, timers, events,
 1603  * mutexes and threads. Semaphores don't appear very often, but the
 1604  * other object types are quite common. KeWaitForSingleObject() is
 1605  * what's normally used to acquire a mutex, and it can be used to
 1606  * wait for thread termination.
 1607  *
 1608  * The Windows NDIS API is implemented in terms of Windows kernel
 1609  * primitives, and some of the object manipulation is duplicated in
 1610  * NDIS. For example, NDIS has timers and events, which are actually
 1611  * Windows kevents and ktimers. Now, you're supposed to only use the
 1612  * NDIS variants of these objects within the confines of the NDIS API,
 1613  * but there are some naughty developers out there who will use
 1614  * KeWaitForSingleObject() on NDIS timer and event objects, so we
 1615  * have to support that as well. Consequently, our NDIS timer and event
 1616  * code has to be closely tied into our ntoskrnl timer and event code,
 1617  * just as it is in Windows.
 1618  *
 1619  * KeWaitForSingleObject() may do different things for different kinds
 1620  * of objects:
 1621  *
 1622  * - For events, we check if the event has been signalled. If the
 1623  *   event is already in the signalled state, we just return immediately,
 1624  *   otherwise we wait for it to be set to the signalled state by someone
 1625  *   else calling KeSetEvent(). Events can be either synchronization or
 1626  *   notification events.
 1627  *
 1628  * - For timers, if the timer has already fired and the timer is in
 1629  *   the signalled state, we just return, otherwise we wait on the
 1630  *   timer. Unlike an event, timers get signalled automatically when
 1631  *   they expire rather than someone having to trip them manually.
 1632  *   Timers initialized with KeInitializeTimer() are always notification
 1633  *   events: KeInitializeTimerEx() lets you initialize a timer as
 1634  *   either a notification or synchronization event.
 1635  *
 1636  * - For mutexes, we try to acquire the mutex and if we can't, we wait
 1637  *   on the mutex until it's available and then grab it. When a mutex is
 1638  *   released, it enters the signalled state, which wakes up one of the
 1639  *   threads waiting to acquire it. Mutexes are always synchronization
 1640  *   events.
 1641  *
 1642  * - For threads, the only thing we do is wait until the thread object
 1643  *   enters a signalled state, which occurs when the thread terminates.
 1644  *   Threads are always notification events.
 1645  *
 1646  * A notification event wakes up all threads waiting on an object. A
 1647  * synchronization event wakes up just one. Also, a synchronization event
 1648  * is auto-clearing, which means we automatically set the event back to
 1649  * the non-signalled state once the wakeup is done.
 1650  */
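
/*
 * Illustrative sketch (not used by the module itself; the event and
 * the two-second timeout are hypothetical): waiting on an event with
 * a relative timeout. KeInitializeEvent() and KeSetEvent() are
 * implemented elsewhere in this file.
 *
 *	nt_kevent		ev;
 *	int64_t			duetime;
 *	uint32_t		rval;
 *
 *	KeInitializeEvent(&ev, EVENT_TYPE_NOTIFY, FALSE);
 *	duetime = -2LL * 10000000;	(negative = relative, 100ns units)
 *	rval = KeWaitForSingleObject(&ev, 0, 0, FALSE, &duetime);
 *	if (rval == STATUS_TIMEOUT)
 *		(nobody set the event within two seconds)
 */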
 1651 
 1652 uint32_t
 1653 KeWaitForSingleObject(void *arg, uint32_t reason, uint32_t mode,
 1654     uint8_t alertable, int64_t *duetime)
 1655 {
 1656         wait_block              w;
 1657         struct thread           *td = curthread;
 1658         struct timeval          tv;
 1659         int                     error = 0;
 1660         uint64_t                curtime;
 1661         wb_ext                  we;
 1662         nt_dispatch_header      *obj;
 1663 
 1664         obj = arg;
 1665 
 1666         if (obj == NULL)
 1667                 return (STATUS_INVALID_PARAMETER);
 1668 
 1669         mtx_lock(&ntoskrnl_dispatchlock);
 1670 
 1671         cv_init(&we.we_cv, "KeWFS");
 1672         we.we_td = td;
 1673 
 1674         /*
 1675          * Check to see if this object is already signalled,
 1676          * and just return without waiting if it is.
 1677          */
 1678         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
 1679                 /* Sanity check the signal state value. */
 1680                 if (obj->dh_sigstate != INT32_MIN) {
 1681                         ntoskrnl_satisfy_wait(obj, curthread);
 1682                         mtx_unlock(&ntoskrnl_dispatchlock);
 1683                         return (STATUS_SUCCESS);
 1684                 } else {
 1685                         /*
 1686                          * There's a limit to how many times we can
 1687                          * recursively acquire a mutant. If we hit
 1688                          * the limit, something is very wrong.
 1689                          */
 1690                         if (obj->dh_type == DISP_TYPE_MUTANT) {
 1691                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1692                                 panic("mutant limit exceeded");
 1693                         }
 1694                 }
 1695         }
 1696 
 1697         bzero((char *)&w, sizeof(wait_block));
 1698         w.wb_object = obj;
 1699         w.wb_ext = &we;
 1700         w.wb_waittype = WAITTYPE_ANY;
 1701         w.wb_next = &w;
 1702         w.wb_waitkey = 0;
 1703         w.wb_awakened = FALSE;
 1704         w.wb_oldpri = td->td_priority;
 1705 
 1706         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
 1707 
 1708         /*
 1709          * The timeout value is specified in 100 nanosecond units
 1710          * and can be a positive or negative number. If it's positive,
 1711          * then the duetime is absolute, and we need to convert it
 1712          * to an offset relative to now in order to use it.
 1713          * If it's negative, then the duetime is relative and we
 1714          * just have to convert the units.
 1715          */
 1716 
 1717         if (duetime != NULL) {
 1718                 if (*duetime < 0) {
 1719                         tv.tv_sec = - (*duetime) / 10000000;
 1720                         tv.tv_usec = (- (*duetime) / 10) -
 1721                             (tv.tv_sec * 1000000);
 1722                 } else {
 1723                         ntoskrnl_time(&curtime);
 1724                         if (*duetime < curtime)
 1725                                 tv.tv_sec = tv.tv_usec = 0;
 1726                         else {
 1727                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1728                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1729                                     (tv.tv_sec * 1000000);
 1730                         }
 1731                 }
 1732         }
 1733 
 1734         if (duetime == NULL)
 1735                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1736         else
 1737                 error = cv_timedwait(&we.we_cv,
 1738                     &ntoskrnl_dispatchlock, tvtohz(&tv));
 1739 
 1740         RemoveEntryList(&w.wb_waitlist);
 1741 
 1742         cv_destroy(&we.we_cv);
 1743 
 1744         /* We timed out. Leave the object alone and return status. */
 1745 
 1746         if (error == EWOULDBLOCK) {
 1747                 mtx_unlock(&ntoskrnl_dispatchlock);
 1748                 return (STATUS_TIMEOUT);
 1749         }
 1750 
 1751         mtx_unlock(&ntoskrnl_dispatchlock);
 1752 
 1753         return (STATUS_SUCCESS);
 1754 /*
 1755         return (KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
 1756             mode, alertable, duetime, &w));
 1757 */
 1758 }
 1759 
 1760 static uint32_t
 1761 KeWaitForMultipleObjects(uint32_t cnt, nt_dispatch_header *obj[], uint32_t wtype,
 1762         uint32_t reason, uint32_t mode, uint8_t alertable, int64_t *duetime,
 1763         wait_block *wb_array)
 1764 {
 1765         struct thread           *td = curthread;
 1766         wait_block              *whead, *w;
 1767         wait_block              _wb_array[MAX_WAIT_OBJECTS];
 1768         nt_dispatch_header      *cur;
 1769         struct timeval          tv;
 1770         int                     i, wcnt = 0, error = 0;
 1771         uint64_t                curtime;
 1772         struct timespec         t1, t2;
 1773         uint32_t                status = STATUS_SUCCESS;
 1774         wb_ext                  we;
 1775 
 1776         if (cnt > MAX_WAIT_OBJECTS)
 1777                 return (STATUS_INVALID_PARAMETER);
 1778         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
 1779                 return (STATUS_INVALID_PARAMETER);
 1780 
 1781         mtx_lock(&ntoskrnl_dispatchlock);
 1782 
 1783         cv_init(&we.we_cv, "KeWFM");
 1784         we.we_td = td;
 1785 
 1786         if (wb_array == NULL)
 1787                 whead = _wb_array;
 1788         else
 1789                 whead = wb_array;
 1790 
 1791         bzero((char *)whead, sizeof(wait_block) * cnt);
 1792 
 1793         /* First pass: see if we can satisfy any waits immediately. */
 1794 
 1795         wcnt = 0;
 1796         w = whead;
 1797 
 1798         for (i = 0; i < cnt; i++) {
 1799                 InsertTailList((&obj[i]->dh_waitlisthead),
 1800                     (&w->wb_waitlist));
 1801                 w->wb_ext = &we;
 1802                 w->wb_object = obj[i];
 1803                 w->wb_waittype = wtype;
 1804                 w->wb_waitkey = i;
 1805                 w->wb_awakened = FALSE;
 1806                 w->wb_oldpri = td->td_priority;
 1807                 w->wb_next = w + 1;
 1808                 w++;
 1809                 wcnt++;
 1810                 if (ntoskrnl_is_signalled(obj[i], td)) {
 1811                         /*
 1812                          * There's a limit to how many times
 1813                          * we can recursively acquire a mutant.
 1814                          * If we hit the limit, something
 1815                          * is very wrong.
 1816                          */
 1817                         if (obj[i]->dh_sigstate == INT32_MIN &&
 1818                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
 1819                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1820                                 panic("mutant limit exceeded");
 1821                         }
 1822 
 1823                         /*
 1824                          * If this is a WAITTYPE_ANY wait, then
 1825                          * satisfy the waited object and exit
 1826                          * right now.
 1827                          */
 1828 
 1829                         if (wtype == WAITTYPE_ANY) {
 1830                                 ntoskrnl_satisfy_wait(obj[i], td);
 1831                                 status = STATUS_WAIT_0 + i;
 1832                                 goto wait_done;
 1833                         } else {
 1834                                 w--;
 1835                                 wcnt--;
 1836                                 w->wb_object = NULL;
 1837                                 RemoveEntryList(&w->wb_waitlist);
 1838                         }
 1839                 }
 1840         }
 1841 
 1842         /*
 1843          * If this is a WAITTYPE_ALL wait and all objects are
 1844          * already signalled, satisfy the waits and exit now.
 1845          */
 1846 
 1847         if (wtype == WAITTYPE_ALL && wcnt == 0) {
 1848                 for (i = 0; i < cnt; i++)
 1849                         ntoskrnl_satisfy_wait(obj[i], td);
 1850                 status = STATUS_SUCCESS;
 1851                 goto wait_done;
 1852         }
 1853 
 1854         /*
 1855          * Create a circular waitblock list. The waitcount
 1856          * must always be non-zero when we get here.
 1857          */
 1858 
 1859         (w - 1)->wb_next = whead;
 1860 
 1861         /* Wait on any objects that aren't yet signalled. */
 1862 
 1863         /* Calculate timeout, if any. */
 1864 
 1865         if (duetime != NULL) {
 1866                 if (*duetime < 0) {
 1867                         tv.tv_sec = - (*duetime) / 10000000;
 1868                         tv.tv_usec = (- (*duetime) / 10) -
 1869                             (tv.tv_sec * 1000000);
 1870                 } else {
 1871                         ntoskrnl_time(&curtime);
 1872                         if (*duetime < curtime)
 1873                                 tv.tv_sec = tv.tv_usec = 0;
 1874                         else {
 1875                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1876                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1877                                     (tv.tv_sec * 1000000);
 1878                         }
 1879                 }
 1880         }
 1881 
 1882         while (wcnt) {
 1883                 nanotime(&t1);
 1884 
 1885                 if (duetime == NULL)
 1886                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1887                 else
 1888                         error = cv_timedwait(&we.we_cv,
 1889                             &ntoskrnl_dispatchlock, tvtohz(&tv));
 1890 
 1891                 /* The wait timed out. */
 1892 
 1893                 if (error) {
 1894                         status = STATUS_TIMEOUT;
 1895                         goto wait_done;
 1896                 }
 1897 
 1898                 nanotime(&t2);
 1899 
 1900                 /* See what's been signalled. */
 1901 
 1902                 w = whead;
 1903                 do {
 1904                         cur = w->wb_object;
 1905                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
 1906                             w->wb_awakened == TRUE) {
 1907                                 /* Sanity check the signal state value. */
 1908                                 if (cur->dh_sigstate == INT32_MIN &&
 1909                                     cur->dh_type == DISP_TYPE_MUTANT) {
 1910                                         mtx_unlock(&ntoskrnl_dispatchlock);
 1911                                         panic("mutant limit exceeded");
 1912                                 }
 1913                                 wcnt--;
 1914                                 if (wtype == WAITTYPE_ANY) {
 1915                                         status = STATUS_WAIT_0 +
 1916                                             w->wb_waitkey;
 1917                                         goto wait_done;
 1918                                 }
 1919                         }
 1920                         w = w->wb_next;
 1921                 } while (w != whead);
 1922 
 1923                 /*
 1924                  * If all objects have been signalled, or if this
 1925          * is a WAITTYPE_ANY wait and we were woken up by
 1926                  * someone, we can bail.
 1927                  */
 1928 
 1929                 if (wcnt == 0) {
 1930                         status = STATUS_SUCCESS;
 1931                         goto wait_done;
 1932                 }
 1933 
 1934                 /*
 1935                  * If this is a WAITTYPE_ALL wait and there are still
 1936                  * objects that haven't been signalled, deduct the
 1937                  * time that's elapsed so far from the timeout and
 1938                  * wait again (or continue waiting indefinitely if
 1939                  * there's no timeout).
 1940                  */
 1941 
 1942                 if (duetime != NULL) {
 1943                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
 1944                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
 1945                 }
 1946         }
 1947 
 1948 
 1949 wait_done:
 1950 
 1951         cv_destroy(&we.we_cv);
 1952 
 1953         for (i = 0; i < cnt; i++) {
 1954                 if (whead[i].wb_object != NULL)
 1955                         RemoveEntryList(&whead[i].wb_waitlist);
 1956 
 1957         }
 1958         mtx_unlock(&ntoskrnl_dispatchlock);
 1959 
 1960         return (status);
 1961 }
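
/*
 * Illustrative sketch (hypothetical caller and objects): wait for
 * both of two events to fire. With cnt <= THREAD_WAIT_OBJECTS, the
 * wb_array argument may be NULL and the on-stack array above is used.
 *
 *	nt_kevent		ev1, ev2;
 *	nt_dispatch_header	*objs[2];
 *
 *	KeInitializeEvent(&ev1, EVENT_TYPE_NOTIFY, FALSE);
 *	KeInitializeEvent(&ev2, EVENT_TYPE_NOTIFY, FALSE);
 *	objs[0] = (nt_dispatch_header *)&ev1;
 *	objs[1] = (nt_dispatch_header *)&ev2;
 *	KeWaitForMultipleObjects(2, objs, WAITTYPE_ALL, 0, 0,
 *	    FALSE, NULL, NULL);
 */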
 1962 
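/*
 * Memory-mapped register access routines. The register pointer the
 * caller passes in is a kernel virtual address (typically obtained
 * via MmMapIoSpace(), below), so we hand it to bus_space as the
 * offset against a zero base handle.
 */
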
 1963 static void
 1964 WRITE_REGISTER_USHORT(uint16_t *reg, uint16_t val)
 1965 {
 1966         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1967 }
 1968 
 1969 static uint16_t
 1970 READ_REGISTER_USHORT(reg)
 1971         uint16_t                *reg;
 1972 {
 1973         return (bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1974 }
 1975 
 1976 static void
 1977 WRITE_REGISTER_ULONG(reg, val)
 1978         uint32_t                *reg;
 1979         uint32_t                val;
 1980 {
 1981         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1982 }
 1983 
 1984 static uint32_t
 1985 READ_REGISTER_ULONG(reg)
 1986         uint32_t                *reg;
 1987 {
 1988         return (bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1989 }
 1990 
 1991 static uint8_t
 1992 READ_REGISTER_UCHAR(uint8_t *reg)
 1993 {
 1994         return (bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1995 }
 1996 
 1997 static void
 1998 WRITE_REGISTER_UCHAR(uint8_t *reg, uint8_t val)
 1999 {
 2000         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 2001 }
 2002 
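/*
 * 64-bit arithmetic helpers. Windows compilers targeting 32-bit x86
 * typically emit calls to runtime routines with these names instead
 * of open-coding 64-bit multiply, divide, modulus and shift, so
 * binary drivers expect the kernel to provide them.
 */
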
 2003 static int64_t
 2004 _allmul(a, b)
 2005         int64_t                 a;
 2006         int64_t                 b;
 2007 {
 2008         return (a * b);
 2009 }
 2010 
 2011 static int64_t
 2012 _alldiv(a, b)
 2013         int64_t                 a;
 2014         int64_t                 b;
 2015 {
 2016         return (a / b);
 2017 }
 2018 
 2019 static int64_t
 2020 _allrem(a, b)
 2021         int64_t                 a;
 2022         int64_t                 b;
 2023 {
 2024         return (a % b);
 2025 }
 2026 
 2027 static uint64_t
 2028 _aullmul(a, b)
 2029         uint64_t                a;
 2030         uint64_t                b;
 2031 {
 2032         return (a * b);
 2033 }
 2034 
 2035 static uint64_t
 2036 _aulldiv(a, b)
 2037         uint64_t                a;
 2038         uint64_t                b;
 2039 {
 2040         return (a / b);
 2041 }
 2042 
 2043 static uint64_t
 2044 _aullrem(a, b)
 2045         uint64_t                a;
 2046         uint64_t                b;
 2047 {
 2048         return (a % b);
 2049 }
 2050 
 2051 static int64_t
 2052 _allshl(int64_t a, uint8_t b)
 2053 {
 2054         return (a << b);
 2055 }
 2056 
 2057 static uint64_t
 2058 _aullshl(uint64_t a, uint8_t b)
 2059 {
 2060         return (a << b);
 2061 }
 2062 
 2063 static int64_t
 2064 _allshr(int64_t a, uint8_t b)
 2065 {
 2066         return (a >> b);
 2067 }
 2068 
 2069 static uint64_t
 2070 _aullshr(uint64_t a, uint8_t b)
 2071 {
 2072         return (a >> b);
 2073 }
 2074 
 2075 static slist_entry *
 2076 ntoskrnl_pushsl(head, entry)
 2077         slist_header            *head;
 2078         slist_entry             *entry;
 2079 {
 2080         slist_entry             *oldhead;
 2081 
 2082         oldhead = head->slh_list.slh_next;
 2083         entry->sl_next = head->slh_list.slh_next;
 2084         head->slh_list.slh_next = entry;
 2085         head->slh_list.slh_depth++;
 2086         head->slh_list.slh_seq++;
 2087 
 2088         return (oldhead);
 2089 }
 2090 
 2091 static void
 2092 InitializeSListHead(head)
 2093         slist_header            *head;
 2094 {
 2095         memset(head, 0, sizeof(*head));
 2096 }
 2097 
 2098 static slist_entry *
 2099 ntoskrnl_popsl(head)
 2100         slist_header            *head;
 2101 {
 2102         slist_entry             *first;
 2103 
 2104         first = head->slh_list.slh_next;
 2105         if (first != NULL) {
 2106                 head->slh_list.slh_next = first->sl_next;
 2107                 head->slh_list.slh_depth--;
 2108                 head->slh_list.slh_seq++;
 2109         }
 2110 
 2111         return (first);
 2112 }
 2113 
 2114 /*
 2115  * We need this to make lookaside lists work for amd64.
 2116  * We pass a pointer to ExAllocatePoolWithTag() in the lookaside
 2117  * list structure. For amd64 to work right, this has to be a
 2118  * pointer to the wrapped version of the routine, not the
 2119  * original. Letting the Windows driver invoke the original
 2120  * function directly will result in a calling convention
 2121  * mismatch and a prompt crash. On x86, this effectively
 2122  * becomes a no-op since ipt_func and ipt_wrap are the same.
 2123  */
 2124 
 2125 static funcptr
 2126 ntoskrnl_findwrap(func)
 2127         funcptr                 func;
 2128 {
 2129         image_patch_table       *patch;
 2130 
 2131         patch = ntoskrnl_functbl;
 2132         while (patch->ipt_func != NULL) {
 2133                 if ((funcptr)patch->ipt_func == func)
 2134                         return ((funcptr)patch->ipt_wrap);
 2135                 patch++;
 2136         }
 2137 
 2138         return (NULL);
 2139 }
 2140 
 2141 static void
 2142 ExInitializePagedLookasideList(paged_lookaside_list *lookaside,
 2143         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
 2144         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
 2145 {
 2146         bzero((char *)lookaside, sizeof(paged_lookaside_list));
 2147 
 2148         if (size < sizeof(slist_entry))
 2149                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2150         else
 2151                 lookaside->nll_l.gl_size = size;
 2152         lookaside->nll_l.gl_tag = tag;
 2153         if (allocfunc == NULL)
 2154                 lookaside->nll_l.gl_allocfunc =
 2155                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2156         else
 2157                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2158 
 2159         if (freefunc == NULL)
 2160                 lookaside->nll_l.gl_freefunc =
 2161                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2162         else
 2163                 lookaside->nll_l.gl_freefunc = freefunc;
 2164 
 2165 #ifdef __i386__
 2166         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2167 #endif
 2168 
 2169         lookaside->nll_l.gl_type = NonPagedPool;
 2170         lookaside->nll_l.gl_depth = depth;
 2171         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2172 }
 2173 
 2174 static void
 2175 ExDeletePagedLookasideList(lookaside)
 2176         paged_lookaside_list   *lookaside;
 2177 {
 2178         void                    *buf;
 2179         void            (*freefunc)(void *);
 2180 
 2181         freefunc = lookaside->nll_l.gl_freefunc;
 2182         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2183                 MSCALL1(freefunc, buf);
 2184 }
 2185 
 2186 static void
 2187 ExInitializeNPagedLookasideList(npaged_lookaside_list *lookaside,
 2188         lookaside_alloc_func *allocfunc, lookaside_free_func *freefunc,
 2189         uint32_t flags, size_t size, uint32_t tag, uint16_t depth)
 2190 {
 2191         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
 2192 
 2193         if (size < sizeof(slist_entry))
 2194                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2195         else
 2196                 lookaside->nll_l.gl_size = size;
 2197         lookaside->nll_l.gl_tag = tag;
 2198         if (allocfunc == NULL)
 2199                 lookaside->nll_l.gl_allocfunc =
 2200                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2201         else
 2202                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2203 
 2204         if (freefunc == NULL)
 2205                 lookaside->nll_l.gl_freefunc =
 2206                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2207         else
 2208                 lookaside->nll_l.gl_freefunc = freefunc;
 2209 
 2210 #ifdef __i386__
 2211         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2212 #endif
 2213 
 2214         lookaside->nll_l.gl_type = NonPagedPool;
 2215         lookaside->nll_l.gl_depth = depth;
 2216         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2217 }
 2218 
 2219 static void
 2220 ExDeleteNPagedLookasideList(lookaside)
 2221         npaged_lookaside_list   *lookaside;
 2222 {
 2223         void                    *buf;
 2224         void            (*freefunc)(void *);
 2225 
 2226         freefunc = lookaside->nll_l.gl_freefunc;
 2227         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2228                 MSCALL1(freefunc, buf);
 2229 }
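
/*
 * Illustrative pairing (the element type and size are hypothetical):
 * a driver sets up a lookaside list once, draws buffers from it via
 * the gl_allocfunc hook stored above, and tears it down with the
 * matching delete routine.
 *
 *	npaged_lookaside_list	la;
 *
 *	ExInitializeNPagedLookasideList(&la, NULL, NULL, 0,
 *	    sizeof(struct some_buf), 0, 0);
 *	...
 *	ExDeleteNPagedLookasideList(&la);
 */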
 2230 
 2231 slist_entry *
 2232 InterlockedPushEntrySList(head, entry)
 2233         slist_header            *head;
 2234         slist_entry             *entry;
 2235 {
 2236         slist_entry             *oldhead;
 2237 
 2238         mtx_lock_spin(&ntoskrnl_interlock);
 2239         oldhead = ntoskrnl_pushsl(head, entry);
 2240         mtx_unlock_spin(&ntoskrnl_interlock);
 2241 
 2242         return (oldhead);
 2243 }
 2244 
 2245 slist_entry *
 2246 InterlockedPopEntrySList(head)
 2247         slist_header            *head;
 2248 {
 2249         slist_entry             *first;
 2250 
 2251         mtx_lock_spin(&ntoskrnl_interlock);
 2252         first = ntoskrnl_popsl(head);
 2253         mtx_unlock_spin(&ntoskrnl_interlock);
 2254 
 2255         return (first);
 2256 }
 2257 
 2258 static slist_entry *
 2259 ExInterlockedPushEntrySList(head, entry, lock)
 2260         slist_header            *head;
 2261         slist_entry             *entry;
 2262         kspin_lock              *lock;
 2263 {
 2264         return (InterlockedPushEntrySList(head, entry));
 2265 }
 2266 
 2267 static slist_entry *
 2268 ExInterlockedPopEntrySList(head, lock)
 2269         slist_header            *head;
 2270         kspin_lock              *lock;
 2271 {
 2272         return (InterlockedPopEntrySList(head));
 2273 }
 2274 
 2275 uint16_t
 2276 ExQueryDepthSList(head)
 2277         slist_header            *head;
 2278 {
 2279         uint16_t                depth;
 2280 
 2281         mtx_lock_spin(&ntoskrnl_interlock);
 2282         depth = head->slh_list.slh_depth;
 2283         mtx_unlock_spin(&ntoskrnl_interlock);
 2284 
 2285         return (depth);
 2286 }
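
/*
 * Illustrative sketch (struct and field names hypothetical): SList
 * users embed an slist_entry in their own structure and push/pop
 * entries under the global interlock.
 *
 *	struct pkt {
 *		slist_entry	p_link;
 *		int		p_data;
 *	};
 *	slist_header		free_pkts;
 *	struct pkt		p;
 *
 *	InitializeSListHead(&free_pkts);
 *	InterlockedPushEntrySList(&free_pkts, &p.p_link);
 *	(ExQueryDepthSList(&free_pkts) now returns 1)
 *	InterlockedPopEntrySList(&free_pkts);
 */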
 2287 
 2288 void
 2289 KeInitializeSpinLock(lock)
 2290         kspin_lock              *lock;
 2291 {
 2292         *lock = 0;
 2293 }
 2294 
 2295 #ifdef __i386__
 2296 void
 2297 KefAcquireSpinLockAtDpcLevel(lock)
 2298         kspin_lock              *lock;
 2299 {
 2300 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2301         int                     i = 0;
 2302 #endif
 2303 
 2304         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
 2305                 /* sit and spin */;
 2306 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2307                 i++;
 2308                 if (i > 200000000)
 2309                         panic("DEADLOCK!");
 2310 #endif
 2311         }
 2312 }
 2313 
 2314 void
 2315 KefReleaseSpinLockFromDpcLevel(lock)
 2316         kspin_lock              *lock;
 2317 {
 2318         atomic_store_rel_int((volatile u_int *)lock, 0);
 2319 }
 2320 
 2321 uint8_t
 2322 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
 2323 {
 2324         uint8_t                 oldirql;
 2325 
 2326         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
 2327                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
 2328 
 2329         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
 2330         KeAcquireSpinLockAtDpcLevel(lock);
 2331 
 2332         return (oldirql);
 2333 }
 2334 #else
 2335 void
 2336 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
 2337 {
 2338         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
 2339                 /* sit and spin */;
 2340 }
 2341 
 2342 void
 2343 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
 2344 {
 2345         atomic_store_rel_int((volatile u_int *)lock, 0);
 2346 }
 2347 #endif /* __i386__ */
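
/*
 * Illustrative sketch of the usual acquire/release pattern (the lock
 * variable is hypothetical): KeAcquireSpinLock() raises to
 * DISPATCH_LEVEL and hands back the old IRQL, which must be passed
 * to KeReleaseSpinLock() on the way out.
 *
 *	kspin_lock		lck;
 *	uint8_t			irql;
 *
 *	KeInitializeSpinLock(&lck);
 *	KeAcquireSpinLock(&lck, &irql);
 *	(critical section)
 *	KeReleaseSpinLock(&lck, irql);
 */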
 2348 
 2349 uintptr_t
 2350 InterlockedExchange(dst, val)
 2351         volatile uint32_t       *dst;
 2352         uintptr_t               val;
 2353 {
 2354         uintptr_t               r;
 2355 
 2356         mtx_lock_spin(&ntoskrnl_interlock);
 2357         r = *dst;
 2358         *dst = val;
 2359         mtx_unlock_spin(&ntoskrnl_interlock);
 2360 
 2361         return (r);
 2362 }
 2363 
 2364 static uint32_t
 2365 InterlockedIncrement(addend)
 2366         volatile uint32_t       *addend;
 2367 {
 2368         /* addend is only 32 bits wide, so use a 32-bit atomic. */
 2369         return (atomic_fetchadd_int((volatile u_int *)addend, 1) + 1);
 2370 }
 2371 
 2372 static uint32_t
 2373 InterlockedDecrement(addend)
 2374         volatile uint32_t       *addend;
 2375 {
 2376         /* addend is only 32 bits wide, so use a 32-bit atomic. */
 2377         return (atomic_fetchadd_int((volatile u_int *)addend, -1) - 1);
 2378 }
 2379 
 2380 static void
 2381 ExInterlockedAddLargeStatistic(addend, inc)
 2382         uint64_t                *addend;
 2383         uint32_t                inc;
 2384 {
 2385         mtx_lock_spin(&ntoskrnl_interlock);
 2386         *addend += inc;
 2387         mtx_unlock_spin(&ntoskrnl_interlock);
 2388 }
 2389 
 2390 mdl *
 2391 IoAllocateMdl(void *vaddr, uint32_t len, uint8_t secondarybuf,
 2392         uint8_t chargequota, irp *iopkt)
 2393 {
 2394         mdl                     *m;
 2395         int                     zone = 0;
 2396 
 2397         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
 2398                 m = ExAllocatePoolWithTag(NonPagedPool,
 2399                     MmSizeOfMdl(vaddr, len), 0);
 2400         else {
 2401                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
 2402                 zone++;
 2403         }
 2404 
 2405         if (m == NULL)
 2406                 return (NULL);
 2407 
 2408         MmInitializeMdl(m, vaddr, len);
 2409 
 2410         /*
 2411          * MmInitializeMdl() clears the flags field, so we
 2412          * have to set this here. If the MDL came from the
 2413          * MDL UMA zone, tag it so we can release it to
 2414          * the right place later.
 2415          */
 2416         if (zone)
 2417                 m->mdl_flags = MDL_ZONE_ALLOCED;
 2418 
 2419         if (iopkt != NULL) {
 2420                 if (secondarybuf == TRUE) {
 2421                         mdl                     *last;
 2422                         last = iopkt->irp_mdl;
 2423                         while (last->mdl_next != NULL)
 2424                                 last = last->mdl_next;
 2425                         last->mdl_next = m;
 2426                 } else {
 2427                         if (iopkt->irp_mdl != NULL)
 2428                                 panic("leaking an MDL in IoAllocateMdl()");
 2429                         iopkt->irp_mdl = m;
 2430                 }
 2431         }
 2432 
 2433         return (m);
 2434 }
 2435 
 2436 void
 2437 IoFreeMdl(m)
 2438         mdl                     *m;
 2439 {
 2440         if (m == NULL)
 2441                 return;
 2442 
 2443         if (m->mdl_flags & MDL_ZONE_ALLOCED)
 2444                 uma_zfree(mdl_zone, m);
 2445         else
 2446                 ExFreePool(m);
 2447 }
 2448 
 2449 static void *
 2450 MmAllocateContiguousMemory(size, highest)
 2451         uint32_t                size;
 2452         uint64_t                highest;
 2453 {
 2454         void *addr;
 2455         size_t pagelength = roundup(size, PAGE_SIZE);
 2456 
 2457         addr = ExAllocatePoolWithTag(NonPagedPool, pagelength, 0);
 2458 
 2459         return (addr);
 2460 }
 2461 
 2462 static void *
 2463 MmAllocateContiguousMemorySpecifyCache(size, lowest, highest,
 2464     boundary, cachetype)
 2465         uint32_t                size;
 2466         uint64_t                lowest;
 2467         uint64_t                highest;
 2468         uint64_t                boundary;
 2469         enum nt_caching_type    cachetype;
 2470 {
 2471         vm_memattr_t            memattr;
 2472         void                    *ret;
 2473 
 2474         switch (cachetype) {
 2475         case MmNonCached:
 2476                 memattr = VM_MEMATTR_UNCACHEABLE;
 2477                 break;
 2478         case MmWriteCombined:
 2479                 memattr = VM_MEMATTR_WRITE_COMBINING;
 2480                 break;
 2481         case MmNonCachedUnordered:
 2482                 memattr = VM_MEMATTR_UNCACHEABLE;
 2483                 break;
 2484         case MmCached:
 2485         case MmHardwareCoherentCached:
 2486         case MmUSWCCached:
 2487         default:
 2488                 memattr = VM_MEMATTR_DEFAULT;
 2489                 break;
 2490         }
 2491 
 2492         ret = (void *)kmem_alloc_contig(kernel_arena, size, M_ZERO | M_NOWAIT,
 2493             lowest, highest, PAGE_SIZE, boundary, memattr);
 2494         if (ret != NULL)
 2495                 malloc_type_allocated(M_DEVBUF, round_page(size));
 2496         return (ret);
 2497 }
 2498 
 2499 static void
 2500 MmFreeContiguousMemory(base)
 2501         void                    *base;
 2502 {
 2503         ExFreePool(base);
 2504 }
 2505 
 2506 static void
 2507 MmFreeContiguousMemorySpecifyCache(base, size, cachetype)
 2508         void                    *base;
 2509         uint32_t                size;
 2510         enum nt_caching_type    cachetype;
 2511 {
 2512         contigfree(base, size, M_DEVBUF);
 2513 }
 2514 
 2515 static uint32_t
 2516 MmSizeOfMdl(vaddr, len)
 2517         void                    *vaddr;
 2518         size_t                  len;
 2519 {
 2520         uint32_t                l;
 2521 
 2522         l = sizeof(struct mdl) +
 2523             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
 2524 
 2525         return (l);
 2526 }
 2527 
 2528 /*
 2529  * The Microsoft documentation says this routine fills in the
 2530  * page array of an MDL with the _physical_ page addresses that
 2531  * comprise the buffer, but we don't really want to do that here.
 2532  * Instead, we just fill in the page array with the kernel virtual
 2533  * addresses of the buffers.
 2534  */
 2535 void
 2536 MmBuildMdlForNonPagedPool(m)
 2537         mdl                     *m;
 2538 {
 2539         vm_offset_t             *mdl_pages;
 2540         int                     pagecnt, i;
 2541 
 2542         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
 2543 
 2544         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
 2545                 panic("not enough pages in MDL to describe buffer");
 2546 
 2547         mdl_pages = MmGetMdlPfnArray(m);
 2548 
 2549         for (i = 0; i < pagecnt; i++)
 2550                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
 2551 
 2552         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
 2553         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
 2554 }
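
/*
 * Illustrative sketch (buffer and length hypothetical): describing
 * a non-paged buffer with an MDL and releasing it again.
 *
 *	char			buf[512];
 *	mdl			*m;
 *
 *	m = IoAllocateMdl(buf, sizeof(buf), FALSE, FALSE, NULL);
 *	if (m != NULL) {
 *		MmBuildMdlForNonPagedPool(m);
 *		...
 *		IoFreeMdl(m);
 *	}
 */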
 2555 
 2556 static void *
 2557 MmMapLockedPages(mdl *buf, uint8_t accessmode)
 2558 {
 2559         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
 2560         return (MmGetMdlVirtualAddress(buf));
 2561 }
 2562 
 2563 static void *
 2564 MmMapLockedPagesSpecifyCache(mdl *buf, uint8_t accessmode, uint32_t cachetype,
 2565         void *vaddr, uint32_t bugcheck, uint32_t prio)
 2566 {
 2567         return (MmMapLockedPages(buf, accessmode));
 2568 }
 2569 
 2570 static void
 2571 MmUnmapLockedPages(vaddr, buf)
 2572         void                    *vaddr;
 2573         mdl                     *buf;
 2574 {
 2575         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
 2576 }
 2577 
 2578 /*
 2579  * This function has a problem in that it will break if you
 2580  * compile this module without PAE and try to use it on a PAE
 2581  * kernel. Unfortunately, there's no way around this at the
 2582  * moment. It's slightly less broken than using pmap_kextract().
 2583  * You'd think the virtual memory subsystem would help us out
 2584  * here, but it doesn't.
 2585  */
 2586 
 2587 static uint64_t
 2588 MmGetPhysicalAddress(void *base)
 2589 {
 2590         return (pmap_extract(kernel_map->pmap, (vm_offset_t)base));
 2591 }
 2592 
 2593 void *
 2594 MmGetSystemRoutineAddress(ustr)
 2595         unicode_string          *ustr;
 2596 {
 2597         ansi_string             astr;
 2598 
 2599         if (RtlUnicodeStringToAnsiString(&astr, ustr, TRUE))
 2600                 return (NULL);
 2601         return (ndis_get_routine_address(ntoskrnl_functbl, astr.as_buf));
 2602 }
 2603 
 2604 uint8_t
 2605 MmIsAddressValid(vaddr)
 2606         void                    *vaddr;
 2607 {
 2608         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
 2609                 return (TRUE);
 2610 
 2611         return (FALSE);
 2612 }
 2613 
 2614 void *
 2615 MmMapIoSpace(paddr, len, cachetype)
 2616         uint64_t                paddr;
 2617         uint32_t                len;
 2618         uint32_t                cachetype;
 2619 {
 2620         devclass_t              nexus_class;
 2621         device_t                *nexus_devs, devp;
 2622         int                     nexus_count = 0;
 2623         device_t                matching_dev = NULL;
 2624         struct resource         *res;
 2625         int                     i;
 2626         vm_offset_t             v;
 2627 
 2628         /* There will always be at least one nexus. */
 2629 
 2630         nexus_class = devclass_find("nexus");
 2631         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
 2632 
 2633         for (i = 0; i < nexus_count; i++) {
 2634                 devp = nexus_devs[i];
 2635                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
 2636                 if (matching_dev)
 2637                         break;
 2638         }
 2639 
 2640         free(nexus_devs, M_TEMP);
 2641 
 2642         if (matching_dev == NULL)
 2643                 return (NULL);
 2644 
 2645         v = (vm_offset_t)rman_get_virtual(res);
 2646         if (paddr > rman_get_start(res))
 2647                 v += paddr - rman_get_start(res);
 2648 
 2649         return ((void *)v);
 2650 }
 2651 
 2652 void
 2653 MmUnmapIoSpace(vaddr, len)
 2654         void                    *vaddr;
 2655         size_t                  len;
 2656 {
 2657 }
 2658 
 2659 
 2660 static device_t
 2661 ntoskrnl_finddev(dev, paddr, res)
 2662         device_t                dev;
 2663         uint64_t                paddr;
 2664         struct resource         **res;
 2665 {
 2666         device_t                *children = NULL;
 2667         device_t                matching_dev;
 2668         int                     childcnt;
 2669         struct resource         *r;
 2670         struct resource_list    *rl;
 2671         struct resource_list_entry      *rle;
 2672         uint32_t                flags;
 2673         int                     i;
 2674 
 2675         /* We only want devices that have been successfully probed. */
 2676 
 2677         if (device_is_alive(dev) == FALSE)
 2678                 return (NULL);
 2679 
 2680         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 2681         if (rl != NULL) {
 2682                 STAILQ_FOREACH(rle, rl, link) {
 2683                         r = rle->res;
 2684 
 2685                         if (r == NULL)
 2686                                 continue;
 2687 
 2688                         flags = rman_get_flags(r);
 2689 
 2690                         if (rle->type == SYS_RES_MEMORY &&
 2691                             paddr >= rman_get_start(r) &&
 2692                             paddr <= rman_get_end(r)) {
 2693                                 if (!(flags & RF_ACTIVE))
 2694                                         bus_activate_resource(dev,
 2695                                             SYS_RES_MEMORY, 0, r);
 2696                                 *res = r;
 2697                                 return (dev);
 2698                         }
 2699                 }
 2700         }
 2701 
 2702         /*
 2703          * If this device has children, do another
 2704          * level of recursion to inspect them.
 2705          */
 2706 
 2707         device_get_children(dev, &children, &childcnt);
 2708 
 2709         for (i = 0; i < childcnt; i++) {
 2710                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
 2711                 if (matching_dev != NULL) {
 2712                         free(children, M_TEMP);
 2713                         return (matching_dev);
 2714                 }
 2715         }
 2716 
 2717 
 2718         /* Won't somebody please think of the children! */
 2719 
 2720         if (children != NULL)
 2721                 free(children, M_TEMP);
 2722 
 2723         return (NULL);
 2724 }
 2725 
 2726 /*
 2727  * Workitems are unlike DPCs in that they run in a user-mode thread
 2728  * context rather than at DISPATCH_LEVEL in kernel context. In our
 2729  * case we run them in kernel context anyway.
 2730  */
 2731 static void
 2732 ntoskrnl_workitem_thread(arg)
 2733         void                    *arg;
 2734 {
 2735         kdpc_queue              *kq;
 2736         list_entry              *l;
 2737         io_workitem             *iw;
 2738         uint8_t                 irql;
 2739 
 2740         kq = arg;
 2741 
 2742         InitializeListHead(&kq->kq_disp);
 2743         kq->kq_td = curthread;
 2744         kq->kq_exit = 0;
 2745         KeInitializeSpinLock(&kq->kq_lock);
 2746         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 2747 
 2748         while (1) {
 2749                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 2750 
 2751                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 2752 
 2753                 if (kq->kq_exit) {
 2754                         kq->kq_exit = 0;
 2755                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2756                         break;
 2757                 }
 2758 
 2759                 while (!IsListEmpty(&kq->kq_disp)) {
 2760                         l = RemoveHeadList(&kq->kq_disp);
 2761                         iw = CONTAINING_RECORD(l,
 2762                             io_workitem, iw_listentry);
 2763                         InitializeListHead((&iw->iw_listentry));
 2764                         if (iw->iw_func == NULL)
 2765                                 continue;
 2766                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2767                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
 2768                         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2769                 }
 2770 
 2771                 KeReleaseSpinLock(&kq->kq_lock, irql);
 2772         }
 2773 
 2774         kproc_exit(0);
 2775         return; /* notreached */
 2776 }
 2777 
 2778 static ndis_status
 2779 RtlCharToInteger(src, base, val)
 2780         const char              *src;
 2781         uint32_t                base;
 2782         uint32_t                *val;
 2783 {
 2784         int negative = 0;
 2785         uint32_t res;
 2786 
 2787         if (!src || !val)
 2788                 return (STATUS_ACCESS_VIOLATION);
 2789         while (*src != '\0' && *src <= ' ')
 2790                 src++;
 2791         if (*src == '+')
 2792                 src++;
 2793         else if (*src == '-') {
 2794                 src++;
 2795                 negative = 1;
 2796         }
 2797         if (base == 0) {
 2798                 base = 10;
 2799                 if (*src == '0') {
 2800                         src++;
 2801                         if (*src == 'b') {
 2802                                 base = 2;
 2803                                 src++;
 2804                         } else if (*src == 'o') {
 2805                                 base = 8;
 2806                                 src++;
 2807                         } else if (*src == 'x') {
 2808                                 base = 16;
 2809                                 src++;
 2810                         }
 2811                 }
 2812         } else if (!(base == 2 || base == 8 || base == 10 || base == 16))
 2813                 return (STATUS_INVALID_PARAMETER);
 2814 
 2815         for (res = 0; *src; src++) {
 2816                 int v;
 2817                 if (isdigit(*src))
 2818                         v = *src - '0';
 2819                 else if (isxdigit(*src))
 2820                         v = tolower(*src) - 'a' + 10;
 2821                 else
 2822                         v = base;
 2823                 if (v >= base)
 2824                         return (STATUS_INVALID_PARAMETER);
 2825                 res = res * base + v;
 2826         }
 2827         *val = negative ? -res : res;
 2828         return (STATUS_SUCCESS);
 2829 }
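
/*
 * Worked examples for the parser above (the values follow directly
 * from the code): with base 0, a leading "0b", "0o" or "0x" selects
 * base 2, 8 or 16, otherwise base 10 is assumed.
 *
 *	RtlCharToInteger("0x1A", 0, &val);	val = 26
 *	RtlCharToInteger("-10", 0, &val);	val = 0xFFFFFFF6 (-10)
 *	RtlCharToInteger("12", 8, &val);	val = 10
 */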
 2830 
 2831 static void
 2832 ntoskrnl_destroy_workitem_threads(void)
 2833 {
 2834         kdpc_queue              *kq;
 2835         int                     i;
 2836 
 2837         for (i = 0; i < WORKITEM_THREADS; i++) {
 2838                 kq = wq_queues + i;
 2839                 kq->kq_exit = 1;
 2840                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 2841                 while (kq->kq_exit)
 2842                         tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", hz/10);
 2843         }
 2844 }
 2845 
 2846 io_workitem *
 2847 IoAllocateWorkItem(dobj)
 2848         device_object           *dobj;
 2849 {
 2850         io_workitem             *iw;
 2851 
 2852         iw = uma_zalloc(iw_zone, M_NOWAIT);
 2853         if (iw == NULL)
 2854                 return (NULL);
 2855 
 2856         InitializeListHead(&iw->iw_listentry);
 2857         iw->iw_dobj = dobj;
 2858 
 2859         mtx_lock(&ntoskrnl_dispatchlock);
 2860         iw->iw_idx = wq_idx;
 2861         WORKIDX_INC(wq_idx);
 2862         mtx_unlock(&ntoskrnl_dispatchlock);
 2863 
 2864         return (iw);
 2865 }
 2866 
 2867 void
 2868 IoFreeWorkItem(iw)
 2869         io_workitem             *iw;
 2870 {
 2871         uma_zfree(iw_zone, iw);
 2872 }
 2873 
 2874 void
 2875 IoQueueWorkItem(iw, iw_func, qtype, ctx)
 2876         io_workitem             *iw;
 2877         io_workitem_func        iw_func;
 2878         uint32_t                qtype;
 2879         void                    *ctx;
 2880 {
 2881         kdpc_queue              *kq;
 2882         list_entry              *l;
 2883         io_workitem             *cur;
 2884         uint8_t                 irql;
 2885 
 2886         kq = wq_queues + iw->iw_idx;
 2887 
 2888         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2889 
 2890         /*
 2891          * Traverse the list and make sure this workitem hasn't
 2892          * already been inserted. Queuing the same workitem
 2893          * twice will hose the list but good.
 2894          */
 2895 
 2896         l = kq->kq_disp.nle_flink;
 2897         while (l != &kq->kq_disp) {
 2898                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2899                 if (cur == iw) {
 2900                         /* Already queued -- do nothing. */
 2901                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2902                         return;
 2903                 }
 2904                 l = l->nle_flink;
 2905         }
 2906 
 2907         iw->iw_func = iw_func;
 2908         iw->iw_ctx = ctx;
 2909 
 2910         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
 2911         KeReleaseSpinLock(&kq->kq_lock, irql);
 2912 
 2913         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 2914 }
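
/*
 * Illustrative sketch of the allocate/queue/free life cycle (dobj,
 * ctx and the callback are hypothetical; the callback must be a
 * Windows-ABI routine, i.e. already wrapped, since it is invoked
 * via MSCALL2() above). The qtype argument is accepted but not
 * used by this implementation.
 *
 *	io_workitem		*iw;
 *
 *	iw = IoAllocateWorkItem(dobj);
 *	if (iw != NULL)
 *		IoQueueWorkItem(iw, my_wrapped_func, 0, ctx);
 *	(later, after my_wrapped_func has run: IoFreeWorkItem(iw))
 */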
 2915 
 2916 static void
 2917 ntoskrnl_workitem(dobj, arg)
 2918         device_object           *dobj;
 2919         void                    *arg;
 2920 {
 2921         io_workitem             *iw;
 2922         work_queue_item         *w;
 2923         work_item_func          f;
 2924 
 2925         iw = arg;
 2926         w = (work_queue_item *)dobj;
 2927         f = (work_item_func)w->wqi_func;
 2928         uma_zfree(iw_zone, iw);
 2929         MSCALL2(f, w, w->wqi_ctx);
 2930 }
 2931 
 2932 /*
 2933  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 2934  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 2935  * problem with ExQueueWorkItem() is that it can't guard against
 2936  * the condition where a driver submits a job to the work queue and
 2937  * is then unloaded before the job is able to run. IoQueueWorkItem()
 2938  * acquires a reference to the device's device_object via the
 2939  * object manager and retains it until after the job has completed,
 2940  * which prevents the driver from being unloaded before the job
 2941  * runs. (We don't currently support this behavior, though hopefully
 2942  * that will change once the object manager API is fleshed out a bit.)
 2943  *
 2944  * Having said all that, the ExQueueWorkItem() API remains, because
 2945  * there are still other parts of Windows that use it, including
 2946  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 2947  * We fake up the ExQueueWorkItem() API on top of our implementation
 2948  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 2949  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 2950  * queue item (provided by the caller) in to IoAllocateWorkItem()
 2951  * instead of the device_object. We need to save this pointer so
 2952  * we can apply a sanity check: as with the DPC queue and other
 2953  * workitem queues, we can't allow the same work queue item to
 2954  * be queued twice. If it's already pending, we silently return.
 2955  */
 2956 
 2957 void
 2958 ExQueueWorkItem(w, qtype)
 2959         work_queue_item         *w;
 2960         uint32_t                qtype;
 2961 {
 2962         io_workitem             *iw;
 2963         io_workitem_func        iwf;
 2964         kdpc_queue              *kq;
 2965         list_entry              *l;
 2966         io_workitem             *cur;
 2967         uint8_t                 irql;
 2968 
 2969 
 2970         /*
 2971          * We need to do a special sanity test to make sure
 2972          * the ExQueueWorkItem() API isn't used to queue
 2973          * the same workitem twice. Rather than checking the
 2974          * io_workitem pointer itself, we test the attached
 2975          * device object, which is really a pointer to the
 2976          * legacy work queue item structure.
 2977          */
 2978 
 2979         kq = wq_queues + WORKITEM_LEGACY_THREAD;
 2980         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2981         l = kq->kq_disp.nle_flink;
 2982         while (l != &kq->kq_disp) {
 2983                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2984                 if (cur->iw_dobj == (device_object *)w) {
 2985                         /* Already queued -- do nothing. */
 2986                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2987                         return;
 2988                 }
 2989                 l = l->nle_flink;
 2990         }
 2991         KeReleaseSpinLock(&kq->kq_lock, irql);
 2992 
 2993         iw = IoAllocateWorkItem((device_object *)w);
 2994         if (iw == NULL)
 2995                 return;
 2996 
 2997         iw->iw_idx = WORKITEM_LEGACY_THREAD;
 2998         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
 2999         IoQueueWorkItem(iw, iwf, qtype, iw);
 3000 }
 3001 
 3002 static void
 3003 RtlZeroMemory(dst, len)
 3004         void                    *dst;
 3005         size_t                  len;
 3006 {
 3007         bzero(dst, len);
 3008 }
 3009 
 3010 static void
 3011 RtlSecureZeroMemory(dst, len)
 3012         void                    *dst;
 3013         size_t                  len;
 3014 {
 3015         memset(dst, 0, len);
 3016 }
 3017 
 3018 static void
 3019 RtlFillMemory(void *dst, size_t len, uint8_t c)
 3020 {
 3021         memset(dst, c, len);
 3022 }
 3023 
 3024 static void
 3025 RtlMoveMemory(dst, src, len)
 3026         void                    *dst;
 3027         const void              *src;
 3028         size_t                  len;
 3029 {
 3030         memmove(dst, src, len);
 3031 }
 3032 
 3033 static void
 3034 RtlCopyMemory(dst, src, len)
 3035         void                    *dst;
 3036         const void              *src;
 3037         size_t                  len;
 3038 {
 3039         bcopy(src, dst, len);
 3040 }
 3041 
 3042 static size_t
 3043 RtlCompareMemory(s1, s2, len)
 3044         const void              *s1;
 3045         const void              *s2;
 3046         size_t                  len;
 3047 {
 3048         size_t                  i;
 3049         uint8_t                 *m1, *m2;
 3050 
 3051         m1 = __DECONST(uint8_t *, s1);
 3052         m2 = __DECONST(uint8_t *, s2);
 3053 
 3054         for (i = 0; i < len && m1[i] == m2[i]; i++);
 3055         return (i);
 3056 }
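
/*
 * Note that RtlCompareMemory() returns the length of the initial
 * matching run rather than a memcmp()-style ordering, so equality
 * means the result equals the full length. Illustrative sketch:
 */
static int
example_buffers_equal(const void *a, const void *b, size_t len)
{

	return (RtlCompareMemory(a, b, len) == len);
}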
 3057 
 3058 void
 3059 RtlInitAnsiString(dst, src)
 3060         ansi_string             *dst;
 3061         char                    *src;
 3062 {
 3063         ansi_string             *a;
 3064 
 3065         a = dst;
 3066         if (a == NULL)
 3067                 return;
 3068         if (src == NULL) {
 3069                 a->as_len = a->as_maxlen = 0;
 3070                 a->as_buf = NULL;
 3071         } else {
 3072                 a->as_buf = src;
 3073                 a->as_len = a->as_maxlen = strlen(src);
 3074         }
 3075 }
 3076 
 3077 void
 3078 RtlInitUnicodeString(dst, src)
 3079         unicode_string          *dst;
 3080         uint16_t                *src;
 3081 {
 3082         unicode_string          *u;
 3083         int                     i;
 3084 
 3085         u = dst;
 3086         if (u == NULL)
 3087                 return;
 3088         if (src == NULL) {
 3089                 u->us_len = u->us_maxlen = 0;
 3090                 u->us_buf = NULL;
 3091         } else {
 3092                 i = 0;
 3093                 while(src[i] != 0)
 3094                         i++;
 3095                 u->us_buf = src;
 3096                 u->us_len = u->us_maxlen = i * 2;
 3097         }
 3098 }
 3099 
 3100 ndis_status
 3101 RtlUnicodeStringToInteger(ustr, base, val)
 3102         unicode_string          *ustr;
 3103         uint32_t                base;
 3104         uint32_t                *val;
 3105 {
 3106         uint16_t                *uchr;
 3107         int                     len, neg = 0;
 3108         char                    abuf[64];
 3109         char                    *astr;
 3110 
 3111         uchr = ustr->us_buf;
 3112         len = min(ustr->us_len, (sizeof(abuf) - 2) * 2); /* fit abuf */
 3113         bzero(abuf, sizeof(abuf));
 3114 
 3115         if ((char)((*uchr) & 0xFF) == '-') {
 3116                 neg = 1;
 3117                 uchr++;
 3118                 len -= 2;
 3119         } else if ((char)((*uchr) & 0xFF) == '+') {
 3120                 neg = 0;
 3121                 uchr++;
 3122                 len -= 2;
 3123         }
 3124 
 3125         if (base == 0) {
 3126                 if ((char)((*uchr) & 0xFF) == 'b') {
 3127                         base = 2;
 3128                         uchr++;
 3129                         len -= 2;
 3130                 } else if ((char)((*uchr) & 0xFF) == 'o') {
 3131                         base = 8;
 3132                         uchr++;
 3133                         len -= 2;
 3134                 } else if ((char)((*uchr) & 0xFF) == 'x') {
 3135                         base = 16;
 3136                         uchr++;
 3137                         len -= 2;
 3138                 } else
 3139                         base = 10;
 3140         }
 3141 
 3142         astr = abuf;
 3143         if (neg) {
 3144                 strcpy(astr, "-");
 3145                 astr++;
 3146         }
 3147 
 3148         ntoskrnl_unicode_to_ascii(uchr, astr, len);
 3149         *val = strtoul(abuf, NULL, base);
 3150 
 3151         return (STATUS_SUCCESS);
 3152 }
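
/*
 * Illustrative sketch of the parser above. Note that this
 * implementation keys the base off a bare 'b', 'o' or 'x' prefix
 * (not a leading "0x"), and an unrecognized prefix means base 10.
 */
static uint32_t
example_parse_hex(void)
{
	uint16_t	ubuf[] = { 'x', '1', 'A', 0 };	/* "x1A" */
	unicode_string	us;
	uint32_t	val;

	RtlInitUnicodeString(&us, ubuf);
	RtlUnicodeStringToInteger(&us, 0, &val);	/* val == 0x1A */
	return (val);
}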
 3153 
 3154 void
 3155 RtlFreeUnicodeString(ustr)
 3156         unicode_string          *ustr;
 3157 {
 3158         if (ustr->us_buf == NULL)
 3159                 return;
 3160         ExFreePool(ustr->us_buf);
 3161         ustr->us_buf = NULL;
 3162 }
 3163 
 3164 void
 3165 RtlFreeAnsiString(astr)
 3166         ansi_string             *astr;
 3167 {
 3168         if (astr->as_buf == NULL)
 3169                 return;
 3170         ExFreePool(astr->as_buf);
 3171         astr->as_buf = NULL;
 3172 }
 3173 
 3174 static int
 3175 atoi(str)
 3176         const char              *str;
 3177 {
 3178         return (int)strtol(str, (char **)NULL, 10);
 3179 }
 3180 
 3181 static long
 3182 atol(str)
 3183         const char              *str;
 3184 {
 3185         return strtol(str, (char **)NULL, 10);
 3186 }
 3187 
 3188 static int
 3189 rand(void)
 3190 {
 3191 
 3192         return (random());
 3193 }
 3194 
 3195 static void
 3196 srand(unsigned int seed)
 3197 {
 3198 
 3199         srandom(seed);
 3200 }
 3201 
 3202 static uint8_t
 3203 IoIsWdmVersionAvailable(uint8_t major, uint8_t minor)
 3204 {
 3205         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
 3206                 return (TRUE);
 3207         return (FALSE);
 3208 }
 3209 
 3210 static int32_t
 3211 IoOpenDeviceRegistryKey(struct device_object *devobj, uint32_t type,
 3212     uint32_t mask, void **key)
 3213 {
 3214         return (NDIS_STATUS_INVALID_DEVICE_REQUEST);
 3215 }
 3216 
 3217 static ndis_status
 3218 IoGetDeviceObjectPointer(name, reqaccess, fileobj, devobj)
 3219         unicode_string          *name;
 3220         uint32_t                reqaccess;
 3221         void                    *fileobj;
 3222         device_object           *devobj;
 3223 {
 3224         return (STATUS_SUCCESS);
 3225 }
 3226 
 3227 static ndis_status
 3228 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
 3229         device_object           *devobj;
 3230         uint32_t                regprop;
 3231         uint32_t                buflen;
 3232         void                    *prop;
 3233         uint32_t                *reslen;
 3234 {
 3235         driver_object           *drv;
 3236         uint16_t                **name;
 3237 
 3238         drv = devobj->do_drvobj;
 3239 
 3240         switch (regprop) {
 3241         case DEVPROP_DRIVER_KEYNAME:
 3242                 name = prop;
 3243                 *name = drv->dro_drivername.us_buf;
 3244                 *reslen = drv->dro_drivername.us_len;
 3245                 break;
 3246         default:
 3247                 return (STATUS_INVALID_PARAMETER_2);
 3248                 break;
 3249         }
 3250 
 3251         return (STATUS_SUCCESS);
 3252 }
 3253 
 3254 static void
 3255 KeInitializeMutex(kmutex, level)
 3256         kmutant                 *kmutex;
 3257         uint32_t                level;
 3258 {
 3259         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
 3260         kmutex->km_abandoned = FALSE;
 3261         kmutex->km_apcdisable = 1;
 3262         kmutex->km_header.dh_sigstate = 1;
 3263         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
 3264         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
 3265         kmutex->km_ownerthread = NULL;
 3266 }
 3267 
 3268 static uint32_t
 3269 KeReleaseMutex(kmutant *kmutex, uint8_t kwait)
 3270 {
 3271         uint32_t                prevstate;
 3272 
 3273         mtx_lock(&ntoskrnl_dispatchlock);
 3274         prevstate = kmutex->km_header.dh_sigstate;
 3275         if (kmutex->km_ownerthread != curthread) {
 3276                 mtx_unlock(&ntoskrnl_dispatchlock);
 3277                 return (STATUS_MUTANT_NOT_OWNED);
 3278         }
 3279 
 3280         kmutex->km_header.dh_sigstate++;
 3281         kmutex->km_abandoned = FALSE;
 3282 
 3283         if (kmutex->km_header.dh_sigstate == 1) {
 3284                 kmutex->km_ownerthread = NULL;
 3285                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
 3286         }
 3287 
 3288         mtx_unlock(&ntoskrnl_dispatchlock);
 3289 
 3290         return (prevstate);
 3291 }
 3292 
 3293 static uint32_t
 3294 KeReadStateMutex(kmutex)
 3295         kmutant                 *kmutex;
 3296 {
 3297         return (kmutex->km_header.dh_sigstate);
 3298 }
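
/*
 * Illustrative sketch of the acquire/release pattern served by the
 * mutex routines above. KeWaitForSingleObject() is implemented
 * elsewhere in this file; it decrements dh_sigstate and records
 * curthread as the owner when it satisfies the wait.
 */
static kmutant example_kmtx;

static void
example_mutex_usage(void)
{

	KeInitializeMutex(&example_kmtx, 0);
	KeWaitForSingleObject(&example_kmtx, 0, 0, FALSE, NULL);
	/* ... critical section; KeReleaseMutex() re-signals ... */
	KeReleaseMutex(&example_kmtx, FALSE);
}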
 3299 
 3300 void
 3301 KeInitializeEvent(nt_kevent *kevent, uint32_t type, uint8_t state)
 3302 {
 3303         InitializeListHead((&kevent->k_header.dh_waitlisthead));
 3304         kevent->k_header.dh_sigstate = state;
 3305         if (type == EVENT_TYPE_NOTIFY)
 3306                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
 3307         else
 3308                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
 3309         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
 3310 }
 3311 
 3312 uint32_t
 3313 KeResetEvent(kevent)
 3314         nt_kevent               *kevent;
 3315 {
 3316         uint32_t                prevstate;
 3317 
 3318         mtx_lock(&ntoskrnl_dispatchlock);
 3319         prevstate = kevent->k_header.dh_sigstate;
 3320         kevent->k_header.dh_sigstate = FALSE;
 3321         mtx_unlock(&ntoskrnl_dispatchlock);
 3322 
 3323         return (prevstate);
 3324 }
 3325 
 3326 uint32_t
 3327 KeSetEvent(nt_kevent *kevent, uint32_t increment, uint8_t kwait)
 3328 {
 3329         uint32_t                prevstate;
 3330         wait_block              *w;
 3331         nt_dispatch_header      *dh;
 3332         struct thread           *td;
 3333         wb_ext                  *we;
 3334 
 3335         mtx_lock(&ntoskrnl_dispatchlock);
 3336         prevstate = kevent->k_header.dh_sigstate;
 3337         dh = &kevent->k_header;
 3338 
 3339         if (IsListEmpty(&dh->dh_waitlisthead))
 3340                 /*
 3341                  * If there's nobody in the waitlist, just set
 3342                  * the state to signalled.
 3343                  */
 3344                 dh->dh_sigstate = 1;
 3345         else {
 3346                 /*
 3347                  * Get the first waiter. If this is a synchronization
 3348                  * event, just wake up that one thread (don't bother
 3349                  * setting the state to signalled since we're supposed
 3350                  * to automatically clear synchronization events anyway).
 3351                  *
 3352                  * If it's a notification event, or the first
 3353                  * waiter is doing a WAITTYPE_ALL wait, go through
 3354                  * the full wait satisfaction process.
 3355                  */
 3356                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
 3357                     wait_block, wb_waitlist);
 3358                 we = w->wb_ext;
 3359                 td = we->we_td;
 3360                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
 3361                     w->wb_waittype == WAITTYPE_ALL) {
 3362                         if (prevstate == 0) {
 3363                                 dh->dh_sigstate = 1;
 3364                                 ntoskrnl_waittest(dh, increment);
 3365                         }
 3366                 } else {
 3367                         w->wb_awakened |= TRUE;
 3368                         cv_broadcastpri(&we->we_cv,
 3369                             (w->wb_oldpri - (increment * 4)) > PRI_MIN_KERN ?
 3370                             w->wb_oldpri - (increment * 4) : PRI_MIN_KERN);
 3371                 }
 3372         }
 3373 
 3374         mtx_unlock(&ntoskrnl_dispatchlock);
 3375 
 3376         return (prevstate);
 3377 }
 3378 
 3379 void
 3380 KeClearEvent(kevent)
 3381         nt_kevent               *kevent;
 3382 {
 3383         kevent->k_header.dh_sigstate = FALSE;
 3384 }
 3385 
 3386 uint32_t
 3387 KeReadStateEvent(kevent)
 3388         nt_kevent               *kevent;
 3389 {
 3390         return (kevent->k_header.dh_sigstate);
 3391 }
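
/*
 * Illustrative sketch of the two event flavors handled above: a
 * notification event stays signalled (waking every waiter) until
 * cleared by hand, while a synchronization event wakes one waiter
 * and is cleared automatically as part of satisfying the wait.
 */
static nt_kevent example_nev, example_sev;

static void
example_event_usage(void)
{

	KeInitializeEvent(&example_nev, EVENT_TYPE_NOTIFY, FALSE);
	KeInitializeEvent(&example_sev, EVENT_TYPE_SYNC, FALSE);

	KeSetEvent(&example_nev, IO_NO_INCREMENT, FALSE);
	KeClearEvent(&example_nev);	/* stays set until cleared */

	KeSetEvent(&example_sev, IO_NO_INCREMENT, FALSE);
	/* A single KeWaitForSingleObject() consumes this signal. */
}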
 3392 
 3393 /*
 3394  * The object manager in Windows is responsible for managing
 3395  * references and access to various types of objects, including
 3396  * device_objects, events, threads, timers and so on. However,
 3397  * there's a difference in the way objects are handled in user
 3398  * mode versus kernel mode.
 3399  *
 3400  * In user mode (i.e. Win32 applications), all objects are
 3401  * managed by the object manager. For example, when you create
 3402  * a timer or event object, you actually end up with an 
 3403  * object_header (for the object manager's bookkeeping
 3404  * purposes) and an object body (which contains the actual object
 3405  * structure, e.g. ktimer, kevent, etc...). This allows Windows
 3406  * to manage resource quotas and to enforce access restrictions
 3407  * on basically every kind of system object handled by the kernel.
 3408  *
 3409  * However, in kernel mode, you only end up using the object
 3410  * manager some of the time. For example, in a driver, you create
 3411  * a timer object by simply allocating the memory for a ktimer
 3412  * structure and initializing it with KeInitializeTimer(). Hence,
 3413  * the timer has no object_header and no reference counting or
 3414  * security/resource checks are done on it. The assumption in
 3415  * this case is that if you're running in kernel mode, you know
 3416  * what you're doing, and you're already at an elevated privilege
 3417  * anyway.
 3418  *
 3419  * There are some exceptions to this. The two most important ones
 3420  * for our purposes are device_objects and threads. We need to use
 3421  * the object manager to do reference counting on device_objects,
 3422  * and for threads, you can only get a pointer to a thread's
 3423  * dispatch header by using ObReferenceObjectByHandle() on the
 3424  * handle returned by PsCreateSystemThread().
 3425  */
 3426 
 3427 static ndis_status
 3428 ObReferenceObjectByHandle(ndis_handle handle, uint32_t reqaccess, void *otype,
 3429         uint8_t accessmode, void **object, void **handleinfo)
 3430 {
 3431         nt_objref               *nr;
 3432 
 3433         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
 3434         if (nr == NULL)
 3435                 return (STATUS_INSUFFICIENT_RESOURCES);
 3436 
 3437         InitializeListHead((&nr->no_dh.dh_waitlisthead));
 3438         nr->no_obj = handle;
 3439         nr->no_dh.dh_type = DISP_TYPE_THREAD;
 3440         nr->no_dh.dh_sigstate = 0;
 3441         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
 3442             sizeof(uint32_t));
 3443         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
 3444         *object = nr;
 3445 
 3446         return (STATUS_SUCCESS);
 3447 }
 3448 
 3449 static void
 3450 ObfDereferenceObject(object)
 3451         void                    *object;
 3452 {
 3453         nt_objref               *nr;
 3454 
 3455         nr = object;
 3456         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
 3457         free(nr, M_DEVBUF);
 3458 }
 3459 
 3460 static uint32_t
 3461 ZwClose(handle)
 3462         ndis_handle             handle;
 3463 {
 3464         return (STATUS_SUCCESS);
 3465 }
 3466 
 3467 static uint32_t
 3468 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
 3469         uint32_t                traceclass;
 3470         void                    *traceinfo;
 3471         uint32_t                infolen;
 3472         uint32_t                reqlen;
 3473         void                    *buf;
 3474 {
 3475         return (STATUS_NOT_FOUND);
 3476 }
 3477 
 3478 static uint32_t
 3479 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
 3480         void *guid, uint16_t messagenum, ...)
 3481 {
 3482         return (STATUS_SUCCESS);
 3483 }
 3484 
 3485 static uint32_t
 3486 IoWMIRegistrationControl(dobj, action)
 3487         device_object           *dobj;
 3488         uint32_t                action;
 3489 {
 3490         return (STATUS_SUCCESS);
 3491 }
 3492 
 3493 /*
 3494  * This is here just in case the thread returns without calling
 3495  * PsTerminateSystemThread().
 3496  */
 3497 static void
 3498 ntoskrnl_thrfunc(arg)
 3499         void                    *arg;
 3500 {
 3501         thread_context          *thrctx;
 3502         uint32_t (*tfunc)(void *);
 3503         void                    *tctx;
 3504         uint32_t                rval;
 3505 
 3506         thrctx = arg;
 3507         tfunc = thrctx->tc_thrfunc;
 3508         tctx = thrctx->tc_thrctx;
 3509         free(thrctx, M_TEMP);
 3510 
 3511         rval = MSCALL1(tfunc, tctx);
 3512 
 3513         PsTerminateSystemThread(rval);
 3514         return; /* notreached */
 3515 }
 3516 
 3517 static ndis_status
 3518 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
 3519         clientid, thrfunc, thrctx)
 3520         ndis_handle             *handle;
 3521         uint32_t                reqaccess;
 3522         void                    *objattrs;
 3523         ndis_handle             phandle;
 3524         void                    *clientid;
 3525         void                    *thrfunc;
 3526         void                    *thrctx;
 3527 {
 3528         int                     error;
 3529         thread_context          *tc;
 3530         struct proc             *p;
 3531 
 3532         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
 3533         if (tc == NULL)
 3534                 return (STATUS_INSUFFICIENT_RESOURCES);
 3535 
 3536         tc->tc_thrctx = thrctx;
 3537         tc->tc_thrfunc = thrfunc;
 3538 
 3539         error = kproc_create(ntoskrnl_thrfunc, tc, &p,
 3540             RFHIGHPID, NDIS_KSTACK_PAGES, "Windows Kthread %d", ntoskrnl_kth);
 3541 
 3542         if (error) {
 3543                 free(tc, M_TEMP);
 3544                 return (STATUS_INSUFFICIENT_RESOURCES);
 3545         }
 3546 
 3547         *handle = p;
 3548         ntoskrnl_kth++;
 3549 
 3550         return (STATUS_SUCCESS);
 3551 }
 3552 
 3553 /*
 3554  * In Windows, the exit of a thread is an event that you're allowed
 3555  * to wait on, assuming you've obtained a reference to the thread using
 3556  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 3557  * simulate this behavior is to register each thread we create in a
 3558  * reference list, and if someone holds a reference to us, we poke
 3559  * them.
 3560  */
 3561 static ndis_status
 3562 PsTerminateSystemThread(status)
 3563         ndis_status             status;
 3564 {
 3565         struct nt_objref        *nr;
 3566 
 3567         mtx_lock(&ntoskrnl_dispatchlock);
 3568         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
 3569                 if (nr->no_obj != curthread->td_proc)
 3570                         continue;
 3571                 nr->no_dh.dh_sigstate = 1;
 3572                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
 3573                 break;
 3574         }
 3575         mtx_unlock(&ntoskrnl_dispatchlock);
 3576 
 3577         ntoskrnl_kth--;
 3578 
 3579         kproc_exit(0);
 3580         return (0);     /* notreached */
 3581 }
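
/*
 * Illustrative sketch tying the object manager notes to the thread
 * routines above: spawn a thread, take a waitable reference on its
 * handle, wait for it to exit, then drop the reference. This shim
 * ignores reqaccess/otype, and the MSCALL calling-convention
 * marshalling a real Windows binary would go through is glossed
 * over here.
 */
static uint32_t
example_thread_main(void *ctx)
{

	/* ... do work ... */
	PsTerminateSystemThread(STATUS_SUCCESS);
	return (0);		/* notreached */
}

static void
example_thread_join(void)
{
	ndis_handle	handle;
	void		*ref;

	PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
	    example_thread_main, NULL);
	ObReferenceObjectByHandle(handle, 0, NULL, 0, &ref, NULL);
	KeWaitForSingleObject(ref, 0, 0, FALSE, NULL);
	ObfDereferenceObject(ref);
}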
 3582 
 3583 static uint32_t
 3584 DbgPrint(char *fmt, ...)
 3585 {
 3586         va_list                 ap;
 3587 
 3588         if (bootverbose) {
 3589                 va_start(ap, fmt);
 3590                 vprintf(fmt, ap);
 3591                 va_end(ap);
 3592         }
 3593 
 3594         return (STATUS_SUCCESS);
 3595 }
 3596 
 3597 static void
 3598 DbgBreakPoint(void)
 3599 {
 3600 
 3601         kdb_enter(KDB_WHY_NDIS, "DbgBreakPoint(): breakpoint");
 3602 }
 3603 
 3604 static void
 3605 KeBugCheckEx(code, param1, param2, param3, param4)
 3606     uint32_t                    code;
 3607     u_long                      param1;
 3608     u_long                      param2;
 3609     u_long                      param3;
 3610     u_long                      param4;
 3611 {
 3612         panic("KeBugCheckEx: STOP 0x%X", code);
 3613 }
 3614 
 3615 static void
 3616 ntoskrnl_timercall(arg)
 3617         void                    *arg;
 3618 {
 3619         ktimer                  *timer;
 3620         struct timeval          tv;
 3621         kdpc                    *dpc;
 3622 
 3623         mtx_lock(&ntoskrnl_dispatchlock);
 3624 
 3625         timer = arg;
 3626 
 3627 #ifdef NTOSKRNL_DEBUG_TIMERS
 3628         ntoskrnl_timer_fires++;
 3629 #endif
 3630         ntoskrnl_remove_timer(timer);
 3631 
 3632         /*
 3633          * This should never happen, but complain
 3634          * if it does.
 3635          */
 3636 
 3637         if (timer->k_header.dh_inserted == FALSE) {
 3638                 mtx_unlock(&ntoskrnl_dispatchlock);
 3639                 printf("NTOS: timer %p fired even though "
 3640                     "it was canceled\n", timer);
 3641                 return;
 3642         }
 3643 
 3644         /* Mark the timer as no longer being on the timer queue. */
 3645 
 3646         timer->k_header.dh_inserted = FALSE;
 3647 
 3648         /* Now signal the object and satisfy any waits on it. */
 3649 
 3650         timer->k_header.dh_sigstate = 1;
 3651         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
 3652 
 3653         /*
 3654          * If this is a periodic timer, re-arm it
 3655          * so it will fire again. We do this before
 3656          * calling any deferred procedure calls because
 3657          * it's possible the DPC might cancel the timer,
 3658          * in which case it would be wrong for us to
 3659          * re-arm it afterwards.
 3660          */
 3661 
 3662         if (timer->k_period) {
 3663                 tv.tv_sec = 0;
 3664                 tv.tv_usec = timer->k_period * 1000;
 3665                 timer->k_header.dh_inserted = TRUE;
 3666                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
 3667 #ifdef NTOSKRNL_DEBUG_TIMERS
 3668                 ntoskrnl_timer_reloads++;
 3669 #endif
 3670         }
 3671 
 3672         dpc = timer->k_dpc;
 3673 
 3674         mtx_unlock(&ntoskrnl_dispatchlock);
 3675 
 3676         /* If there's a DPC associated with the timer, queue it up. */
 3677 
 3678         if (dpc != NULL)
 3679                 KeInsertQueueDpc(dpc, NULL, NULL);
 3680 }
 3681 
 3682 #ifdef NTOSKRNL_DEBUG_TIMERS
 3683 static int
 3684 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
 3685 {
 3686         int                     ret;
 3687 
 3688         ret = 0;
 3689         ntoskrnl_show_timers();
 3690         return (sysctl_handle_int(oidp, &ret, 0, req));
 3691 }
 3692 
 3693 static void
 3694 ntoskrnl_show_timers()
 3695 {
 3696         int                     i = 0;
 3697         list_entry              *l;
 3698 
 3699         mtx_lock_spin(&ntoskrnl_calllock);
 3700         l = ntoskrnl_calllist.nle_flink;
 3701         while(l != &ntoskrnl_calllist) {
 3702                 i++;
 3703                 l = l->nle_flink;
 3704         }
 3705         mtx_unlock_spin(&ntoskrnl_calllock);
 3706 
 3707         printf("\n");
 3708         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
 3709         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
 3710         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
 3711         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
 3712         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
 3713         printf("\n");
 3714 }
 3715 #endif
 3716 
 3717 /*
 3718  * Must be called with dispatcher lock held.
 3719  */
 3720 
 3721 static void
 3722 ntoskrnl_insert_timer(timer, ticks)
 3723         ktimer                  *timer;
 3724         int                     ticks;
 3725 {
 3726         callout_entry           *e;
 3727         list_entry              *l;
 3728         struct callout          *c;
 3729 
 3730         /*
 3731          * Try to allocate a callout slot from the preallocated pool.
 3732          */
 3733         mtx_lock_spin(&ntoskrnl_calllock);
 3734         if (IsListEmpty(&ntoskrnl_calllist)) {
 3735                 mtx_unlock_spin(&ntoskrnl_calllock);
 3736 #ifdef NTOSKRNL_DEBUG_TIMERS
 3737                 ntoskrnl_show_timers();
 3738 #endif
 3739                 panic("out of timers!");
 3740         }
 3741         l = RemoveHeadList(&ntoskrnl_calllist);
 3742         mtx_unlock_spin(&ntoskrnl_calllock);
 3743 
 3744         e = CONTAINING_RECORD(l, callout_entry, ce_list);
 3745         c = &e->ce_callout;
 3746 
 3747         timer->k_callout = c;
 3748 
 3749         callout_init(c, 1);
 3750         callout_reset(c, ticks, ntoskrnl_timercall, timer);
 3751 }
 3752 
 3753 static void
 3754 ntoskrnl_remove_timer(timer)
 3755         ktimer                  *timer;
 3756 {
 3757         callout_entry           *e;
 3758 
 3759         e = (callout_entry *)timer->k_callout;
 3760         callout_stop(timer->k_callout);
 3761 
 3762         mtx_lock_spin(&ntoskrnl_calllock);
 3763         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
 3764         mtx_unlock_spin(&ntoskrnl_calllock);
 3765 }
 3766 
 3767 void
 3768 KeInitializeTimer(timer)
 3769         ktimer                  *timer;
 3770 {
 3771         if (timer == NULL)
 3772                 return;
 3773 
 3774         KeInitializeTimerEx(timer, EVENT_TYPE_NOTIFY);
 3775 }
 3776 
 3777 void
 3778 KeInitializeTimerEx(timer, type)
 3779         ktimer                  *timer;
 3780         uint32_t                type;
 3781 {
 3782         if (timer == NULL)
 3783                 return;
 3784 
 3785         bzero((char *)timer, sizeof(ktimer));
 3786         InitializeListHead((&timer->k_header.dh_waitlisthead));
 3787         timer->k_header.dh_sigstate = FALSE;
 3788         timer->k_header.dh_inserted = FALSE;
 3789         if (type == EVENT_TYPE_NOTIFY)
 3790                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
 3791         else
 3792                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
 3793         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
 3794 }
 3795 
 3796 /*
 3797  * DPC subsystem. A Windows Deferred Procedure Call has the following
 3798  * properties:
 3799  * - It runs at DISPATCH_LEVEL.
 3800  * - It can have one of 3 importance values that control when it
 3801  *   runs relative to other DPCs in the queue.
 3802  * - On SMP systems, it can be set to run on a specific processor.
 3803  * In order to satisfy the last property, we create a DPC thread for
 3804  * each CPU in the system and bind it to that CPU. Each thread
 3805  * services a single dispatch queue; a DPC's importance decides
 3806  * whether ntoskrnl_insert_dpc() links it at the head or the tail.
 3807  *
 3808  * In Windows, interrupt handlers run as DPCs. (Not to be confused
 3809  * with ISRs, which run in interrupt context and can preempt DPCs.)
 3810  * Interrupt-handler DPCs are given the highest importance so
 3811  * that they'll take precedence over timers and other things.
 3812  */
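
/*
 * Illustrative sketch of the driver-visible DPC API serviced by the
 * threads below; the callback is hypothetical, and a real driver's
 * DPC routine would be reached through the MSCALL4() wrapper in
 * ntoskrnl_dpc_thread().
 */
static kdpc example_kdpc;

static void
example_dpc_func(kdpc *dpc, void *ctx, void *sysarg1, void *sysarg2)
{

	/* Runs in the elevated-priority DPC thread for its CPU. */
}

static void
example_dpc_usage(void)
{

	KeInitializeDpc(&example_kdpc, example_dpc_func, NULL);
	KeSetImportanceDpc(&example_kdpc, KDPC_IMPORTANCE_HIGH);
	KeSetTargetProcessorDpc(&example_kdpc, 0);
	KeInsertQueueDpc(&example_kdpc, NULL, NULL);
	/* Re-queueing before it has run returns FALSE. */
}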
 3813 
 3814 static void
 3815 ntoskrnl_dpc_thread(arg)
 3816         void                    *arg;
 3817 {
 3818         kdpc_queue              *kq;
 3819         kdpc                    *d;
 3820         list_entry              *l;
 3821         uint8_t                 irql;
 3822 
 3823         kq = arg;
 3824 
 3825         InitializeListHead(&kq->kq_disp);
 3826         kq->kq_td = curthread;
 3827         kq->kq_exit = 0;
 3828         kq->kq_running = FALSE;
 3829         KeInitializeSpinLock(&kq->kq_lock);
 3830         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 3831         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
 3832 
 3833         /*
 3834          * Elevate our priority. DPCs are used to run interrupt
 3835          * handlers, and they should trigger as soon as possible
 3836          * once scheduled by an ISR.
 3837          */
 3838 
 3839         thread_lock(curthread);
 3840 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3841         sched_bind(curthread, kq->kq_cpu);
 3842 #endif
 3843         sched_prio(curthread, PRI_MIN_KERN);
 3844         thread_unlock(curthread);
 3845 
 3846         while (1) {
 3847                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 3848 
 3849                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 3850 
 3851                 if (kq->kq_exit) {
 3852                         kq->kq_exit = 0;
 3853                         KeReleaseSpinLock(&kq->kq_lock, irql);
 3854                         break;
 3855                 }
 3856 
 3857                 kq->kq_running = TRUE;
 3858 
 3859                 while (!IsListEmpty(&kq->kq_disp)) {
 3860                         l = RemoveHeadList((&kq->kq_disp));
 3861                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3862                         InitializeListHead((&d->k_dpclistentry));
 3863                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 3864                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
 3865                             d->k_sysarg1, d->k_sysarg2);
 3866                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3867                 }
 3868 
 3869                 kq->kq_running = FALSE;
 3870 
 3871                 KeReleaseSpinLock(&kq->kq_lock, irql);
 3872 
 3873                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
 3874         }
 3875 
 3876         kproc_exit(0);
 3877         return; /* notreached */
 3878 }
 3879 
 3880 static void
 3881 ntoskrnl_destroy_dpc_threads(void)
 3882 {
 3883         kdpc_queue              *kq;
 3884         kdpc                    dpc;
 3885         int                     i;
 3886 
 3887         kq = kq_queues;
 3888 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3889         for (i = 0; i < mp_ncpus; i++) {
 3890 #else
 3891         for (i = 0; i < 1; i++) {
 3892 #endif
 3893                 kq = kq_queues + i;
 3894 
 3895                 kq->kq_exit = 1;
 3896                 KeInitializeDpc(&dpc, NULL, NULL);
 3897                 KeSetTargetProcessorDpc(&dpc, i);
 3898                 KeInsertQueueDpc(&dpc, NULL, NULL);
 3899                 while (kq->kq_exit)
 3900                         tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", hz/10);
 3901         }
 3902 }
 3903 
 3904 static uint8_t
 3905 ntoskrnl_insert_dpc(head, dpc)
 3906         list_entry              *head;
 3907         kdpc                    *dpc;
 3908 {
 3909         list_entry              *l;
 3910         kdpc                    *d;
 3911 
 3912         l = head->nle_flink;
 3913         while (l != head) {
 3914                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3915                 if (d == dpc)
 3916                         return (FALSE);
 3917                 l = l->nle_flink;
 3918         }
 3919 
 3920         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
 3921                 InsertTailList((head), (&dpc->k_dpclistentry));
 3922         else
 3923                 InsertHeadList((head), (&dpc->k_dpclistentry));
 3924 
 3925         return (TRUE);
 3926 }
 3927 
 3928 void
 3929 KeInitializeDpc(dpc, dpcfunc, dpcctx)
 3930         kdpc                    *dpc;
 3931         void                    *dpcfunc;
 3932         void                    *dpcctx;
 3933 {
 3934 
 3935         if (dpc == NULL)
 3936                 return;
 3937 
 3938         dpc->k_deferedfunc = dpcfunc;
 3939         dpc->k_deferredctx = dpcctx;
 3940         dpc->k_num = KDPC_CPU_DEFAULT;
 3941         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
 3942         InitializeListHead((&dpc->k_dpclistentry));
 3943 }
 3944 
 3945 uint8_t
 3946 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
 3947         kdpc                    *dpc;
 3948         void                    *sysarg1;
 3949         void                    *sysarg2;
 3950 {
 3951         kdpc_queue              *kq;
 3952         uint8_t                 r;
 3953         uint8_t                 irql;
 3954 
 3955         if (dpc == NULL)
 3956                 return (FALSE);
 3957 
 3958         kq = kq_queues;
 3959 
 3960 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3961         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 3962 
 3963         /*
 3964          * By default, the DPC is queued to run on the same CPU
 3965          * that scheduled it.
 3966          */
 3967 
 3968         if (dpc->k_num == KDPC_CPU_DEFAULT)
 3969                 kq += curthread->td_oncpu;
 3970         else
 3971                 kq += dpc->k_num;
 3972         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3973 #else
 3974         KeAcquireSpinLock(&kq->kq_lock, &irql);
 3975 #endif
 3976 
 3977         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
 3978         if (r == TRUE) {
 3979                 dpc->k_sysarg1 = sysarg1;
 3980                 dpc->k_sysarg2 = sysarg2;
 3981         }
 3982         KeReleaseSpinLock(&kq->kq_lock, irql);
 3983 
 3984         if (r == FALSE)
 3985                 return (r);
 3986 
 3987         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 3988 
 3989         return (r);
 3990 }
 3991 
 3992 uint8_t
 3993 KeRemoveQueueDpc(dpc)
 3994         kdpc                    *dpc;
 3995 {
 3996         kdpc_queue              *kq;
 3997         uint8_t                 irql;
 3998 
 3999         if (dpc == NULL)
 4000                 return (FALSE);
 4001 
 4002 #ifdef NTOSKRNL_MULTIPLE_DPCS
 4003         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 4004 
 4005         kq = kq_queues + dpc->k_num;
 4006 
 4007         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 4008 #else
 4009         kq = kq_queues;
 4010         KeAcquireSpinLock(&kq->kq_lock, &irql);
 4011 #endif
 4012 
 4013         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
 4014                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 4015                 KeLowerIrql(irql);
 4016                 return (FALSE);
 4017         }
 4018 
 4019         RemoveEntryList((&dpc->k_dpclistentry));
 4020         InitializeListHead((&dpc->k_dpclistentry));
 4021 
 4022         KeReleaseSpinLock(&kq->kq_lock, irql);
 4023 
 4024         return (TRUE);
 4025 }
 4026 
 4027 void
 4028 KeSetImportanceDpc(dpc, imp)
 4029         kdpc                    *dpc;
 4030         uint32_t                imp;
 4031 {
 4032         if (imp != KDPC_IMPORTANCE_LOW &&
 4033             imp != KDPC_IMPORTANCE_MEDIUM &&
 4034             imp != KDPC_IMPORTANCE_HIGH)
 4035                 return;
 4036 
 4037         dpc->k_importance = (uint8_t)imp;
 4038 }
 4039 
 4040 void
 4041 KeSetTargetProcessorDpc(kdpc *dpc, uint8_t cpu)
 4042 {
 4043         if (cpu >= mp_ncpus)
 4044                 return;
 4045 
 4046         dpc->k_num = cpu;
 4047 }
 4048 
 4049 void
 4050 KeFlushQueuedDpcs(void)
 4051 {
 4052         kdpc_queue              *kq;
 4053         int                     i;
 4054 
 4055         /*
 4056          * Poke each DPC queue and wait
 4057          * for them to drain.
 4058          */
 4059 
 4060 #ifdef NTOSKRNL_MULTIPLE_DPCS
 4061         for (i = 0; i < mp_ncpus; i++) {
 4062 #else
 4063         for (i = 0; i < 1; i++) {
 4064 #endif
 4065                 kq = kq_queues + i;
 4066                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 4067                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
 4068         }
 4069 }
 4070 
 4071 uint32_t
 4072 KeGetCurrentProcessorNumber(void)
 4073 {
 4074         return ((uint32_t)curthread->td_oncpu);
 4075 }
 4076 
 4077 uint8_t
 4078 KeSetTimerEx(timer, duetime, period, dpc)
 4079         ktimer                  *timer;
 4080         int64_t                 duetime;
 4081         uint32_t                period;
 4082         kdpc                    *dpc;
 4083 {
 4084         struct timeval          tv;
 4085         uint64_t                curtime;
 4086         uint8_t                 pending;
 4087 
 4088         if (timer == NULL)
 4089                 return (FALSE);
 4090 
 4091         mtx_lock(&ntoskrnl_dispatchlock);
 4092 
 4093         if (timer->k_header.dh_inserted == TRUE) {
 4094                 ntoskrnl_remove_timer(timer);
 4095 #ifdef NTOSKRNL_DEBUG_TIMERS
 4096                 ntoskrnl_timer_cancels++;
 4097 #endif
 4098                 timer->k_header.dh_inserted = FALSE;
 4099                 pending = TRUE;
 4100         } else
 4101                 pending = FALSE;
 4102 
 4103         timer->k_duetime = duetime;
 4104         timer->k_period = period;
 4105         timer->k_header.dh_sigstate = FALSE;
 4106         timer->k_dpc = dpc;
 4107 
 4108         if (duetime < 0) {
 4109                 tv.tv_sec = - (duetime) / 10000000;
 4110                 tv.tv_usec = (- (duetime) / 10) -
 4111                     (tv.tv_sec * 1000000);
 4112         } else {
 4113                 ntoskrnl_time(&curtime);
 4114                 if (duetime < curtime)
 4115                         tv.tv_sec = tv.tv_usec = 0;
 4116                 else {
 4117                         tv.tv_sec = ((duetime) - curtime) / 10000000;
 4118                         tv.tv_usec = ((duetime) - curtime) / 10 -
 4119                             (tv.tv_sec * 1000000);
 4120                 }
 4121         }
 4122 
 4123         timer->k_header.dh_inserted = TRUE;
 4124         ntoskrnl_insert_timer(timer, tvtohz(&tv));
 4125 #ifdef NTOSKRNL_DEBUG_TIMERS
 4126         ntoskrnl_timer_sets++;
 4127 #endif
 4128 
 4129         mtx_unlock(&ntoskrnl_dispatchlock);
 4130 
 4131         return (pending);
 4132 }
 4133 
 4134 uint8_t
 4135 KeSetTimer(timer, duetime, dpc)
 4136         ktimer                  *timer;
 4137         int64_t                 duetime;
 4138         kdpc                    *dpc;
 4139 {
 4140         return (KeSetTimerEx(timer, duetime, 0, dpc));
 4141 }
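
/*
 * Illustrative sketch of the timer API above. Due times are in
 * 100-nanosecond units and negative values are relative to now
 * (-5000000 is 500ms); the period is in milliseconds, as consumed
 * by ntoskrnl_timercall(). The DPC callback is hypothetical.
 */
static ktimer	example_ktimer;
static kdpc	example_ktimer_dpc;

static void
example_timer_cb(kdpc *dpc, void *ctx, void *arg1, void *arg2)
{

	/* Queued by ntoskrnl_timercall() on each expiration. */
}

static void
example_timer_usage(void)
{

	KeInitializeTimerEx(&example_ktimer, EVENT_TYPE_NOTIFY);
	KeInitializeDpc(&example_ktimer_dpc, example_timer_cb, NULL);
	KeSetTimerEx(&example_ktimer, -5000000, 100, &example_ktimer_dpc);
	/* ... */
	KeCancelTimer(&example_ktimer);	/* an already-queued DPC still runs */
}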
 4142 
 4143 /*
 4144  * The Windows DDK documentation seems to say that cancelling
 4145  * a timer that has a DPC will result in the DPC also being
 4146  * cancelled, but this isn't really the case.
 4147  */
 4148 
 4149 uint8_t
 4150 KeCancelTimer(timer)
 4151         ktimer                  *timer;
 4152 {
 4153         uint8_t                 pending;
 4154 
 4155         if (timer == NULL)
 4156                 return (FALSE);
 4157 
 4158         mtx_lock(&ntoskrnl_dispatchlock);
 4159 
 4160         pending = timer->k_header.dh_inserted;
 4161 
 4162         if (timer->k_header.dh_inserted == TRUE) {
 4163                 timer->k_header.dh_inserted = FALSE;
 4164                 ntoskrnl_remove_timer(timer);
 4165 #ifdef NTOSKRNL_DEBUG_TIMERS
 4166                 ntoskrnl_timer_cancels++;
 4167 #endif
 4168         }
 4169 
 4170         mtx_unlock(&ntoskrnl_dispatchlock);
 4171 
 4172         return (pending);
 4173 }
 4174 
 4175 uint8_t
 4176 KeReadStateTimer(timer)
 4177         ktimer                  *timer;
 4178 {
 4179         return (timer->k_header.dh_sigstate);
 4180 }
 4181 
 4182 static int32_t
 4183 KeDelayExecutionThread(uint8_t wait_mode, uint8_t alertable, int64_t *interval)
 4184 {
 4185         ktimer                  timer;
 4186 
 4187         if (wait_mode != 0)
 4188                 panic("invalid wait_mode %d", wait_mode);
 4189 
 4190         KeInitializeTimer(&timer);
 4191         KeSetTimer(&timer, *interval, NULL);
 4192         KeWaitForSingleObject(&timer, 0, 0, alertable, NULL);
 4193 
 4194         return (STATUS_SUCCESS);
 4195 }
 4196 
 4197 static uint64_t
 4198 KeQueryInterruptTime(void)
 4199 {
 4200         int ticks;
 4201         struct timeval tv;
 4202 
 4203         getmicrouptime(&tv);
 4204 
 4205         ticks = tvtohz(&tv);
 4206 
 4207         return ((uint64_t)ticks * ((10000000 + hz - 1) / hz));
 4208 }
 4209 
 4210 static struct thread *
 4211 KeGetCurrentThread(void)
 4212 {
 4213 
 4214         return curthread;
 4215 }
 4216 
 4217 static int32_t
 4218 KeSetPriorityThread(td, pri)
 4219         struct thread   *td;
 4220         int32_t         pri;
 4221 {
 4222         int32_t old;
 4223 
 4224         if (td == NULL)
 4225                 return LOW_REALTIME_PRIORITY;
 4226 
 4227         if (td->td_priority <= PRI_MIN_KERN)
 4228                 old = HIGH_PRIORITY;
 4229         else if (td->td_priority >= PRI_MAX_KERN)
 4230                 old = LOW_PRIORITY;
 4231         else
 4232                 old = LOW_REALTIME_PRIORITY;
 4233 
 4234         thread_lock(td);
 4235         if (pri == HIGH_PRIORITY)
 4236                 sched_prio(td, PRI_MIN_KERN);
 4237         if (pri == LOW_REALTIME_PRIORITY)
 4238                 sched_prio(td, PRI_MIN_KERN + (PRI_MAX_KERN - PRI_MIN_KERN) / 2);
 4239         if (pri == LOW_PRIORITY)
 4240                 sched_prio(td, PRI_MAX_KERN);
 4241         thread_unlock(td);
 4242 
 4243         return old;
 4244 }
 4245 
 4246 static void
 4247 dummy()
 4248 {
 4249         printf("ntoskrnl dummy called...\n");
 4250 }
 4251 
 4252 
 4253 image_patch_table ntoskrnl_functbl[] = {
 4254         IMPORT_SFUNC(RtlZeroMemory, 2),
 4255         IMPORT_SFUNC(RtlSecureZeroMemory, 2),
 4256         IMPORT_SFUNC(RtlFillMemory, 3),
 4257         IMPORT_SFUNC(RtlMoveMemory, 3),
 4258         IMPORT_SFUNC(RtlCharToInteger, 3),
 4259         IMPORT_SFUNC(RtlCopyMemory, 3),
 4260         IMPORT_SFUNC(RtlCopyString, 2),
 4261         IMPORT_SFUNC(RtlCompareMemory, 3),
 4262         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
 4263         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
 4264         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
 4265         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
 4266         IMPORT_SFUNC(RtlInitAnsiString, 2),
 4267         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
 4268         IMPORT_SFUNC(RtlInitUnicodeString, 2),
 4269         IMPORT_SFUNC(RtlFreeAnsiString, 1),
 4270         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
 4271         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
 4272         IMPORT_CFUNC(sprintf, 0),
 4273         IMPORT_CFUNC(vsprintf, 0),
 4274         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
 4275         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
 4276         IMPORT_CFUNC(DbgPrint, 0),
 4277         IMPORT_SFUNC(DbgBreakPoint, 0),
 4278         IMPORT_SFUNC(KeBugCheckEx, 5),
 4279         IMPORT_CFUNC(strncmp, 0),
 4280         IMPORT_CFUNC(strcmp, 0),
 4281         IMPORT_CFUNC_MAP(stricmp, strcasecmp, 0),
 4282         IMPORT_CFUNC(strncpy, 0),
 4283         IMPORT_CFUNC(strcpy, 0),
 4284         IMPORT_CFUNC(strlen, 0),
 4285         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
 4286         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
 4287         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
 4288         IMPORT_CFUNC_MAP(strncat, ntoskrnl_strncat, 0),
 4289         IMPORT_CFUNC_MAP(strchr, index, 0),
 4290         IMPORT_CFUNC_MAP(strrchr, rindex, 0),
 4291         IMPORT_CFUNC(memcpy, 0),
 4292         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
 4293         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
 4294         IMPORT_CFUNC_MAP(memchr, ntoskrnl_memchr, 0),
 4295         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
 4296         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
 4297         IMPORT_FFUNC(IofCallDriver, 2),
 4298         IMPORT_FFUNC(IofCompleteRequest, 2),
 4299         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
 4300         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
 4301         IMPORT_SFUNC(IoCancelIrp, 1),
 4302         IMPORT_SFUNC(IoConnectInterrupt, 11),
 4303         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
 4304         IMPORT_SFUNC(IoCreateDevice, 7),
 4305         IMPORT_SFUNC(IoDeleteDevice, 1),
 4306         IMPORT_SFUNC(IoGetAttachedDevice, 1),
 4307         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
 4308         IMPORT_SFUNC(IoDetachDevice, 1),
 4309         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
 4310         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
 4311         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
 4312         IMPORT_SFUNC(IoAllocateIrp, 2),
 4313         IMPORT_SFUNC(IoReuseIrp, 2),
 4314         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
 4315         IMPORT_SFUNC(IoFreeIrp, 1),
 4316         IMPORT_SFUNC(IoInitializeIrp, 3),
 4317         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
 4318         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
 4319         IMPORT_SFUNC(KeSynchronizeExecution, 3),
 4320         IMPORT_SFUNC(KeWaitForSingleObject, 5),
 4321         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
 4322         IMPORT_SFUNC(_allmul, 4),
 4323         IMPORT_SFUNC(_alldiv, 4),
 4324         IMPORT_SFUNC(_allrem, 4),
 4325         IMPORT_RFUNC(_allshr, 0),
 4326         IMPORT_RFUNC(_allshl, 0),
 4327         IMPORT_SFUNC(_aullmul, 4),
 4328         IMPORT_SFUNC(_aulldiv, 4),
 4329         IMPORT_SFUNC(_aullrem, 4),
 4330         IMPORT_RFUNC(_aullshr, 0),
 4331         IMPORT_RFUNC(_aullshl, 0),
 4332         IMPORT_CFUNC(atoi, 0),
 4333         IMPORT_CFUNC(atol, 0),
 4334         IMPORT_CFUNC(rand, 0),
 4335         IMPORT_CFUNC(srand, 0),
 4336         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
 4337         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
 4338         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
 4339         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
 4340         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
 4341         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
 4342         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
 4343         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
 4344         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
 4345         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
 4346         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
 4347         IMPORT_FFUNC(InitializeSListHead, 1),
 4348         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
 4349         IMPORT_SFUNC(ExQueryDepthSList, 1),
 4350         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
 4351                 InterlockedPopEntrySList, 1),
 4352         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
 4353                 InterlockedPushEntrySList, 2),
 4354         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
 4355         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
 4356         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
 4357         IMPORT_SFUNC(ExFreePoolWithTag, 2),
 4358         IMPORT_SFUNC(ExFreePool, 1),
 4359 #ifdef __i386__
 4360         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
 4361         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
 4362         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
 4363 #else
 4364         /*
 4365          * For AMD64, we can get away with just mapping
 4366          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
 4367          * because the calling conventions end up being the same.
 4368          * On i386, we have to be careful because KfAcquireSpinLock()
 4369          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
 4370          */
 4371         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
 4372         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
 4373         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
 4374 #endif
 4375         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
 4376         IMPORT_FFUNC(InterlockedIncrement, 1),
 4377         IMPORT_FFUNC(InterlockedDecrement, 1),
 4378         IMPORT_FFUNC(InterlockedExchange, 2),
 4379         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
 4380         IMPORT_SFUNC(IoAllocateMdl, 5),
 4381         IMPORT_SFUNC(IoFreeMdl, 1),
 4382         IMPORT_SFUNC(MmAllocateContiguousMemory, 2 + 1),
 4383         IMPORT_SFUNC(MmAllocateContiguousMemorySpecifyCache, 5 + 3),
 4384         IMPORT_SFUNC(MmFreeContiguousMemory, 1),
 4385         IMPORT_SFUNC(MmFreeContiguousMemorySpecifyCache, 3),
 4386         IMPORT_SFUNC(MmSizeOfMdl, 1),
 4387         IMPORT_SFUNC(MmMapLockedPages, 2),
 4388         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
 4389         IMPORT_SFUNC(MmUnmapLockedPages, 2),
 4390         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
 4391         IMPORT_SFUNC(MmGetPhysicalAddress, 1),
 4392         IMPORT_SFUNC(MmGetSystemRoutineAddress, 1),
 4393         IMPORT_SFUNC(MmIsAddressValid, 1),
 4394         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
 4395         IMPORT_SFUNC(MmUnmapIoSpace, 2),
 4396         IMPORT_SFUNC(KeInitializeSpinLock, 1),
 4397         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
 4398         IMPORT_SFUNC(IoOpenDeviceRegistryKey, 4),
 4399         IMPORT_SFUNC(IoGetDeviceObjectPointer, 4),
 4400         IMPORT_SFUNC(IoGetDeviceProperty, 5),
 4401         IMPORT_SFUNC(IoAllocateWorkItem, 1),
 4402         IMPORT_SFUNC(IoFreeWorkItem, 1),
 4403         IMPORT_SFUNC(IoQueueWorkItem, 4),
 4404         IMPORT_SFUNC(ExQueueWorkItem, 2),
 4405         IMPORT_SFUNC(ntoskrnl_workitem, 2),
 4406         IMPORT_SFUNC(KeInitializeMutex, 2),
 4407         IMPORT_SFUNC(KeReleaseMutex, 2),
 4408         IMPORT_SFUNC(KeReadStateMutex, 1),
 4409         IMPORT_SFUNC(KeInitializeEvent, 3),
 4410         IMPORT_SFUNC(KeSetEvent, 3),
 4411         IMPORT_SFUNC(KeResetEvent, 1),
 4412         IMPORT_SFUNC(KeClearEvent, 1),
 4413         IMPORT_SFUNC(KeReadStateEvent, 1),
 4414         IMPORT_SFUNC(KeInitializeTimer, 1),
 4415         IMPORT_SFUNC(KeInitializeTimerEx, 2),
 4416         IMPORT_SFUNC(KeSetTimer, 3),
 4417         IMPORT_SFUNC(KeSetTimerEx, 4),
 4418         IMPORT_SFUNC(KeCancelTimer, 1),
 4419         IMPORT_SFUNC(KeReadStateTimer, 1),
 4420         IMPORT_SFUNC(KeInitializeDpc, 3),
 4421         IMPORT_SFUNC(KeInsertQueueDpc, 3),
 4422         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
 4423         IMPORT_SFUNC(KeSetImportanceDpc, 2),
 4424         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
 4425         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
 4426         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
 4427         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
 4428         IMPORT_FFUNC(ObfDereferenceObject, 1),
 4429         IMPORT_SFUNC(ZwClose, 1),
 4430         IMPORT_SFUNC(PsCreateSystemThread, 7),
 4431         IMPORT_SFUNC(PsTerminateSystemThread, 1),
 4432         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
 4433         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
 4434         IMPORT_CFUNC(WmiTraceMessage, 0),
 4435         IMPORT_SFUNC(KeQuerySystemTime, 1),
 4436         IMPORT_CFUNC(KeTickCount, 0),
 4437         IMPORT_SFUNC(KeDelayExecutionThread, 3),
 4438         IMPORT_SFUNC(KeQueryInterruptTime, 0),
 4439         IMPORT_SFUNC(KeGetCurrentThread, 0),
 4440         IMPORT_SFUNC(KeSetPriorityThread, 2),
 4441 
 4442         /*
 4443          * This last entry is a catch-all for any function we haven't
 4444          * implemented yet. The PE import list patching routine will
 4445          * use it for any function that doesn't have an explicit match
 4446          * in this table.
 4447          */
 4448 
 4449         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
 4450 
 4451         /* End of list. */
 4452 
 4453         { NULL, NULL, NULL }
 4454 };
