FreeBSD/Linux Kernel Cross Reference
sys/compat/ndis/subr_ntoskrnl.c

/*-
 * Copyright (c) 2003
 *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.0/sys/compat/ndis/subr_ntoskrnl.c 151742 2005-10-27 17:08:57Z wpaul $");

#include <sys/ctype.h>
#include <sys/unistd.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <sys/callout.h>
#if __FreeBSD_version > 502113
#include <sys/kdb.h>
#endif
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/condvar.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <machine/resource.h>

#include <sys/bus.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

#include <compat/ndis/pe_var.h>
#include <compat/ndis/cfg_var.h>
#include <compat/ndis/resource_var.h>
#include <compat/ndis/ntoskrnl_var.h>
#include <compat/ndis/hal_var.h>
#include <compat/ndis/ndis_var.h>

#ifdef NTOSKRNL_DEBUG_TIMERS
static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
        sysctl_show_timers, "I", "Show ntoskrnl timer stats");
#endif

struct kdpc_queue {
        list_entry              kq_disp;
        struct thread           *kq_td;
        int                     kq_cpu;
        int                     kq_exit;
        int                     kq_running;
        kspin_lock              kq_lock;
        nt_kevent               kq_proc;
        nt_kevent               kq_done;
};

typedef struct kdpc_queue kdpc_queue;

struct wb_ext {
        struct cv               we_cv;
        struct thread           *we_td;
};

typedef struct wb_ext wb_ext;

#define NTOSKRNL_TIMEOUTS       256
#ifdef NTOSKRNL_DEBUG_TIMERS
static uint64_t ntoskrnl_timer_fires;
static uint64_t ntoskrnl_timer_sets;
static uint64_t ntoskrnl_timer_reloads;
static uint64_t ntoskrnl_timer_cancels;
#endif

struct callout_entry {
        struct callout          ce_callout;
        list_entry              ce_list;
};

typedef struct callout_entry callout_entry;

static struct list_entry ntoskrnl_calllist;
static struct mtx ntoskrnl_calllock;

static struct list_entry ntoskrnl_intlist;
static kspin_lock ntoskrnl_intlock;

static uint8_t RtlEqualUnicodeString(unicode_string *,
        unicode_string *, uint8_t);
static void RtlCopyUnicodeString(unicode_string *,
        unicode_string *);
static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
         void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
static irp *IoBuildAsynchronousFsdRequest(uint32_t,
        device_object *, void *, uint32_t, uint64_t *, io_status_block *);
static irp *IoBuildDeviceIoControlRequest(uint32_t,
        device_object *, void *, uint32_t, void *, uint32_t,
        uint8_t, nt_kevent *, io_status_block *);
static irp *IoAllocateIrp(uint8_t, uint8_t);
static void IoReuseIrp(irp *, uint32_t);
static void IoFreeIrp(irp *);
static void IoInitializeIrp(irp *, uint16_t, uint8_t);
static irp *IoMakeAssociatedIrp(irp *, uint8_t);
static uint32_t KeWaitForMultipleObjects(uint32_t,
        nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
        int64_t *, wait_block *);
static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
static void ntoskrnl_satisfy_multiple_waits(wait_block *);
static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
static void ntoskrnl_insert_timer(ktimer *, int);
static void ntoskrnl_remove_timer(ktimer *);
#ifdef NTOSKRNL_DEBUG_TIMERS
static void ntoskrnl_show_timers(void);
#endif
static void ntoskrnl_timercall(void *);
static void ntoskrnl_dpc_thread(void *);
static void ntoskrnl_destroy_dpc_threads(void);
static void ntoskrnl_destroy_workitem_threads(void);
static void ntoskrnl_workitem_thread(void *);
static void ntoskrnl_workitem(device_object *, void *);
static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
static uint16_t READ_REGISTER_USHORT(uint16_t *);
static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
static uint32_t READ_REGISTER_ULONG(uint32_t *);
static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
static uint8_t READ_REGISTER_UCHAR(uint8_t *);
static int64_t _allmul(int64_t, int64_t);
static int64_t _alldiv(int64_t, int64_t);
static int64_t _allrem(int64_t, int64_t);
static int64_t _allshr(int64_t, uint8_t);
static int64_t _allshl(int64_t, uint8_t);
static uint64_t _aullmul(uint64_t, uint64_t);
static uint64_t _aulldiv(uint64_t, uint64_t);
static uint64_t _aullrem(uint64_t, uint64_t);
static uint64_t _aullshr(uint64_t, uint8_t);
static uint64_t _aullshl(uint64_t, uint8_t);
static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
static slist_entry *ntoskrnl_popsl(slist_header *);
static void ExInitializePagedLookasideList(paged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeletePagedLookasideList(paged_lookaside_list *);
static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
        lookaside_alloc_func *, lookaside_free_func *,
        uint32_t, size_t, uint32_t, uint16_t);
static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
static slist_entry
        *ExInterlockedPushEntrySList(slist_header *,
        slist_entry *, kspin_lock *);
static slist_entry
        *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
static uint32_t InterlockedIncrement(volatile uint32_t *);
static uint32_t InterlockedDecrement(volatile uint32_t *);
static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
static uint32_t MmSizeOfMdl(void *, size_t);
static void *MmMapLockedPages(mdl *, uint8_t);
static void *MmMapLockedPagesSpecifyCache(mdl *,
        uint8_t, uint32_t, void *, uint32_t, uint32_t);
static void MmUnmapLockedPages(void *, mdl *);
static uint8_t MmIsAddressValid(void *);
static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
static size_t RtlCompareMemory(const void *, const void *, size_t);
static ndis_status RtlUnicodeStringToInteger(unicode_string *,
        uint32_t, uint32_t *);
static int atoi (const char *);
static long atol (const char *);
static int rand(void);
static void srand(unsigned int);
static void ntoskrnl_time(uint64_t *);
static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
static void ntoskrnl_thrfunc(void *);
static ndis_status PsCreateSystemThread(ndis_handle *,
        uint32_t, void *, ndis_handle, void *, void *, void *);
static ndis_status PsTerminateSystemThread(ndis_status);
static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
        uint32_t, void *, uint32_t *);
static void KeInitializeMutex(kmutant *, uint32_t);
static uint32_t KeReleaseMutex(kmutant *, uint8_t);
static uint32_t KeReadStateMutex(kmutant *);
static ndis_status ObReferenceObjectByHandle(ndis_handle,
        uint32_t, void *, uint8_t, void **, void **);
static void ObfDereferenceObject(void *);
static uint32_t ZwClose(ndis_handle);
static void *ntoskrnl_memset(void *, int, size_t);
static char *ntoskrnl_strstr(char *, char *);
static int ntoskrnl_toupper(int);
static int ntoskrnl_tolower(int);
static funcptr ntoskrnl_findwrap(funcptr);
static uint32_t DbgPrint(char *, ...);
static void DbgBreakPoint(void);
static void dummy(void);

static struct mtx ntoskrnl_dispatchlock;
static struct mtx ntoskrnl_interlock;
static kspin_lock ntoskrnl_cancellock;
static int ntoskrnl_kth = 0;
static struct nt_objref_head ntoskrnl_reflist;
static uma_zone_t mdl_zone;
static uma_zone_t iw_zone;
static struct kdpc_queue *kq_queues;
static struct kdpc_queue *wq_queues;
static int wq_idx = 0;

int
ntoskrnl_libinit()
{
        image_patch_table       *patch;
        int                     error;
        struct proc             *p;
        kdpc_queue              *kq;
        callout_entry           *e;
        int                     i;
        char                    name[64];

        mtx_init(&ntoskrnl_dispatchlock,
            "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
        mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
        KeInitializeSpinLock(&ntoskrnl_cancellock);
        KeInitializeSpinLock(&ntoskrnl_intlock);
        TAILQ_INIT(&ntoskrnl_reflist);

        InitializeListHead(&ntoskrnl_calllist);
        InitializeListHead(&ntoskrnl_intlist);
        mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);

        kq_queues = ExAllocatePoolWithTag(NonPagedPool,
#ifdef NTOSKRNL_MULTIPLE_DPCS
            sizeof(kdpc_queue) * mp_ncpus, 0);
#else
            sizeof(kdpc_queue), 0);
#endif

        if (kq_queues == NULL)
                return(ENOMEM);

        wq_queues = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(kdpc_queue) * WORKITEM_THREADS, 0);

        if (wq_queues == NULL)
                return(ENOMEM);

        /* Zero only what was allocated above. */
#ifdef NTOSKRNL_MULTIPLE_DPCS
        bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
#else
        bzero((char *)kq_queues, sizeof(kdpc_queue));
#endif
        bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);

        /*
         * Launch the DPC threads.
         */

#ifdef NTOSKRNL_MULTIPLE_DPCS
        for (i = 0; i < mp_ncpus; i++) {
#else
        for (i = 0; i < 1; i++) {
#endif
                kq = kq_queues + i;
                kq->kq_cpu = i;
                sprintf(name, "Windows DPC %d", i);
                error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, name);
                if (error)
                        panic("failed to launch DPC thread");
        }

        /*
         * Launch the workitem threads.
         */

        for (i = 0; i < WORKITEM_THREADS; i++) {
                kq = wq_queues + i;
                sprintf(name, "Windows Workitem %d", i);
                error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
                    RFHIGHPID, NDIS_KSTACK_PAGES, name);
                if (error)
                        panic("failed to launch workitem thread");
        }

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_wrap((funcptr)patch->ipt_func,
                    (funcptr *)&patch->ipt_wrap,
                    patch->ipt_argcnt, patch->ipt_ftype);
                patch++;
        }

        for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
                e = ExAllocatePoolWithTag(NonPagedPool,
                    sizeof(callout_entry), 0);
                if (e == NULL)
                        panic("failed to allocate timeouts");
                mtx_lock_spin(&ntoskrnl_calllock);
                InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
                mtx_unlock_spin(&ntoskrnl_calllock);
        }

        /*
         * MDLs are supposed to be variable size (they describe
         * buffers containing some number of pages, but we don't
         * know ahead of time how many pages that will be). But
         * always allocating them off the heap is very slow. As
         * a compromise, we create an MDL UMA zone big enough to
         * handle any buffer requiring up to 16 pages, and we
         * use those for any MDLs for buffers of 16 pages or less
         * in size. For buffers larger than that (which we assume
         * will be few and far between), we allocate the MDLs off
         * the heap.
         */

        mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        return(0);
}

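/*
 * A caller-side sketch of the compromise described above, assuming
 * MDL_ZONE_SIZE covers a 16-page buffer; the real policy lives in
 * IoAllocateMdl(), later in this file. Descriptors that fit in the
 * zone come from uma_zalloc(), oversized ones from the heap.
 */
static mdl *
ntoskrnl_mdl_sketch(void *vaddr, uint32_t len)
{
        if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
                return(ExAllocatePoolWithTag(NonPagedPool,
                    MmSizeOfMdl(vaddr, len), 0));
        return(uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO));
}
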
int
ntoskrnl_libfini()
{
        image_patch_table       *patch;
        callout_entry           *e;
        list_entry              *l;

        patch = ntoskrnl_functbl;
        while (patch->ipt_func != NULL) {
                windrv_unwrap(patch->ipt_wrap);
                patch++;
        }

        /* Stop the workitem queues. */
        ntoskrnl_destroy_workitem_threads();
        /* Stop the DPC queues. */
        ntoskrnl_destroy_dpc_threads();

        ExFreePool(kq_queues);
        ExFreePool(wq_queues);

        uma_zdestroy(mdl_zone);
        uma_zdestroy(iw_zone);

        mtx_lock_spin(&ntoskrnl_calllock);
        while(!IsListEmpty(&ntoskrnl_calllist)) {
                l = RemoveHeadList(&ntoskrnl_calllist);
                e = CONTAINING_RECORD(l, callout_entry, ce_list);
                mtx_unlock_spin(&ntoskrnl_calllock);
                ExFreePool(e);
                mtx_lock_spin(&ntoskrnl_calllock);
        }
        mtx_unlock_spin(&ntoskrnl_calllock);

        mtx_destroy(&ntoskrnl_dispatchlock);
        mtx_destroy(&ntoskrnl_interlock);
        mtx_destroy(&ntoskrnl_calllock);

        return(0);
}

/*
 * We need to be able to reference this externally from the wrapper;
 * GCC only generates a local implementation of memset.
 */
static void *
ntoskrnl_memset(buf, ch, size)
        void                    *buf;
        int                     ch;
        size_t                  size;
{
        return(memset(buf, ch, size));
}

static char *
ntoskrnl_strstr(s, find)
        char *s, *find;
{
        char c, sc;
        size_t len;

        if ((c = *find++) != 0) {
                len = strlen(find);
                do {
                        do {
                                if ((sc = *s++) == 0)
                                        return (NULL);
                        } while (sc != c);
                } while (strncmp(s, find, len) != 0);
                s--;
        }
        return ((char *)s);
}

static int
ntoskrnl_toupper(c)
        int                     c;
{
        return(toupper(c));
}

static int
ntoskrnl_tolower(c)
        int                     c;
{
        return(tolower(c));
}

static uint8_t
RtlEqualUnicodeString(str1, str2, caseinsensitive)
        unicode_string          *str1;
        unicode_string          *str2;
        uint8_t                 caseinsensitive;
{
        int                     i;

        if (str1->us_len != str2->us_len)
                return(FALSE);

        for (i = 0; i < str1->us_len; i++) {
                if (caseinsensitive == TRUE) {
                        if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
                            toupper((char)(str2->us_buf[i] & 0xFF)))
                                return(FALSE);
                } else {
                        if (str1->us_buf[i] != str2->us_buf[i])
                                return(FALSE);
                }
        }

        return(TRUE);
}

static void
RtlCopyUnicodeString(dest, src)
        unicode_string          *dest;
        unicode_string          *src;
{

        if (dest->us_maxlen >= src->us_len)
                dest->us_len = src->us_len;
        else
                dest->us_len = dest->us_maxlen;
        memcpy(dest->us_buf, src->us_buf, dest->us_len);
        return;
}

static void
ntoskrnl_ascii_to_unicode(ascii, unicode, len)
        char                    *ascii;
        uint16_t                *unicode;
        int                     len;
{
        int                     i;
        uint16_t                *ustr;

        ustr = unicode;
        for (i = 0; i < len; i++) {
                *ustr = (uint16_t)ascii[i];
                ustr++;
        }

        return;
}

static void
ntoskrnl_unicode_to_ascii(unicode, ascii, len)
        uint16_t                *unicode;
        char                    *ascii;
        int                     len;
{
        int                     i;
        uint8_t                 *astr;

        astr = ascii;
        for (i = 0; i < len / 2; i++) {
                *astr = (uint8_t)unicode[i];
                astr++;
        }

        return;
}

uint32_t
RtlUnicodeStringToAnsiString(dest, src, allocate)
        ansi_string             *dest;
        unicode_string          *src;
        uint8_t                 allocate;
{
        if (dest == NULL || src == NULL)
                return(NDIS_STATUS_FAILURE);

        dest->as_len = src->us_len / 2;
        if (dest->as_maxlen < dest->as_len)
                dest->as_len = dest->as_maxlen;

        if (allocate == TRUE) {
                dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
                    (src->us_len / 2) + 1, 0);
                if (dest->as_buf == NULL)
                        return(STATUS_INSUFFICIENT_RESOURCES);
                dest->as_len = dest->as_maxlen = src->us_len / 2;
        } else {
                dest->as_len = src->us_len / 2; /* XXX */
                if (dest->as_maxlen < dest->as_len)
                        dest->as_len = dest->as_maxlen;
        }

        ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
            dest->as_len * 2);

        return (STATUS_SUCCESS);
}

uint32_t
RtlAnsiStringToUnicodeString(dest, src, allocate)
        unicode_string          *dest;
        ansi_string             *src;
        uint8_t                 allocate;
{
        if (dest == NULL || src == NULL)
                return(NDIS_STATUS_FAILURE);

        if (allocate == TRUE) {
                dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
                    src->as_len * 2, 0);
                if (dest->us_buf == NULL)
                        return(STATUS_INSUFFICIENT_RESOURCES);
                dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
        } else {
                dest->us_len = src->as_len * 2; /* XXX */
                if (dest->us_maxlen < dest->us_len)
                        dest->us_len = dest->us_maxlen;
        }

        ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
            dest->us_len / 2);

        return (STATUS_SUCCESS);
}

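/*
 * Usage sketch for the converters above (hypothetical caller, not
 * part of the driver API): round-trip an ASCII name, letting the
 * routines allocate their own buffers; both allocate from the same
 * pool, so ExFreePool() releases them.
 */
static void
ntoskrnl_strconv_sketch(void)
{
        ansi_string             as, out;
        unicode_string          us;

        as.as_buf = "ndis0";
        as.as_len = as.as_maxlen = sizeof("ndis0") - 1;

        if (RtlAnsiStringToUnicodeString(&us, &as, TRUE) != STATUS_SUCCESS)
                return;
        if (RtlUnicodeStringToAnsiString(&out, &us, TRUE) == STATUS_SUCCESS)
                ExFreePool(out.as_buf);
        ExFreePool(us.us_buf);
}
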
void *
ExAllocatePoolWithTag(pooltype, len, tag)
        uint32_t                pooltype;
        size_t                  len;
        uint32_t                tag;
{
        void                    *buf;

        buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
        if (buf == NULL)
                return(NULL);

        return(buf);
}

void
ExFreePool(buf)
        void                    *buf;
{
        free(buf, M_DEVBUF);
        return;
}

uint32_t
IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
        driver_object           *drv;
        void                    *clid;
        uint32_t                extlen;
        void                    **ext;
{
        custom_extension        *ce;

        ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
            + extlen, 0);

        if (ce == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        ce->ce_clid = clid;
        InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));

        *ext = (void *)(ce + 1);

        return(STATUS_SUCCESS);
}

void *
IoGetDriverObjectExtension(drv, clid)
        driver_object           *drv;
        void                    *clid;
{
        list_entry              *e;
        custom_extension        *ce;

        /*
         * Sanity check. Our dummy bus drivers don't have
         * any driver extensions.
         */

        if (drv->dro_driverext == NULL)
                return(NULL);

        e = drv->dro_driverext->dre_usrext.nle_flink;
        while (e != &drv->dro_driverext->dre_usrext) {
                ce = (custom_extension *)e;
                if (ce->ce_clid == clid)
                        return((void *)(ce + 1));
                e = e->nle_flink;
        }

        return(NULL);
}

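/*
 * Usage sketch (hypothetical client id): a driver allocates a private
 * extension once and can recover the same memory later from any code
 * path that still has the driver_object. The address of a static
 * variable serves as the unique client id.
 */
static void
ntoskrnl_drvext_sketch(driver_object *drv)
{
        static int              clid;
        void                    *ext;

        if (IoAllocateDriverObjectExtension(drv, &clid,
            sizeof(uint64_t), &ext) != STATUS_SUCCESS)
                return;
        /* Lookup returns the same pointer that was handed out above. */
        ext = IoGetDriverObjectExtension(drv, &clid);
}
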
uint32_t
IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
        driver_object           *drv;
        uint32_t                devextlen;
        unicode_string          *devname;
        uint32_t                devtype;
        uint32_t                devchars;
        uint8_t                 exclusive;
        device_object           **newdev;
{
        device_object           *dev;

        dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
        if (dev == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        dev->do_type = devtype;
        dev->do_drvobj = drv;
        dev->do_currirp = NULL;
        dev->do_flags = 0;

        if (devextlen) {
                dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
                    devextlen, 0);

                if (dev->do_devext == NULL) {
                        ExFreePool(dev);
                        return(STATUS_INSUFFICIENT_RESOURCES);
                }

                bzero(dev->do_devext, devextlen);
        } else
                dev->do_devext = NULL;

        dev->do_size = sizeof(device_object) + devextlen;
        dev->do_refcnt = 1;
        dev->do_attacheddev = NULL;
        dev->do_nextdev = NULL;
        dev->do_devtype = devtype;
        dev->do_stacksize = 1;
        dev->do_alignreq = 1;
        dev->do_characteristics = devchars;
        dev->do_iotimer = NULL;
        KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);

        /*
         * The vpb (volume parameter block) is used for disk/tape
         * devices, but we don't support those. (Yet.)
         */
        dev->do_vpb = NULL;

        dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
            sizeof(devobj_extension), 0);

        if (dev->do_devobj_ext == NULL) {
                if (dev->do_devext != NULL)
                        ExFreePool(dev->do_devext);
                ExFreePool(dev);
                return(STATUS_INSUFFICIENT_RESOURCES);
        }

        dev->do_devobj_ext->dve_type = 0;
        dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
        dev->do_devobj_ext->dve_devobj = dev;

        /*
         * Attach this device to the driver object's list
         * of devices. Note: this is not the same as attaching
         * the device to the device stack. The driver's AddDevice
         * routine must explicitly call IoAttachDeviceToDeviceStack()
         * to do that.
         */

        if (drv->dro_devobj == NULL) {
                drv->dro_devobj = dev;
                dev->do_nextdev = NULL;
        } else {
                dev->do_nextdev = drv->dro_devobj;
                drv->dro_devobj = dev;
        }

        *newdev = dev;

        return(STATUS_SUCCESS);
}

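/*
 * Sketch of the AddDevice pattern described above, with hypothetical
 * arguments: IoCreateDevice() only links the new device into the
 * driver's own device list; stacking it on top of the physical
 * device object is a separate, explicit step.
 */
static uint32_t
ntoskrnl_adddevice_sketch(driver_object *drv, device_object *pdo)
{
        device_object           *fdo;
        uint32_t                status;

        status = IoCreateDevice(drv, 0, NULL, 0, 0, FALSE, &fdo);
        if (status != STATUS_SUCCESS)
                return(status);
        IoAttachDeviceToDeviceStack(fdo, pdo);
        return(STATUS_SUCCESS);
}
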
void
IoDeleteDevice(dev)
        device_object           *dev;
{
        device_object           *prev;

        if (dev == NULL)
                return;

        if (dev->do_devobj_ext != NULL)
                ExFreePool(dev->do_devobj_ext);

        if (dev->do_devext != NULL)
                ExFreePool(dev->do_devext);

        /* Unlink the device from the driver's device list. */

        prev = dev->do_drvobj->dro_devobj;
        if (prev == dev)
                dev->do_drvobj->dro_devobj = dev->do_nextdev;
        else {
                while (prev->do_nextdev != dev)
                        prev = prev->do_nextdev;
                prev->do_nextdev = dev->do_nextdev;
        }

        ExFreePool(dev);

        return;
}

device_object *
IoGetAttachedDevice(dev)
        device_object           *dev;
{
        device_object           *d;

        if (dev == NULL)
                return (NULL);

        d = dev;

        while (d->do_attacheddev != NULL)
                d = d->do_attacheddev;

        return (d);
}

static irp *
IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;

        ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
        if (ip == NULL)
                return(NULL);
        ip->irp_usrevent = event;

        return(ip);
}

static irp *
IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
        uint32_t                func;
        device_object           *dobj;
        void                    *buf;
        uint32_t                len;
        uint64_t                *off;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return(NULL);

        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = func;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;

        ip->irp_userbuf = buf;

        if (dobj->do_flags & DO_BUFFERED_IO) {
                ip->irp_assoc.irp_sysbuf =
                    ExAllocatePoolWithTag(NonPagedPool, len, 0);
                if (ip->irp_assoc.irp_sysbuf == NULL) {
                        IoFreeIrp(ip);
                        return(NULL);
                }
                bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
        }

        if (dobj->do_flags & DO_DIRECT_IO) {
                ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
                if (ip->irp_mdl == NULL) {
                        if (ip->irp_assoc.irp_sysbuf != NULL)
                                ExFreePool(ip->irp_assoc.irp_sysbuf);
                        IoFreeIrp(ip);
                        return(NULL);
                }
                ip->irp_userbuf = NULL;
                ip->irp_assoc.irp_sysbuf = NULL;
        }

        if (func == IRP_MJ_READ) {
                sl->isl_parameters.isl_read.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_read.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_read.isl_byteoff = 0;
        }

        if (func == IRP_MJ_WRITE) {
                sl->isl_parameters.isl_write.isl_len = len;
                if (off != NULL)
                        sl->isl_parameters.isl_write.isl_byteoff = *off;
                else
                        sl->isl_parameters.isl_write.isl_byteoff = 0;
        }

        return(ip);
}

static irp *
IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
    isinternal, event, status)
        uint32_t                iocode;
        device_object           *dobj;
        void                    *ibuf;
        uint32_t                ilen;
        void                    *obuf;
        uint32_t                olen;
        uint8_t                 isinternal;
        nt_kevent               *event;
        io_status_block         *status;
{
        irp                     *ip;
        io_stack_location       *sl;
        uint32_t                buflen;

        ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
        if (ip == NULL)
                return(NULL);
        ip->irp_usrevent = event;
        ip->irp_usriostat = status;
        ip->irp_tail.irp_overlay.irp_thread = NULL;

        sl = IoGetNextIrpStackLocation(ip);
        sl->isl_major = isinternal == TRUE ?
            IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
        sl->isl_minor = 0;
        sl->isl_flags = 0;
        sl->isl_ctl = 0;
        sl->isl_devobj = dobj;
        sl->isl_fileobj = NULL;
        sl->isl_completionfunc = NULL;
        sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
        sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
        sl->isl_parameters.isl_ioctl.isl_obuflen = olen;

        switch(IO_METHOD(iocode)) {
        case METHOD_BUFFERED:
                if (ilen > olen)
                        buflen = ilen;
                else
                        buflen = olen;
                if (buflen) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return(NULL);
                        }
                }
                if (ilen && ibuf != NULL) {
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                        bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
                            buflen - ilen);
                } else
                        bzero(ip->irp_assoc.irp_sysbuf, ilen);
                ip->irp_userbuf = obuf;
                break;
        case METHOD_IN_DIRECT:
        case METHOD_OUT_DIRECT:
                if (ilen && ibuf != NULL) {
                        ip->irp_assoc.irp_sysbuf =
                            ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
                        if (ip->irp_assoc.irp_sysbuf == NULL) {
                                IoFreeIrp(ip);
                                return(NULL);
                        }
                        bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
                }
                if (olen && obuf != NULL) {
                        ip->irp_mdl = IoAllocateMdl(obuf, olen,
                            FALSE, FALSE, ip);
                        /*
                         * Normally we would MmProbeAndLockPages()
                         * here, but we don't have to in our
                         * implementation.
                         */
                }
                break;
        case METHOD_NEITHER:
                ip->irp_userbuf = obuf;
                sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
                break;
        default:
                break;
        }

        /*
         * Ideally, we should associate this IRP with the calling
         * thread here.
         */

        return (ip);
}

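/*
 * Caller-side sketch for the METHOD_BUFFERED path above, using a
 * hypothetical ioctl code whose low two bits select METHOD_BUFFERED;
 * the event/status pair is signalled when the IRP completes.
 */
static uint32_t
ntoskrnl_ioctl_sketch(device_object *dobj, void *ibuf, uint32_t ilen)
{
        irp                     *ip;
        nt_kevent               event;
        io_status_block         iosb;

        KeInitializeEvent(&event, EVENT_TYPE_NOTIFY, FALSE);
        ip = IoBuildDeviceIoControlRequest(0x80002000, dobj, ibuf, ilen,
            NULL, 0, FALSE, &event, &iosb);
        if (ip == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);
        return(IofCallDriver(dobj, ip));
}
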
static irp *
IoAllocateIrp(stsize, chargequota)
        uint8_t                 stsize;
        uint8_t                 chargequota;
{
        irp                     *i;

        i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
        if (i == NULL)
                return (NULL);

        IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);

        return (i);
}

static irp *
IoMakeAssociatedIrp(ip, stsize)
        irp                     *ip;
        uint8_t                 stsize;
{
        irp                     *associrp;

        associrp = IoAllocateIrp(stsize, FALSE);
        if (associrp == NULL)
                return(NULL);

        mtx_lock(&ntoskrnl_dispatchlock);
        associrp->irp_flags |= IRP_ASSOCIATED_IRP;
        associrp->irp_tail.irp_overlay.irp_thread =
            ip->irp_tail.irp_overlay.irp_thread;
        associrp->irp_assoc.irp_master = ip;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return(associrp);
}

static void
IoFreeIrp(ip)
        irp                     *ip;
{
        ExFreePool(ip);
        return;
}

static void
IoInitializeIrp(io, psize, ssize)
        irp                     *io;
        uint16_t                psize;
        uint8_t                 ssize;
{
        bzero((char *)io, IoSizeOfIrp(ssize));
        io->irp_size = psize;
        io->irp_stackcnt = ssize;
        io->irp_currentstackloc = ssize;
        InitializeListHead(&io->irp_thlist);
        io->irp_tail.irp_overlay.irp_csl =
            (io_stack_location *)(io + 1) + ssize;

        return;
}

static void
IoReuseIrp(ip, status)
        irp                     *ip;
        uint32_t                status;
{
        uint8_t                 allocflags;

        allocflags = ip->irp_allocflags;
        IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
        ip->irp_iostat.isb_status = status;
        ip->irp_allocflags = allocflags;

        return;
}

void
IoAcquireCancelSpinLock(irql)
        uint8_t                 *irql;
{
        KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
        return;
}

void
IoReleaseCancelSpinLock(irql)
        uint8_t                 irql;
{
        KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
        return;
}

uint8_t
IoCancelIrp(irp *ip)
{
        cancel_func             cfunc;

        IoAcquireCancelSpinLock(&ip->irp_cancelirql);
        cfunc = IoSetCancelRoutine(ip, NULL);
        ip->irp_cancel = TRUE;
        /*
         * IoSetCancelRoutine() just cleared irp_cancelfunc, so test
         * the routine we atomically swapped out instead.
         */
        if (cfunc == NULL) {
                IoReleaseCancelSpinLock(ip->irp_cancelirql);
                return(FALSE);
        }
        MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
        return(TRUE);
}

uint32_t
IofCallDriver(dobj, ip)
        device_object           *dobj;
        irp                     *ip;
{
        driver_object           *drvobj;
        io_stack_location       *sl;
        uint32_t                status;
        driver_dispatch         disp;

        drvobj = dobj->do_drvobj;

        if (ip->irp_currentstackloc <= 0)
                panic("IoCallDriver(): out of stack locations");

        IoSetNextIrpStackLocation(ip);
        sl = IoGetCurrentIrpStackLocation(ip);

        sl->isl_devobj = dobj;

        disp = drvobj->dro_dispatch[sl->isl_major];
        status = MSCALL2(disp, dobj, ip);

        return(status);
}

void
IofCompleteRequest(ip, prioboost)
        irp                     *ip;
        uint8_t                 prioboost;
{
        uint32_t                i;
        uint32_t                status;
        device_object           *dobj;
        io_stack_location       *sl;
        completion_func         cf;

        ip->irp_pendingreturned =
            IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
        sl = (io_stack_location *)(ip + 1);

        for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
                if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
                        IoSkipCurrentIrpStackLocation(ip);
                        dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
                } else
                        dobj = NULL;

                if (sl[i].isl_completionfunc != NULL &&
                    ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
                    sl[i].isl_ctl & SL_INVOKE_ON_SUCCESS) ||
                    (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
                    sl[i].isl_ctl & SL_INVOKE_ON_ERROR) ||
                    (ip->irp_cancel == TRUE &&
                    sl[i].isl_ctl & SL_INVOKE_ON_CANCEL))) {
                        cf = sl[i].isl_completionfunc;
                        status = MSCALL3(cf, dobj, ip,
                            sl[i].isl_completionctx);
                        if (status == STATUS_MORE_PROCESSING_REQUIRED)
                                return;
                }

                if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
                    SL_PENDING_RETURNED)
                        ip->irp_pendingreturned = TRUE;
        }

        /* Handle any associated IRPs. */

        if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
                uint32_t                masterirpcnt;
                irp                     *masterirp;
                mdl                     *m;

                masterirp = ip->irp_assoc.irp_master;
                masterirpcnt =
                    InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);

                while ((m = ip->irp_mdl) != NULL) {
                        ip->irp_mdl = m->mdl_next;
                        IoFreeMdl(m);
                }
                IoFreeIrp(ip);
                if (masterirpcnt == 0)
                        IoCompleteRequest(masterirp, IO_NO_INCREMENT);
                return;
        }

        /* With any luck, these conditions will never arise. */

        if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
                if (ip->irp_usriostat != NULL)
                        *ip->irp_usriostat = ip->irp_iostat;
                if (ip->irp_usrevent != NULL)
                        KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
                if (ip->irp_flags & IRP_PAGING_IO) {
                        if (ip->irp_mdl != NULL)
                                IoFreeMdl(ip->irp_mdl);
                        IoFreeIrp(ip);
                }
        }

        return;
}

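/*
 * Sketch of a completion routine shaped for the loop above
 * (hypothetical, and in real use it would be wrapped for the Windows
 * calling convention): returning STATUS_MORE_PROCESSING_REQUIRED
 * halts completion processing so the IRP's owner can finish or
 * reuse it later.
 */
static uint32_t
ntoskrnl_complete_sketch(device_object *dobj, irp *ip, void *ctx)
{
        nt_kevent               *done = ctx;

        KeSetEvent(done, IO_NO_INCREMENT, FALSE);
        return(STATUS_MORE_PROCESSING_REQUIRED);
}
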

void
ntoskrnl_intr(arg)
        void                    *arg;
{
        kinterrupt              *iobj;
        uint8_t                 irql;
        list_entry              *l;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        l = ntoskrnl_intlist.nle_flink;
        while (l != &ntoskrnl_intlist) {
                iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
                MSCALL1(iobj->ki_svcfunc, iobj->ki_svcctx);
                l = l->nle_flink;
        }
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return;
}

uint8_t
KeAcquireInterruptSpinLock(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;
        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        return(irql);
}

void
KeReleaseInterruptSpinLock(iobj, irql)
        kinterrupt              *iobj;
        uint8_t                 irql;
{
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);
        return;
}

uint8_t
KeSynchronizeExecution(iobj, syncfunc, syncctx)
        kinterrupt              *iobj;
        void                    *syncfunc;
        void                    *syncctx;
{
        uint8_t                 irql;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        MSCALL1(syncfunc, syncctx);
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        return(TRUE);
}

/*
 * IoConnectInterrupt() is passed only the interrupt vector and
 * irql that a device wants to use, but no device-specific tag
 * of any kind. This conflicts rather badly with FreeBSD's
 * bus_setup_intr(), which needs the device_t for the device
 * requesting interrupt delivery. In order to bypass this
 * inconsistency, we implement a second level of interrupt
 * dispatching on top of bus_setup_intr(). All devices use
 * ntoskrnl_intr() as their ISR, and any device requesting
 * interrupts will be registered with ntoskrnl_intr()'s interrupt
 * dispatch list. When an interrupt arrives, we walk the list
 * and invoke all the registered ISRs. This effectively makes all
 * interrupts shared, but it's the only way to duplicate the
 * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 */

uint32_t
IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
        syncirql, imode, shared, affinity, savefloat)
        kinterrupt              **iobj;
        void                    *svcfunc;
        void                    *svcctx;
        uint32_t                vector;
        kspin_lock              *lock;
        uint8_t                 irql;
        uint8_t                 syncirql;
        uint8_t                 imode;
        uint8_t                 shared;
        uint32_t                affinity;
        uint8_t                 savefloat;
{
        uint8_t                 curirql;

        *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
        if (*iobj == NULL)
                return(STATUS_INSUFFICIENT_RESOURCES);

        (*iobj)->ki_svcfunc = svcfunc;
        (*iobj)->ki_svcctx = svcctx;

        if (lock == NULL) {
                KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
                (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
        } else
                (*iobj)->ki_lock = lock;

        KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
        InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, curirql);

        return(STATUS_SUCCESS);
}

void
IoDisconnectInterrupt(iobj)
        kinterrupt              *iobj;
{
        uint8_t                 irql;

        if (iobj == NULL)
                return;

        KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
        RemoveEntryList((&iobj->ki_list));
        KeReleaseSpinLock(&ntoskrnl_intlock, irql);

        ExFreePool(iobj);

        return;
}

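/*
 * Sketch of the shared-dispatch scheme described above, with a
 * hypothetical ISR pointer: connecting just queues the routine on
 * ntoskrnl_intr()'s dispatch list, and disconnecting dequeues it.
 * The routine must use the Windows calling convention, since it is
 * invoked through MSCALL1().
 */
static void
ntoskrnl_isr_sketch(void *winisr, void *ctx)
{
        kinterrupt              *ih;

        if (IoConnectInterrupt(&ih, winisr, ctx, NULL, 0, 0,
            0, 0, TRUE, 0, FALSE) != STATUS_SUCCESS)
                return;
        /* ... interrupts now fan out to winisr via ntoskrnl_intr() ... */
        IoDisconnectInterrupt(ih);
}
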
device_object *
IoAttachDeviceToDeviceStack(src, dst)
        device_object           *src;
        device_object           *dst;
{
        device_object           *attached;

        mtx_lock(&ntoskrnl_dispatchlock);
        attached = IoGetAttachedDevice(dst);
        attached->do_attacheddev = src;
        src->do_attacheddev = NULL;
        src->do_stacksize = attached->do_stacksize + 1;
        mtx_unlock(&ntoskrnl_dispatchlock);

        return(attached);
}

void
IoDetachDevice(topdev)
        device_object           *topdev;
{
        device_object           *tail;

        mtx_lock(&ntoskrnl_dispatchlock);

        /* First, break the chain. */
        tail = topdev->do_attacheddev;
        if (tail == NULL) {
                mtx_unlock(&ntoskrnl_dispatchlock);
                return;
        }
        topdev->do_attacheddev = tail->do_attacheddev;
        topdev->do_refcnt--;

        /* Now reduce the stacksize count for the tail objects. */

        tail = topdev->do_attacheddev;
        while (tail != NULL) {
                tail->do_stacksize--;
                tail = tail->do_attacheddev;
        }

        mtx_unlock(&ntoskrnl_dispatchlock);

        return;
}

/*
 * For the most part, an object is considered signalled if
 * dh_sigstate == TRUE. The exception is for mutant objects
 * (mutexes), where the logic works like this:
 *
 * - If the thread already owns the object and sigstate is
 *   less than or equal to 0, then the object is considered
 *   signalled (recursive acquisition).
 * - If dh_sigstate == 1, the object is also considered
 *   signalled.
 */

static int
ntoskrnl_is_signalled(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        if (obj->dh_type == DISP_TYPE_MUTANT) {
                km = (kmutant *)obj;
                if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
                    obj->dh_sigstate == 1)
                        return(TRUE);
                return(FALSE);
        }

        if (obj->dh_sigstate > 0)
                return(TRUE);
        return(FALSE);
}

static void
ntoskrnl_satisfy_wait(obj, td)
        nt_dispatch_header      *obj;
        struct thread           *td;
{
        kmutant                 *km;

        switch (obj->dh_type) {
        case DISP_TYPE_MUTANT:
                km = (struct kmutant *)obj;
                obj->dh_sigstate--;
                /*
                 * If sigstate reaches 0, the mutex is now
                 * non-signalled (the new thread owns it).
                 */
                if (obj->dh_sigstate == 0) {
                        km->km_ownerthread = td;
                        if (km->km_abandoned == TRUE)
                                km->km_abandoned = FALSE;
                }
                break;
        /* Synchronization objects get reset to unsignalled. */
        case DISP_TYPE_SYNCHRONIZATION_EVENT:
        case DISP_TYPE_SYNCHRONIZATION_TIMER:
                obj->dh_sigstate = 0;
                break;
        case DISP_TYPE_SEMAPHORE:
                obj->dh_sigstate--;
                break;
        default:
                break;
        }

        return;
}

static void
ntoskrnl_satisfy_multiple_waits(wb)
        wait_block              *wb;
{
        wait_block              *cur;
        struct thread           *td;

        cur = wb;
        td = wb->wb_kthread;

        do {
                ntoskrnl_satisfy_wait(cur->wb_object, td);
                cur->wb_awakened = TRUE;
                cur = cur->wb_next;
        } while (cur != wb);

        return;
}

/* Always called with dispatcher lock held. */
static void
ntoskrnl_waittest(obj, increment)
        nt_dispatch_header      *obj;
        uint32_t                increment;
{
        wait_block              *w, *next;
        list_entry              *e;
        struct thread           *td;
        wb_ext                  *we;
        int                     satisfied;

        /*
         * Once an object has been signalled, we walk its list of
         * wait blocks. If a wait block can be awakened, then satisfy
         * waits as necessary and wake the thread.
         *
         * The rules work like this:
         *
         * If a wait block is marked as WAITTYPE_ANY, then
         * we can satisfy the wait conditions on the current
         * object and wake the thread right away. Satisfying
         * the wait also has the effect of breaking us out
         * of the search loop.
         *
         * If the object is marked as WAITTYPE_ALL, then the
         * wait block will be part of a circularly linked
         * list of wait blocks belonging to a waiting thread
         * that's sleeping in KeWaitForMultipleObjects(). In
         * order to wake the thread, all the objects in the
         * wait list must be in the signalled state. If they
         * are, we then satisfy all of them and wake the
         * thread.
         */

        e = obj->dh_waitlisthead.nle_flink;

        while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
                w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
                we = w->wb_ext;
                td = we->we_td;
                satisfied = FALSE;
                if (w->wb_waittype == WAITTYPE_ANY) {
                        /*
                         * Thread can be awakened if
                         * any wait is satisfied.
                         */
                        ntoskrnl_satisfy_wait(obj, td);
                        satisfied = TRUE;
                        w->wb_awakened = TRUE;
                } else {
                        /*
                         * Thread can only be woken up
                         * if all waits are satisfied.
                         * If the thread is waiting on multiple
                         * objects, they should all be linked
                         * through the wb_next pointers in the
                         * wait blocks.
                         */
 1510                         satisfied = TRUE;
 1511                         next = w->wb_next;
 1512                         while (next != w) {
 1513                                 if (ntoskrnl_is_signalled(next->wb_object, td) == FALSE) {
 1514                                         satisfied = FALSE;
 1515                                         break;
 1516                                 }
 1517                                 next = next->wb_next;
 1518                         }
 1519                         if (satisfied == TRUE)
 1520                                 ntoskrnl_satisfy_multiple_waits(w);
 1521                 }
 1522                 if (satisfied == TRUE)
 1523                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
 1524                             (increment * 4));
 1525 
 1526                 e = e->nle_flink;
 1527         }
 1528 
 1529         return;
 1530 }
 1531 
 1532 static void 
 1533 ntoskrnl_time(tval)
 1534         uint64_t                *tval;
 1535 {
 1536         struct timespec         ts;
 1537 
 1538         nanotime(&ts);
 1539         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
 1540             11644473600ULL * 10000000;
 1541 
 1542         return;
 1543 }
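/*
 * Windows timestamps count 100-nanosecond intervals since January 1,
 * 1601, while a timespec counts from the Unix epoch (1970); the two
 * epochs are 11644473600 seconds apart. A worked example of the same
 * conversion (hypothetical helper, illustrative only, not compiled):
 */
#if 0
static uint64_t
example_unix_to_nttime(time_t sec, long nsec)
{
	/* sec = 0, nsec = 0 (the Unix epoch) yields 116444736000000000. */
	return ((uint64_t)nsec / 100 + (uint64_t)sec * 10000000 +
	    11644473600ULL * 10000000);
}
#endif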
 1544 
 1545 /*
 1546  * KeWaitForSingleObject() is a tricky beast, because it can be used
 1547  * with several different object types: semaphores, timers, events,
 1548  * mutexes and threads. Semaphores don't appear very often, but the
 1549  * other object types are quite common. KeWaitForSingleObject() is
 1550  * what's normally used to acquire a mutex, and it can be used to
 1551  * wait for thread termination.
 1552  *
 1553  * The Windows NDIS API is implemented in terms of Windows kernel
 1554  * primitives, and some of the object manipulation is duplicated in
 1555  * NDIS. For example, NDIS has timers and events, which are actually
 1556  * Windows kevents and ktimers. Now, you're supposed to only use the
 1557  * NDIS variants of these objects within the confines of the NDIS API,
 1558  * but there are some naughty developers out there who will use
 1559  * KeWaitForSingleObject() on NDIS timer and event objects, so we
 1560  * have to support that as well. Consequently, our NDIS timer and event
 1561  * code has to be closely tied into our ntoskrnl timer and event code,
 1562  * just as it is in Windows.
 1563  *
 1564  * KeWaitForSingleObject() may do different things for different kinds
 1565  * of objects:
 1566  *
 1567  * - For events, we check if the event has been signalled. If the
 1568  *   event is already in the signalled state, we just return immediately,
 1569  *   otherwise we wait for it to be set to the signalled state by someone
 1570  *   else calling KeSetEvent(). Events can be either synchronization or
 1571  *   notification events.
 1572  *
 1573  * - For timers, if the timer has already fired and the timer is in
 1574  *   the signalled state, we just return, otherwise we wait on the
 1575  *   timer. Unlike an event, timers get signalled automatically when
 1576  *   they expire rather than someone having to trip them manually.
 1577  *   Timers initialized with KeInitializeTimer() are always notification
 1578  *   events: KeInitializeTimerEx() lets you initialize a timer as
 1579  *   either a notification or synchronization event.
 1580  *
 1581  * - For mutexes, we try to acquire the mutex and if we can't, we wait
 1582  *   on the mutex until it's available and then grab it. When a mutex is
 1583  *   released, it enters the signalled state, which wakes up one of the
 1584  *   threads waiting to acquire it. Mutexes are always synchronization
 1585  *   events.
 1586  *
 1587  * - For threads, the only thing we do is wait until the thread object
 1588  *   enters a signalled state, which occurs when the thread terminates.
 1589  *   Threads are always notification events.
 1590  *
 1591  * A notification event wakes up all threads waiting on an object. A
 1592  * synchronization event wakes up just one. Also, a synchronization event
 1593  * is auto-clearing, which means we automatically set the event back to
 1594  * the non-signalled state once the wakeup is done.
 1595  */
 1596 
 1597 uint32_t
 1598 KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
 1599         void                    *arg;
 1600         uint32_t                reason;
 1601         uint32_t                mode;
 1602         uint8_t                 alertable;
 1603         int64_t                 *duetime;
 1604 {
 1605         wait_block              w;
 1606         struct thread           *td = curthread;
 1607         struct timeval          tv;
 1608         int                     error = 0;
 1609         uint64_t                curtime;
 1610         wb_ext                  we;
 1611         nt_dispatch_header      *obj;
 1612 
 1613         obj = arg;
 1614 
 1615         if (obj == NULL)
 1616                 return(STATUS_INVALID_PARAMETER);
 1617 
 1618         mtx_lock(&ntoskrnl_dispatchlock);
 1619 
 1620         cv_init(&we.we_cv, "KeWFS");
 1621         we.we_td = td;
 1622 
 1623         /*
 1624          * Check to see if this object is already signalled,
 1625          * and just return without waiting if it is.
 1626          */
 1627         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
 1628                 /* Sanity check the signal state value. */
 1629                 if (obj->dh_sigstate != INT32_MIN) {
 1630                         ntoskrnl_satisfy_wait(obj, curthread);
 1631                         mtx_unlock(&ntoskrnl_dispatchlock);
 1632                         return (STATUS_SUCCESS);
 1633                 } else {
 1634                         /*
 1635                          * There's a limit to how many times we can
 1636                          * recursively acquire a mutant. If we hit
 1637                          * the limit, something is very wrong.
 1638                          */
 1639                         if (obj->dh_type == DISP_TYPE_MUTANT) {
 1640                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1641                                 panic("mutant limit exceeded");
 1642                         }
 1643                 }
 1644         }
 1645 
 1646         bzero((char *)&w, sizeof(wait_block));
 1647         w.wb_object = obj;
 1648         w.wb_ext = &we;
 1649         w.wb_waittype = WAITTYPE_ANY;
 1650         w.wb_next = &w;
 1651         w.wb_waitkey = 0;
 1652         w.wb_awakened = FALSE;
 1653         w.wb_oldpri = td->td_priority;
 1654 
 1655         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
 1656 
 1657         /*
 1658          * The timeout value is specified in 100 nanosecond units
 1659          * and can be a positive or negative number. If it's positive,
 1660          * then the duetime is absolute, and we need to convert it
 1661          * to an offset relative to now in order to use it.
 1662          * If it's negative, then the duetime is relative and we
 1663          * just have to convert the units.
 1664          */
 1665 
 1666         if (duetime != NULL) {
 1667                 if (*duetime < 0) {
 1668                         tv.tv_sec = - (*duetime) / 10000000;
 1669                         tv.tv_usec = (- (*duetime) / 10) -
 1670                             (tv.tv_sec * 1000000);
 1671                 } else {
 1672                         ntoskrnl_time(&curtime);
 1673                         if (*duetime < curtime)
 1674                                 tv.tv_sec = tv.tv_usec = 0;
 1675                         else {
 1676                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1677                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1678                                     (tv.tv_sec * 1000000);
 1679                         }
 1680                 }
 1681         }
 1682 
 1683         if (duetime == NULL)
 1684                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1685         else
 1686                 error = cv_timedwait(&we.we_cv,
 1687                     &ntoskrnl_dispatchlock, tvtohz(&tv));
 1688 
 1689         RemoveEntryList(&w.wb_waitlist);
 1690 
 1691         cv_destroy(&we.we_cv);
 1692 
 1693         /* We timed out. Leave the object alone and return status. */
 1694 
 1695         if (error == EWOULDBLOCK) {
 1696                 mtx_unlock(&ntoskrnl_dispatchlock);
 1697                 return(STATUS_TIMEOUT);
 1698         }
 1699 
 1700         mtx_unlock(&ntoskrnl_dispatchlock);
 1701 
 1702         return(STATUS_SUCCESS);
 1703 /*
 1704         return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
 1705             mode, alertable, duetime, &w));
 1706 */
 1707 }
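/*
 * A minimal usage sketch: wait on an event with a relative 500ms
 * timeout. Negative duetime values are relative and are given in
 * 100ns units, so 500ms is -5000000. (Hypothetical caller,
 * illustrative only, not compiled.)
 */
#if 0
static int
example_wait_event(nt_kevent *ev)
{
	int64_t			duetime = -5000000;	/* 500ms, relative */

	if (KeWaitForSingleObject(ev, 0, 0, TRUE, &duetime) ==
	    STATUS_TIMEOUT)
		return (EWOULDBLOCK);
	return (0);
}
#endif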
 1708 
 1709 static uint32_t
 1710 KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
 1711         alertable, duetime, wb_array)
 1712         uint32_t                cnt;
 1713         nt_dispatch_header      *obj[];
 1714         uint32_t                wtype;
 1715         uint32_t                reason;
 1716         uint32_t                mode;
 1717         uint8_t                 alertable;
 1718         int64_t                 *duetime;
 1719         wait_block              *wb_array;
 1720 {
 1721         struct thread           *td = curthread;
 1722         wait_block              *whead, *w;
 1723         wait_block              _wb_array[MAX_WAIT_OBJECTS];
 1724         nt_dispatch_header      *cur;
 1725         struct timeval          tv;
 1726         int                     i, wcnt = 0, error = 0;
 1727         uint64_t                curtime;
 1728         struct timespec         t1, t2;
 1729         uint32_t                status = STATUS_SUCCESS;
 1730         wb_ext                  we;
 1731 
 1732         if (cnt > MAX_WAIT_OBJECTS)
 1733                 return(STATUS_INVALID_PARAMETER);
 1734         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
 1735                 return(STATUS_INVALID_PARAMETER);
 1736 
 1737         mtx_lock(&ntoskrnl_dispatchlock);
 1738 
 1739         cv_init(&we.we_cv, "KeWFM");
 1740         we.we_td = td;
 1741 
 1742         if (wb_array == NULL)
 1743                 whead = _wb_array;
 1744         else
 1745                 whead = wb_array;
 1746 
 1747         bzero((char *)whead, sizeof(wait_block) * cnt);
 1748 
 1749         /* First pass: see if we can satisfy any waits immediately. */
 1750 
 1751         wcnt = 0;
 1752         w = whead;
 1753 
 1754         for (i = 0; i < cnt; i++) {
 1755                 InsertTailList((&obj[i]->dh_waitlisthead),
 1756                     (&w->wb_waitlist));
 1757                 w->wb_ext = &we;
 1758                 w->wb_object = obj[i];
 1759                 w->wb_waittype = wtype;
 1760                 w->wb_waitkey = i;
 1761                 w->wb_awakened = FALSE;
 1762                 w->wb_oldpri = td->td_priority;
 1763                 w->wb_next = w + 1;
 1764                 w++;
 1765                 wcnt++;
 1766                 if (ntoskrnl_is_signalled(obj[i], td)) {
 1767                         /*
 1768                          * There's a limit to how many times
 1769                          * we can recursively acquire a mutant.
 1770                          * If we hit the limit, something
 1771                          * is very wrong.
 1772                          */
 1773                         if (obj[i]->dh_sigstate == INT32_MIN &&
 1774                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
 1775                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1776                                 panic("mutant limit exceeded");
 1777                         }
 1778 
 1779                         /*
 1780                          * If this is a WAITTYPE_ANY wait, then
 1781                          * satisfy the waited object and exit
 1782                          * right now.
 1783                          */
 1784 
 1785                         if (wtype == WAITTYPE_ANY) {
 1786                                 ntoskrnl_satisfy_wait(obj[i], td);
 1787                                 status = STATUS_WAIT_0 + i;
 1788                                 goto wait_done;
 1789                         } else {
 1790                                 w--;
 1791                                 wcnt--;
 1792                                 w->wb_object = NULL;
 1793                                 RemoveEntryList(&w->wb_waitlist);
 1794                         }
 1795                 }
 1796         }
 1797 
 1798         /*
 1799          * If this is a WAITTYPE_ALL wait and all objects are
 1800          * already signalled, satisfy the waits and exit now.
 1801          */
 1802 
 1803         if (wtype == WAITTYPE_ALL && wcnt == 0) {
 1804                 for (i = 0; i < cnt; i++)
 1805                         ntoskrnl_satisfy_wait(obj[i], td);
 1806                 status = STATUS_SUCCESS;
 1807                 goto wait_done;
 1808         }
 1809 
 1810         /*
 1811          * Create a circular waitblock list. The waitcount
 1812          * must always be non-zero when we get here.
 1813          */
 1814 
 1815         (w - 1)->wb_next = whead;
 1816 
 1817         /* Wait on any objects that aren't yet signalled. */
 1818 
 1819         /* Calculate timeout, if any. */
 1820 
 1821         if (duetime != NULL) {
 1822                 if (*duetime < 0) {
 1823                         tv.tv_sec = - (*duetime) / 10000000;
 1824                         tv.tv_usec = (- (*duetime) / 10) -
 1825                             (tv.tv_sec * 1000000);
 1826                 } else {
 1827                         ntoskrnl_time(&curtime);
 1828                         if (*duetime < curtime)
 1829                                 tv.tv_sec = tv.tv_usec = 0;
 1830                         else {
 1831                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1832                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1833                                     (tv.tv_sec * 1000000);
 1834                         }
 1835                 }
 1836         }
 1837 
 1838         while (wcnt) {
 1839                 nanotime(&t1);
 1840 
 1841                 if (duetime == NULL)
 1842                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1843                 else
 1844                         error = cv_timedwait(&we.we_cv,
 1845                             &ntoskrnl_dispatchlock, tvtohz(&tv));
 1846 
 1847                 /* Wait with timeout expired. */
 1848 
 1849                 if (error) {
 1850                         status = STATUS_TIMEOUT;
 1851                         goto wait_done;
 1852                 }
 1853 
 1854                 nanotime(&t2);
 1855 
 1856                 /* See what's been signalled. */
 1857 
 1858                 w = whead;
 1859                 do {
 1860                         cur = w->wb_object;
 1861                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
 1862                             w->wb_awakened == TRUE) {
 1863                                 /* Sanity check the signal state value. */
 1864                                 if (cur->dh_sigstate == INT32_MIN &&
 1865                                     cur->dh_type == DISP_TYPE_MUTANT) {
 1866                                         mtx_unlock(&ntoskrnl_dispatchlock);
 1867                                         panic("mutant limit exceeded");
 1868                                 }
 1869                                 wcnt--;
 1870                                 if (wtype == WAITTYPE_ANY) {
 1871                                         status = STATUS_WAIT_0 +
 1872                                             w->wb_waitkey;
 1873                                         goto wait_done;
 1874                                 }
 1875                         }
 1876                         w = w->wb_next;
 1877                 } while (w != whead);
 1878 
 1879                 /*
 1880                  * If all objects have been signalled, or if this
 1881          * is a WAITTYPE_ANY wait and we were woken up by
 1882                  * someone, we can bail.
 1883                  */
 1884 
 1885                 if (wcnt == 0) {
 1886                         status = STATUS_SUCCESS;
 1887                         goto wait_done;
 1888                 }
 1889 
 1890                 /*
 1891          * If this is a WAITTYPE_ALL wait, and there are still
 1892          * objects that haven't been signalled, deduct the
 1893                  * time that's elapsed so far from the timeout and
 1894                  * wait again (or continue waiting indefinitely if
 1895                  * there's no timeout).
 1896                  */
 1897 
 1898                 if (duetime != NULL) {
 1899                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
 1900                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
 1901                 }
 1902         }
 1903 
 1904 
 1905 wait_done:
 1906 
 1907         cv_destroy(&we.we_cv);
 1908 
 1909         for (i = 0; i < cnt; i++) {
 1910                 if (whead[i].wb_object != NULL)
 1911                         RemoveEntryList(&whead[i].wb_waitlist);
 1912 
 1913         }
 1914         mtx_unlock(&ntoskrnl_dispatchlock);
 1915 
 1916         return(status);
 1917 }
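/*
 * Sketch of a WAITTYPE_ALL wait on two events: the call returns only
 * once both objects are signalled. With this few objects, wb_array
 * may be NULL and the on-stack wait block array above is used.
 * (Hypothetical caller, illustrative only, not compiled.)
 */
#if 0
static uint32_t
example_wait_both(nt_kevent *e1, nt_kevent *e2)
{
	nt_dispatch_header	*objs[2];

	objs[0] = &e1->k_header;
	objs[1] = &e2->k_header;

	return (KeWaitForMultipleObjects(2, objs, WAITTYPE_ALL,
	    0, 0, TRUE, NULL, NULL));
}
#endif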
 1918 
 1919 static void
 1920 WRITE_REGISTER_USHORT(reg, val)
 1921         uint16_t                *reg;
 1922         uint16_t                val;
 1923 {
 1924         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1925         return;
 1926 }
 1927 
 1928 static uint16_t
 1929 READ_REGISTER_USHORT(reg)
 1930         uint16_t                *reg;
 1931 {
 1932         return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1933 }
 1934 
 1935 static void
 1936 WRITE_REGISTER_ULONG(reg, val)
 1937         uint32_t                *reg;
 1938         uint32_t                val;
 1939 {
 1940         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1941         return;
 1942 }
 1943 
 1944 static uint32_t
 1945 READ_REGISTER_ULONG(reg)
 1946         uint32_t                *reg;
 1947 {
 1948         return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1949 }
 1950 
 1951 static uint8_t
 1952 READ_REGISTER_UCHAR(reg)
 1953         uint8_t                 *reg;
 1954 {
 1955         return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1956 }
 1957 
 1958 static void
 1959 WRITE_REGISTER_UCHAR(reg, val)
 1960         uint8_t                 *reg;
 1961         uint8_t                 val;
 1962 {
 1963         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1964         return;
 1965 }
 1966 
 1967 static int64_t
 1968 _allmul(a, b)
 1969         int64_t                 a;
 1970         int64_t                 b;
 1971 {
 1972         return (a * b);
 1973 }
 1974 
 1975 static int64_t
 1976 _alldiv(a, b)
 1977         int64_t                 a;
 1978         int64_t                 b;
 1979 {
 1980         return (a / b);
 1981 }
 1982 
 1983 static int64_t
 1984 _allrem(a, b)
 1985         int64_t                 a;
 1986         int64_t                 b;
 1987 {
 1988         return (a % b);
 1989 }
 1990 
 1991 static uint64_t
 1992 _aullmul(a, b)
 1993         uint64_t                a;
 1994         uint64_t                b;
 1995 {
 1996         return (a * b);
 1997 }
 1998 
 1999 static uint64_t
 2000 _aulldiv(a, b)
 2001         uint64_t                a;
 2002         uint64_t                b;
 2003 {
 2004         return (a / b);
 2005 }
 2006 
 2007 static uint64_t
 2008 _aullrem(a, b)
 2009         uint64_t                a;
 2010         uint64_t                b;
 2011 {
 2012         return (a % b);
 2013 }
 2014 
 2015 static int64_t
 2016 _allshl(a, b)
 2017         int64_t                 a;
 2018         uint8_t                 b;
 2019 {
 2020         return (a << b);
 2021 }
 2022 
 2023 static uint64_t
 2024 _aullshl(a, b)
 2025         uint64_t                a;
 2026         uint8_t                 b;
 2027 {
 2028         return (a << b);
 2029 }
 2030 
 2031 static int64_t
 2032 _allshr(a, b)
 2033         int64_t                 a;
 2034         uint8_t                 b;
 2035 {
 2036         return (a >> b);
 2037 }
 2038 
 2039 static uint64_t
 2040 _aullshr(a, b)
 2041         uint64_t                a;
 2042         uint8_t                 b;
 2043 {
 2044         return (a >> b);
 2045 }
 2046 
 2047 static slist_entry *
 2048 ntoskrnl_pushsl(head, entry)
 2049         slist_header            *head;
 2050         slist_entry             *entry;
 2051 {
 2052         slist_entry             *oldhead;
 2053 
 2054         oldhead = head->slh_list.slh_next;
 2055         entry->sl_next = head->slh_list.slh_next;
 2056         head->slh_list.slh_next = entry;
 2057         head->slh_list.slh_depth++;
 2058         head->slh_list.slh_seq++;
 2059 
 2060         return(oldhead);
 2061 }
 2062 
 2063 static slist_entry *
 2064 ntoskrnl_popsl(head)
 2065         slist_header            *head;
 2066 {
 2067         slist_entry             *first;
 2068 
 2069         first = head->slh_list.slh_next;
 2070         if (first != NULL) {
 2071                 head->slh_list.slh_next = first->sl_next;
 2072                 head->slh_list.slh_depth--;
 2073                 head->slh_list.slh_seq++;
 2074         }
 2075 
 2076         return(first);
 2077 }
 2078 
 2079 /*
 2080  * We need this to make lookaside lists work for amd64.
 2081  * We store a pointer to ExAllocatePoolWithTag() in the lookaside
 2082  * list structure. For amd64 to work right, this has to be a
 2083  * pointer to the wrapped version of the routine, not the
 2084  * original. Letting the Windows driver invoke the original
 2085  * function directly will result in a calling convention
 2086  * mismatch and an ugly crash. On x86, this effectively
 2087  * becomes a no-op since ipt_func and ipt_wrap are the same.
 2088  */
 2089 
 2090 static funcptr
 2091 ntoskrnl_findwrap(func)
 2092         funcptr                 func;
 2093 {
 2094         image_patch_table       *patch;
 2095 
 2096         patch = ntoskrnl_functbl;
 2097         while (patch->ipt_func != NULL) {
 2098                 if ((funcptr)patch->ipt_func == func)
 2099                         return((funcptr)patch->ipt_wrap);
 2100                 patch++;
 2101         }
 2102 
 2103         return(NULL);
 2104 }
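/*
 * For instance, the lookaside code below hands Windows drivers an
 * allocator pointer like this; on amd64 the lookup yields the
 * Windows-ABI thunk, on x86 it returns the routine unchanged.
 * (Hypothetical helper, illustrative only, not compiled.)
 */
#if 0
static funcptr
example_alloc_routine(void)
{
	return (ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag));
}
#endif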
 2105 
 2106 static void
 2107 ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
 2108     flags, size, tag, depth)
 2109         paged_lookaside_list    *lookaside;
 2110         lookaside_alloc_func    *allocfunc;
 2111         lookaside_free_func     *freefunc;
 2112         uint32_t                flags;
 2113         size_t                  size;
 2114         uint32_t                tag;
 2115         uint16_t                depth;
 2116 {
 2117         bzero((char *)lookaside, sizeof(paged_lookaside_list));
 2118 
 2119         if (size < sizeof(slist_entry))
 2120                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2121         else
 2122                 lookaside->nll_l.gl_size = size;
 2123         lookaside->nll_l.gl_tag = tag;
 2124         if (allocfunc == NULL)
 2125                 lookaside->nll_l.gl_allocfunc =
 2126                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2127         else
 2128                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2129 
 2130         if (freefunc == NULL)
 2131                 lookaside->nll_l.gl_freefunc =
 2132                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2133         else
 2134                 lookaside->nll_l.gl_freefunc = freefunc;
 2135 
 2136 #ifdef __i386__
 2137         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2138 #endif
 2139 
 2140         lookaside->nll_l.gl_type = NonPagedPool;
 2141         lookaside->nll_l.gl_depth = depth;
 2142         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2143 
 2144         return;
 2145 }
 2146 
 2147 static void
 2148 ExDeletePagedLookasideList(lookaside)
 2149         paged_lookaside_list   *lookaside;
 2150 {
 2151         void                    *buf;
 2152         void            (*freefunc)(void *);
 2153 
 2154         freefunc = lookaside->nll_l.gl_freefunc;
 2155         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2156                 MSCALL1(freefunc, buf);
 2157 
 2158         return;
 2159 }
 2160 
 2161 static void
 2162 ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
 2163     flags, size, tag, depth)
 2164         npaged_lookaside_list   *lookaside;
 2165         lookaside_alloc_func    *allocfunc;
 2166         lookaside_free_func     *freefunc;
 2167         uint32_t                flags;
 2168         size_t                  size;
 2169         uint32_t                tag;
 2170         uint16_t                depth;
 2171 {
 2172         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
 2173 
 2174         if (size < sizeof(slist_entry))
 2175                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2176         else
 2177                 lookaside->nll_l.gl_size = size;
 2178         lookaside->nll_l.gl_tag = tag;
 2179         if (allocfunc == NULL)
 2180                 lookaside->nll_l.gl_allocfunc =
 2181                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2182         else
 2183                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2184 
 2185         if (freefunc == NULL)
 2186                 lookaside->nll_l.gl_freefunc =
 2187                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2188         else
 2189                 lookaside->nll_l.gl_freefunc = freefunc;
 2190 
 2191 #ifdef __i386__
 2192         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2193 #endif
 2194 
 2195         lookaside->nll_l.gl_type = NonPagedPool;
 2196         lookaside->nll_l.gl_depth = depth;
 2197         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2198 
 2199         return;
 2200 }
 2201 
 2202 static void
 2203 ExDeleteNPagedLookasideList(lookaside)
 2204         npaged_lookaside_list   *lookaside;
 2205 {
 2206         void                    *buf;
 2207         void            (*freefunc)(void *);
 2208 
 2209         freefunc = lookaside->nll_l.gl_freefunc;
 2210         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2211                 MSCALL1(freefunc, buf);
 2212 
 2213         return;
 2214 }
 2215 
 2216 slist_entry *
 2217 InterlockedPushEntrySList(head, entry)
 2218         slist_header            *head;
 2219         slist_entry             *entry;
 2220 {
 2221         slist_entry             *oldhead;
 2222 
 2223         mtx_lock_spin(&ntoskrnl_interlock);
 2224         oldhead = ntoskrnl_pushsl(head, entry);
 2225         mtx_unlock_spin(&ntoskrnl_interlock);
 2226 
 2227         return(oldhead);
 2228 }
 2229 
 2230 slist_entry *
 2231 InterlockedPopEntrySList(head)
 2232         slist_header            *head;
 2233 {
 2234         slist_entry             *first;
 2235 
 2236         mtx_lock_spin(&ntoskrnl_interlock);
 2237         first = ntoskrnl_popsl(head);
 2238         mtx_unlock_spin(&ntoskrnl_interlock);
 2239 
 2240         return(first);
 2241 }
 2242 
 2243 static slist_entry *
 2244 ExInterlockedPushEntrySList(head, entry, lock)
 2245         slist_header            *head;
 2246         slist_entry             *entry;
 2247         kspin_lock              *lock;
 2248 {
 2249         return(InterlockedPushEntrySList(head, entry));
 2250 }
 2251 
 2252 static slist_entry *
 2253 ExInterlockedPopEntrySList(head, lock)
 2254         slist_header            *head;
 2255         kspin_lock              *lock;
 2256 {
 2257         return(InterlockedPopEntrySList(head));
 2258 }
 2259 
 2260 uint16_t
 2261 ExQueryDepthSList(head)
 2262         slist_header            *head;
 2263 {
 2264         uint16_t                depth;
 2265 
 2266         mtx_lock_spin(&ntoskrnl_interlock);
 2267         depth = head->slh_list.slh_depth;
 2268         mtx_unlock_spin(&ntoskrnl_interlock);
 2269 
 2270         return(depth);
 2271 }
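/*
 * Typical interlocked SList usage: push a node, pop it back off,
 * query the depth. (Hypothetical node type, illustrative only,
 * not compiled.)
 */
#if 0
struct example_node {
	slist_entry		en_link;	/* must come first */
	int			en_value;
};

static void
example_slist(slist_header *head, struct example_node *n)
{
	slist_entry		*e;

	InterlockedPushEntrySList(head, &n->en_link);	/* depth now 1 */
	e = InterlockedPopEntrySList(head);		/* depth now 0 */
	if (e != NULL)
		((struct example_node *)e)->en_value = ExQueryDepthSList(head);
}
#endif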
 2272 
 2273 void
 2274 KeInitializeSpinLock(lock)
 2275         kspin_lock              *lock;
 2276 {
 2277         *lock = 0;
 2278 
 2279         return;
 2280 }
 2281 
 2282 #ifdef __i386__
 2283 void
 2284 KefAcquireSpinLockAtDpcLevel(lock)
 2285         kspin_lock              *lock;
 2286 {
 2287 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2288         int                     i = 0;
 2289 #endif
 2290 
 2291         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
 2292                 /* sit and spin */;
 2293 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2294                 i++;
 2295                 if (i > 200000000)
 2296                         panic("DEADLOCK!");
 2297 #endif
 2298         }
 2299 
 2300         return;
 2301 }
 2302 
 2303 void
 2304 KefReleaseSpinLockFromDpcLevel(lock)
 2305         kspin_lock              *lock;
 2306 {
 2307         atomic_store_rel_int((volatile u_int *)lock, 0);
 2308 
 2309         return;
 2310 }
 2311 
 2312 uint8_t
 2313 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
 2314 {
 2315         uint8_t                 oldirql;
 2316 
 2317         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
 2318                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
 2319 
 2320         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
 2321         KeAcquireSpinLockAtDpcLevel(lock);
 2322 
 2323         return(oldirql);
 2324 }
 2325 #else
 2326 void
 2327 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
 2328 {
 2329         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
 2330                 /* sit and spin */;
 2331 
 2332         return;
 2333 }
 2334 
 2335 void
 2336 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
 2337 {
 2338         atomic_store_rel_int((volatile u_int *)lock, 0);
 2339 
 2340         return;
 2341 }
 2342 #endif /* __i386__ */
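/*
 * The usual pairing raises to DISPATCH_LEVEL on acquire and restores
 * the previous IRQL on release, as the workitem code below does.
 * (Hypothetical caller, illustrative only, not compiled.)
 */
#if 0
static void
example_spinlock(kspin_lock *lock)
{
	uint8_t			irql;

	KeAcquireSpinLock(lock, &irql);
	/* ... modify data shared with a DPC ... */
	KeReleaseSpinLock(lock, irql);
}
#endif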
 2343 
 2344 uintptr_t
 2345 InterlockedExchange(dst, val)
 2346         volatile uint32_t       *dst;
 2347         uintptr_t               val;
 2348 {
 2349         uintptr_t               r;
 2350 
 2351         mtx_lock_spin(&ntoskrnl_interlock);
 2352         r = *dst;
 2353         *dst = val;
 2354         mtx_unlock_spin(&ntoskrnl_interlock);
 2355 
 2356         return(r);
 2357 }
 2358 
 2359 static uint32_t
 2360 InterlockedIncrement(addend)
 2361         volatile uint32_t       *addend;
 2362 {
 2363         atomic_add_int((volatile u_int *)addend, 1);
 2364         return(*addend);
 2365 }
 2366 
 2367 static uint32_t
 2368 InterlockedDecrement(addend)
 2369         volatile uint32_t       *addend;
 2370 {
 2371         atomic_subtract_int((volatile u_int *)addend, 1);
 2372         return(*addend);
 2373 }
 2374 
 2375 static void
 2376 ExInterlockedAddLargeStatistic(addend, inc)
 2377         uint64_t                *addend;
 2378         uint32_t                inc;
 2379 {
 2380         mtx_lock_spin(&ntoskrnl_interlock);
 2381         *addend += inc;
 2382         mtx_unlock_spin(&ntoskrnl_interlock);
 2383 
 2384         return;
 2385 }
 2386 
 2387 mdl *
 2388 IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
 2389         void                    *vaddr;
 2390         uint32_t                len;
 2391         uint8_t                 secondarybuf;
 2392         uint8_t                 chargequota;
 2393         irp                     *iopkt;
 2394 {
 2395         mdl                     *m;
 2396         int                     zone = 0;
 2397 
 2398         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
 2399                 m = ExAllocatePoolWithTag(NonPagedPool,
 2400                     MmSizeOfMdl(vaddr, len), 0);
 2401         else {
 2402                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
 2403                 zone++;
 2404         }
 2405 
 2406         if (m == NULL)
 2407                 return (NULL);
 2408 
 2409         MmInitializeMdl(m, vaddr, len);
 2410 
 2411         /*
 2412          * MmInitializeMdl() clears the flags field, so we
 2413          * have to set this here. If the MDL came from the
 2414          * MDL UMA zone, tag it so we can release it to
 2415          * the right place later.
 2416          */
 2417         if (zone)
 2418                 m->mdl_flags = MDL_ZONE_ALLOCED;
 2419 
 2420         if (iopkt != NULL) {
 2421                 if (secondarybuf == TRUE) {
 2422                         mdl                     *last;
 2423                         last = iopkt->irp_mdl;
 2424                         while (last->mdl_next != NULL)
 2425                                 last = last->mdl_next;
 2426                         last->mdl_next = m;
 2427                 } else {
 2428                         if (iopkt->irp_mdl != NULL)
 2429                                 panic("leaking an MDL in IoAllocateMdl()");
 2430                         iopkt->irp_mdl = m;
 2431                 }
 2432         }
 2433 
 2434         return (m);
 2435 }
 2436 
 2437 void
 2438 IoFreeMdl(m)
 2439         mdl                     *m;
 2440 {
 2441         if (m == NULL)
 2442                 return;
 2443 
 2444         if (m->mdl_flags & MDL_ZONE_ALLOCED)
 2445                 uma_zfree(mdl_zone, m);
 2446         else
 2447                 ExFreePool(m);
 2448 
 2449         return;
 2450 }
 2451 
 2452 static uint32_t
 2453 MmSizeOfMdl(vaddr, len)
 2454         void                    *vaddr;
 2455         size_t                  len;
 2456 {
 2457         uint32_t                l;
 2458 
 2459         l = sizeof(struct mdl) +
 2460             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
 2461 
 2462         return(l);
 2463 }
 2464 
 2465 /*
 2466  * The Microsoft documentation says this routine fills in the
 2467  * page array of an MDL with the _physical_ page addresses that
 2468  * comprise the buffer, but we don't really want to do that here.
 2469  * Instead, we just fill in the page array with the kernel virtual
 2470  * addresses of the buffers.
 2471  */
 2472 void
 2473 MmBuildMdlForNonPagedPool(m)
 2474         mdl                     *m;
 2475 {
 2476         vm_offset_t             *mdl_pages;
 2477         int                     pagecnt, i;
 2478 
 2479         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
 2480 
 2481         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
 2482                 panic("not enough pages in MDL to describe buffer");
 2483 
 2484         mdl_pages = MmGetMdlPfnArray(m);
 2485 
 2486         for (i = 0; i < pagecnt; i++)
 2487                 mdl_pages[i] = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
 2488 
 2489         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
 2490         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
 2491 
 2492         return;
 2493 }
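/*
 * Putting the MDL routines together, the typical lifecycle for a
 * non-paged buffer looks like this (hypothetical caller, illustrative
 * only, not compiled):
 */
#if 0
static void
example_mdl(void *buf, uint32_t len)
{
	mdl			*m;

	m = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);
	if (m == NULL)
		return;
	MmBuildMdlForNonPagedPool(m);	/* fill in the page array */
	/* ... hand m to code that expects an MDL ... */
	IoFreeMdl(m);
}
#endif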
 2494 
 2495 static void *
 2496 MmMapLockedPages(buf, accessmode)
 2497         mdl                     *buf;
 2498         uint8_t                 accessmode;
 2499 {
 2500         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
 2501         return(MmGetMdlVirtualAddress(buf));
 2502 }
 2503 
 2504 static void *
 2505 MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
 2506     bugcheck, prio)
 2507         mdl                     *buf;
 2508         uint8_t                 accessmode;
 2509         uint32_t                cachetype;
 2510         void                    *vaddr;
 2511         uint32_t                bugcheck;
 2512         uint32_t                prio;
 2513 {
 2514         return(MmMapLockedPages(buf, accessmode));
 2515 }
 2516 
 2517 static void
 2518 MmUnmapLockedPages(vaddr, buf)
 2519         void                    *vaddr;
 2520         mdl                     *buf;
 2521 {
 2522         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
 2523         return;
 2524 }
 2525 
 2526 /*
 2527  * This function has a problem in that it will break if you
 2528  * compile this module without PAE and try to use it on a PAE
 2529  * kernel. Unfortunately, there's no way around this at the
 2530  * moment. It's slightly less broken than using pmap_kextract().
 2531  * You'd think the virtual memory subsystem would help us out
 2532  * here, but it doesn't.
 2533  */
 2534 
 2535 static uint8_t
 2536 MmIsAddressValid(vaddr)
 2537         void                    *vaddr;
 2538 {
 2539         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
 2540                 return(TRUE);
 2541 
 2542         return(FALSE);
 2543 }
 2544 
 2545 void *
 2546 MmMapIoSpace(paddr, len, cachetype)
 2547         uint64_t                paddr;
 2548         uint32_t                len;
 2549         uint32_t                cachetype;
 2550 {
 2551         devclass_t              nexus_class;
 2552         device_t                *nexus_devs, devp;
 2553         int                     nexus_count = 0;
 2554         device_t                matching_dev = NULL;
 2555         struct resource         *res;
 2556         int                     i;
 2557         vm_offset_t             v;
 2558 
 2559         /* There will always be at least one nexus. */
 2560 
 2561         nexus_class = devclass_find("nexus");
 2562         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
 2563 
 2564         for (i = 0; i < nexus_count; i++) {
 2565                 devp = nexus_devs[i];
 2566                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
 2567                 if (matching_dev)
 2568                         break;
 2569         }
 2570 
 2571         free(nexus_devs, M_TEMP);
 2572 
 2573         if (matching_dev == NULL)
 2574                 return(NULL);
 2575 
 2576         v = (vm_offset_t)rman_get_virtual(res);
 2577         if (paddr > rman_get_start(res))
 2578                 v += paddr - rman_get_start(res);
 2579 
 2580         return((void *)v);
 2581 }
 2582 
 2583 void
 2584 MmUnmapIoSpace(vaddr, len)
 2585         void                    *vaddr;
 2586         size_t                  len;
 2587 {
 2588         return;
 2589 }
 2590 
 2591 
 2592 static device_t
 2593 ntoskrnl_finddev(dev, paddr, res)
 2594         device_t                dev;
 2595         uint64_t                paddr;
 2596         struct resource         **res;
 2597 {
 2598         device_t                *children = NULL;
 2599         device_t                matching_dev;
 2600         int                     childcnt;
 2601         struct resource         *r;
 2602         struct resource_list    *rl;
 2603         struct resource_list_entry      *rle;
 2604         uint32_t                flags;
 2605         int                     i;
 2606 
 2607         /* We only want devices that have been successfully probed. */
 2608 
 2609         if (device_is_alive(dev) == FALSE)
 2610                 return(NULL);
 2611 
 2612         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 2613         if (rl != NULL) {
 2614 #if __FreeBSD_version < 600022
 2615                 SLIST_FOREACH(rle, rl, link) {
 2616 #else
 2617                 STAILQ_FOREACH(rle, rl, link) {
 2618 #endif
 2619                         r = rle->res;
 2620 
 2621                         if (r == NULL)
 2622                                 continue;
 2623 
 2624                         flags = rman_get_flags(r);
 2625 
 2626                         if (rle->type == SYS_RES_MEMORY &&
 2627                             paddr >= rman_get_start(r) &&
 2628                             paddr <= rman_get_end(r)) {
 2629                                 if (!(flags & RF_ACTIVE))
 2630                                         bus_activate_resource(dev,
 2631                                             SYS_RES_MEMORY, 0, r);
 2632                                 *res = r;
 2633                                 return(dev);
 2634                         }
 2635                 }
 2636         }
 2637 
 2638         /*
 2639          * If this device has children, do another
 2640          * level of recursion to inspect them.
 2641          */
 2642 
 2643         device_get_children(dev, &children, &childcnt);
 2644 
 2645         for (i = 0; i < childcnt; i++) {
 2646                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
 2647                 if (matching_dev != NULL) {
 2648                         free(children, M_TEMP);
 2649                         return(matching_dev);
 2650                 }
 2651         }
 2652 
 2653 
 2654         /* Won't somebody please think of the children! */
 2655 
 2656         if (children != NULL)
 2657                 free(children, M_TEMP);
 2658 
 2659         return(NULL);
 2660 }
 2661 
 2662 /*
 2663  * Workitems are unlike DPCs in that they run in a user-mode thread
 2664  * context rather than at DISPATCH_LEVEL in kernel context. In our
 2665  * case we run them in kernel context anyway.
 2666  */
 2667 static void
 2668 ntoskrnl_workitem_thread(arg)
 2669         void                    *arg;
 2670 {
 2671         kdpc_queue              *kq;
 2672         list_entry              *l;
 2673         io_workitem             *iw;
 2674         uint8_t                 irql;
 2675 
 2676         kq = arg;
 2677 
 2678         InitializeListHead(&kq->kq_disp);
 2679         kq->kq_td = curthread;
 2680         kq->kq_exit = 0;
 2681         KeInitializeSpinLock(&kq->kq_lock);
 2682         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 2683 
 2684         while (1) {
 2685                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 2686 
 2687                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 2688 
 2689                 if (kq->kq_exit) {
 2690                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2691                         break;
 2692                 }
 2693 
 2694                 while (!IsListEmpty(&kq->kq_disp)) {
 2695                         l = RemoveHeadList(&kq->kq_disp);
 2696                         iw = CONTAINING_RECORD(l,
 2697                             io_workitem, iw_listentry);
 2698                         InitializeListHead((&iw->iw_listentry));
 2699                         if (iw->iw_func == NULL)
 2700                                 continue;
 2701                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2702                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
 2703                         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2704                 }
 2705 
 2706                 KeReleaseSpinLock(&kq->kq_lock, irql);
 2707         }
 2708 
 2709 #if __FreeBSD_version < 502113
 2710         mtx_lock(&Giant);
 2711 #endif
 2712         kthread_exit(0);
 2713         return; /* notreached */
 2714 }
 2715 
 2716 static void
 2717 ntoskrnl_destroy_workitem_threads(void)
 2718 {
 2719         kdpc_queue              *kq;
 2720         int                     i;
 2721 
 2722         for (i = 0; i < WORKITEM_THREADS; i++) {
 2723                 kq = wq_queues + i;
 2724                 kq->kq_exit = 1;
 2725                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);       
 2726                 tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", 0);
 2727         }
 2728 
 2729         return;
 2730 }
 2731 
 2732 io_workitem *
 2733 IoAllocateWorkItem(dobj)
 2734         device_object           *dobj;
 2735 {
 2736         io_workitem             *iw;
 2737 
 2738         iw = uma_zalloc(iw_zone, M_NOWAIT);
 2739         if (iw == NULL)
 2740                 return(NULL);
 2741 
 2742         InitializeListHead(&iw->iw_listentry);
 2743         iw->iw_dobj = dobj;
 2744 
 2745         mtx_lock(&ntoskrnl_dispatchlock);
 2746         iw->iw_idx = wq_idx;
 2747         WORKIDX_INC(wq_idx);
 2748         mtx_unlock(&ntoskrnl_dispatchlock);
 2749 
 2750         return(iw);
 2751 }
 2752 
 2753 void
 2754 IoFreeWorkItem(iw)
 2755         io_workitem             *iw;
 2756 {
 2757         uma_zfree(iw_zone, iw);
 2758         return;
 2759 }
 2760 
 2761 void
 2762 IoQueueWorkItem(iw, iw_func, qtype, ctx)
 2763         io_workitem             *iw;
 2764         io_workitem_func        iw_func;
 2765         uint32_t                qtype;
 2766         void                    *ctx;
 2767 {
 2768         kdpc_queue              *kq;
 2769         list_entry              *l;
 2770         io_workitem             *cur;
 2771         uint8_t                 irql;
 2772 
 2773         kq = wq_queues + iw->iw_idx;
 2774 
 2775         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2776 
 2777         /*
 2778          * Traverse the list and make sure this workitem hasn't
 2779          * already been inserted. Queuing the same workitem
 2780          * twice will hose the list but good.
 2781          */
 2782 
 2783         l = kq->kq_disp.nle_flink;
 2784         while (l != &kq->kq_disp) {
 2785                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2786                 if (cur == iw) {
 2787                         /* Already queued -- do nothing. */
 2788                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2789                         return;
 2790                 }
 2791                 l = l->nle_flink;
 2792         }
 2793 
 2794         iw->iw_func = iw_func;
 2795         iw->iw_ctx = ctx;
 2796 
 2797         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
 2798         KeReleaseSpinLock(&kq->kq_lock, irql);
 2799 
 2800         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 2801 
 2802         return;
 2803 }
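/*
 * Typical workitem usage: allocate once, queue a Windows-ABI callback
 * (it will be invoked via MSCALL2() in one of the workitem threads
 * above), then free the workitem after the callback has run. The
 * qtype argument is accepted but not used by this implementation.
 * (Hypothetical caller, illustrative only, not compiled.)
 */
#if 0
static void
example_queue_work(device_object *dobj, io_workitem_func wfunc, void *ctx)
{
	io_workitem		*iw;

	iw = IoAllocateWorkItem(dobj);
	if (iw == NULL)
		return;
	IoQueueWorkItem(iw, wfunc, WORKQUEUE_DELAYED, ctx);
	/* ... later, once wfunc has run: IoFreeWorkItem(iw); */
}
#endif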
 2804 
 2805 static void
 2806 ntoskrnl_workitem(dobj, arg)
 2807         device_object           *dobj;
 2808         void                    *arg;
 2809 {
 2810         io_workitem             *iw;
 2811         work_queue_item         *w;
 2812         work_item_func          f;
 2813 
 2814         iw = arg;
 2815         w = (work_queue_item *)dobj;
 2816         f = (work_item_func)w->wqi_func;
 2817         uma_zfree(iw_zone, iw);
 2818         MSCALL2(f, w, w->wqi_ctx);
 2819 
 2820         return;
 2821 }
 2822 
 2823 /*
 2824  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 2825  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 2826  * problem with ExQueueWorkItem() is that it can't guard against
 2827  * the condition where a driver submits a job to the work queue and
 2828  * is then unloaded before the job is able to run. IoQueueWorkItem()
 2829  * acquires a reference to the device's device_object via the
 2830  * object manager and retains it until after the job has completed,
 2831  * which prevents the driver from being unloaded before the job
 2832  * runs. (We don't currently support this behavior, though hopefully
 2833  * that will change once the object manager API is fleshed out a bit.)
 2834  *
 2835  * Having said all that, the ExQueueWorkItem() API remains, because
 2836  * there are still other parts of Windows that use it, including
 2837  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 2838  * We fake up the ExQueueWorkItem() API on top of our implementation
 2839  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 2840  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 2841  * queue item (provided by the caller) into IoAllocateWorkItem()
 2842  * instead of the device_object. We need to save this pointer so
 2843  * we can apply a sanity check: as with the DPC queue and other
 2844  * workitem queues, we can't allow the same work queue item to
 2845  * be queued twice. If it's already pending, we silently return.
 2846  */
 2847 
 2848 void
 2849 ExQueueWorkItem(w, qtype)
 2850         work_queue_item         *w;
 2851         uint32_t                qtype;
 2852 {
 2853         io_workitem             *iw;
 2854         io_workitem_func        iwf;
 2855         kdpc_queue              *kq;
 2856         list_entry              *l;
 2857         io_workitem             *cur;
 2858         uint8_t                 irql;
 2859 
 2860 
 2861         /*
 2862          * We need to do a special sanity test to make sure
 2863          * the ExQueueWorkItem() API isn't used to queue
 2864          * the same workitem twice. Rather than checking the
 2865          * io_workitem pointer itself, we test the attached
 2866          * device object, which is really a pointer to the
 2867          * legacy work queue item structure.
 2868          */
 2869 
 2870         kq = wq_queues + WORKITEM_LEGACY_THREAD;
 2871         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2872         l = kq->kq_disp.nle_flink;
 2873         while (l != &kq->kq_disp) {
 2874                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2875                 if (cur->iw_dobj == (device_object *)w) {
 2876                         /* Already queued -- do nothing. */
 2877                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2878                         return;
 2879                 }
 2880                 l = l->nle_flink;
 2881         }
 2882         KeReleaseSpinLock(&kq->kq_lock, irql);
 2883 
 2884         iw = IoAllocateWorkItem((device_object *)w);
 2885         if (iw == NULL)
 2886                 return;
 2887 
 2888         iw->iw_idx = WORKITEM_LEGACY_THREAD;
 2889         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
 2890         IoQueueWorkItem(iw, iwf, qtype, iw);
 2891 
 2892         return;
 2893 }
 2894 
 2895 static size_t
 2896 RtlCompareMemory(s1, s2, len)
 2897         const void              *s1;
 2898         const void              *s2;
 2899         size_t                  len;
 2900 {
 2901         size_t                  i, total = 0;
 2902         uint8_t                 *m1, *m2;
 2903 
 2904         m1 = __DECONST(uint8_t *, s1);
 2905         m2 = __DECONST(uint8_t *, s2);
 2906 
 2907         for (i = 0; i < len; i++) {
 2908                 if (m1[i] == m2[i])
 2909                         total++;
 2910         }
 2911         return(total);
 2912 }
 2913 
 2914 void
 2915 RtlInitAnsiString(dst, src)
 2916         ansi_string             *dst;
 2917         char                    *src;
 2918 {
 2919         ansi_string             *a;
 2920 
 2921         a = dst;
 2922         if (a == NULL)
 2923                 return;
 2924         if (src == NULL) {
 2925                 a->as_len = a->as_maxlen = 0;
 2926                 a->as_buf = NULL;
 2927         } else {
 2928                 a->as_buf = src;
 2929                 a->as_len = a->as_maxlen = strlen(src);
 2930         }
 2931 
 2932         return;
 2933 }
 2934 
 2935 void
 2936 RtlInitUnicodeString(dst, src)
 2937         unicode_string          *dst;
 2938         uint16_t                *src;
 2939 {
 2940         unicode_string          *u;
 2941         int                     i;
 2942 
 2943         u = dst;
 2944         if (u == NULL)
 2945                 return;
 2946         if (src == NULL) {
 2947                 u->us_len = u->us_maxlen = 0;
 2948                 u->us_buf = NULL;
 2949         } else {
 2950                 i = 0;
 2951                 while(src[i] != 0)
 2952                         i++;
 2953                 u->us_buf = src;
 2954                 u->us_len = u->us_maxlen = i * 2;
 2955         }
 2956 
 2957         return;
 2958 }
 2959 
 2960 ndis_status
 2961 RtlUnicodeStringToInteger(ustr, base, val)
 2962         unicode_string          *ustr;
 2963         uint32_t                base;
 2964         uint32_t                *val;
 2965 {
 2966         uint16_t                *uchr;
 2967         int                     len, neg = 0;
 2968         char                    abuf[64];
 2969         char                    *astr;
 2970 
 2971         uchr = ustr->us_buf;
 2972         len = ustr->us_len;
 2973         bzero(abuf, sizeof(abuf));
 2974 
 2975         if ((char)((*uchr) & 0xFF) == '-') {
 2976                 neg = 1;
 2977                 uchr++;
 2978                 len -= 2;
 2979         } else if ((char)((*uchr) & 0xFF) == '+') {
 2980                 neg = 0;
 2981                 uchr++;
 2982                 len -= 2;
 2983         }
 2984 
 2985         if (base == 0) {
 2986                 if ((char)((*uchr) & 0xFF) == 'b') {
 2987                         base = 2;
 2988                         uchr++;
 2989                         len -= 2;
 2990                 } else if ((char)((*uchr) & 0xFF) == 'o') {
 2991                         base = 8;
 2992                         uchr++;
 2993                         len -= 2;
 2994                 } else if ((char)((*uchr) & 0xFF) == 'x') {
 2995                         base = 16;
 2996                         uchr++;
 2997                         len -= 2;
 2998                 } else
 2999                         base = 10;
 3000         }
 3001 
 3002         astr = abuf;
 3003         if (neg) {
 3004                 strcpy(astr, "-");
 3005                 astr++;
 3006         }
 3007 
 3008         ntoskrnl_unicode_to_ascii(uchr, astr, len);
 3009         *val = strtoul(abuf, NULL, base);
 3010 
 3011         return(NDIS_STATUS_SUCCESS);
 3012 }
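/*
 * Worked example: with an explicit base of 16 the prefix logic above
 * is skipped, so L"1A" is narrowed to "1A" and strtoul() yields 26.
 * With base 0, a leading 'b', 'o' or 'x' character selects the base.
 * (Hypothetical caller, illustrative only, not compiled.)
 */
#if 0
static void
example_ustr_to_int(void)
{
	uint16_t		raw[] = { '1', 'A', 0 };
	unicode_string		us;
	uint32_t		val;

	RtlInitUnicodeString(&us, raw);
	RtlUnicodeStringToInteger(&us, 16, &val);	/* val == 26 */
}
#endif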
 3013 
 3014 void
 3015 RtlFreeUnicodeString(ustr)
 3016         unicode_string          *ustr;
 3017 {
 3018         if (ustr->us_buf == NULL)
 3019                 return;
 3020         ExFreePool(ustr->us_buf);
 3021         ustr->us_buf = NULL;
 3022         return;
 3023 }
 3024 
 3025 void
 3026 RtlFreeAnsiString(astr)
 3027         ansi_string             *astr;
 3028 {
 3029         if (astr->as_buf == NULL)
 3030                 return;
 3031         ExFreePool(astr->as_buf);
 3032         astr->as_buf = NULL;
 3033         return;
 3034 }
 3035 
 3036 static int
 3037 atoi(str)
 3038         const char              *str;
 3039 {
 3040         return (int)strtol(str, (char **)NULL, 10);
 3041 }
 3042 
 3043 static long
 3044 atol(str)
 3045         const char              *str;
 3046 {
 3047         return strtol(str, (char **)NULL, 10);
 3048 }
 3049 
 3050 static int
 3051 rand(void)
 3052 {
 3053         struct timeval          tv;
 3054 
 3055         microtime(&tv);
 3056         srandom(tv.tv_usec);
 3057         return((int)random());
 3058 }
 3059 
 3060 static void
 3061 srand(seed)
 3062         unsigned int            seed;
 3063 {
 3064         srandom(seed);
 3065         return;
 3066 }
 3067 
 3068 static uint8_t
 3069 IoIsWdmVersionAvailable(major, minor)
 3070         uint8_t                 major;
 3071         uint8_t                 minor;
 3072 {
 3073         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
 3074                 return(TRUE);
 3075         return(FALSE);
 3076 }
 3077 
 3078 static ndis_status
 3079 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
 3080         device_object           *devobj;
 3081         uint32_t                regprop;
 3082         uint32_t                buflen;
 3083         void                    *prop;
 3084         uint32_t                *reslen;
 3085 {
 3086         driver_object           *drv;
 3087         uint16_t                **name;
 3088 
 3089         drv = devobj->do_drvobj;
 3090 
 3091         switch (regprop) {
 3092         case DEVPROP_DRIVER_KEYNAME:
 3093                 name = prop;
 3094                 *name = drv->dro_drivername.us_buf;
 3095                 *reslen = drv->dro_drivername.us_len;
 3096                 break;
 3097         default:
 3098                 return(STATUS_INVALID_PARAMETER_2);
 3099                 break;
 3100         }
 3101 
 3102         return(STATUS_SUCCESS);
 3103 }
 3104 
 3105 static void
 3106 KeInitializeMutex(kmutex, level)
 3107         kmutant                 *kmutex;
 3108         uint32_t                level;
 3109 {
 3110         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
 3111         kmutex->km_abandoned = FALSE;
 3112         kmutex->km_apcdisable = 1;
 3113         kmutex->km_header.dh_sigstate = 1;
 3114         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
 3115         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
 3116         kmutex->km_ownerthread = NULL;
 3117         return;
 3118 }
 3119 
 3120 static uint32_t
 3121 KeReleaseMutex(kmutex, kwait)
 3122         kmutant                 *kmutex;
 3123         uint8_t                 kwait;
 3124 {
 3125         uint32_t                prevstate;
 3126 
 3127         mtx_lock(&ntoskrnl_dispatchlock);
 3128         prevstate = kmutex->km_header.dh_sigstate;
 3129         if (kmutex->km_ownerthread != curthread) {
 3130                 mtx_unlock(&ntoskrnl_dispatchlock);
 3131                 return(STATUS_MUTANT_NOT_OWNED);
 3132         }
 3133 
 3134         kmutex->km_header.dh_sigstate++;
 3135         kmutex->km_abandoned = FALSE;
 3136 
 3137         if (kmutex->km_header.dh_sigstate == 1) {
 3138                 kmutex->km_ownerthread = NULL;
 3139                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
 3140         }
 3141 
 3142         mtx_unlock(&ntoskrnl_dispatchlock);
 3143 
 3144         return(prevstate);
 3145 }
 3146 
 3147 static uint32_t
 3148 KeReadStateMutex(kmutex)
 3149         kmutant                 *kmutex;
 3150 {
 3151         return(kmutex->km_header.dh_sigstate);
 3152 }
 3153 
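      /*
       * Illustrative sketch (editor's addition): the mutant calling
       * pattern the routines above emulate.  A mutant is acquired by
       * waiting on it -- the wait path (elsewhere in this file) is
       * assumed to record km_ownerthread, which KeReleaseMutex()
       * checks above.  Hypothetical NTOSKRNL_EXAMPLES guard.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static kmutant          example_mtx;

      static void
      example_mutex_usage(void)
      {
              KeInitializeMutex(&example_mtx, 0);

              KeWaitForSingleObject(&example_mtx, 0, 0, TRUE, NULL);
              /* ... critical section ... */
              KeReleaseMutex(&example_mtx, FALSE);
      }
      #endif /* NTOSKRNL_EXAMPLES */
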
 3154 void
 3155 KeInitializeEvent(kevent, type, state)
 3156         nt_kevent               *kevent;
 3157         uint32_t                type;
 3158         uint8_t                 state;
 3159 {
 3160         InitializeListHead((&kevent->k_header.dh_waitlisthead));
 3161         kevent->k_header.dh_sigstate = state;
 3162         if (type == EVENT_TYPE_NOTIFY)
 3163                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
 3164         else
 3165                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
 3166         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
 3167         return;
 3168 }
 3169 
 3170 uint32_t
 3171 KeResetEvent(kevent)
 3172         nt_kevent               *kevent;
 3173 {
 3174         uint32_t                prevstate;
 3175 
 3176         mtx_lock(&ntoskrnl_dispatchlock);
 3177         prevstate = kevent->k_header.dh_sigstate;
 3178         kevent->k_header.dh_sigstate = FALSE;
 3179         mtx_unlock(&ntoskrnl_dispatchlock);
 3180 
 3181         return(prevstate);
 3182 }
 3183 
 3184 uint32_t
 3185 KeSetEvent(kevent, increment, kwait)
 3186         nt_kevent               *kevent;
 3187         uint32_t                increment;
 3188         uint8_t                 kwait;
 3189 {
 3190         uint32_t                prevstate;
 3191         wait_block              *w;
 3192         nt_dispatch_header      *dh;
 3193         struct thread           *td;
 3194         wb_ext                  *we;
 3195 
 3196         mtx_lock(&ntoskrnl_dispatchlock);
 3197         prevstate = kevent->k_header.dh_sigstate;
 3198         dh = &kevent->k_header;
 3199 
 3200         if (IsListEmpty(&dh->dh_waitlisthead))
 3201                 /*
 3202                  * If there's nobody in the waitlist, just set
 3203                  * the state to signalled.
 3204                  */
 3205                 dh->dh_sigstate = 1;
 3206         else {
 3207                 /*
 3208                  * Get the first waiter. If this is a synchronization
 3209                  * event, just wake up that one thread (don't bother
 3210                  * setting the state to signalled since we're supposed
 3211                  * to automatically clear synchronization events anyway).
 3212                  *
 3213                  * If it's a notification event, or the first
 3214                  * waiter is doing a WAITTYPE_ALL wait, go through
 3215                  * the full wait satisfaction process.
 3216                  */
 3217                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
 3218                     wait_block, wb_waitlist);
 3219                 we = w->wb_ext;
 3220                 td = we->we_td;
 3221                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
 3222                     w->wb_waittype == WAITTYPE_ALL) {
 3223                         if (prevstate == 0) {
 3224                                 dh->dh_sigstate = 1;
 3225                                 ntoskrnl_waittest(dh, increment);
 3226                         }
 3227                 } else {
 3228                         w->wb_awakened |= TRUE;
 3229                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
 3230                             (increment * 4));
 3231                 }
 3232         }
 3233 
 3234         mtx_unlock(&ntoskrnl_dispatchlock);
 3235 
 3236         return(prevstate);
 3237 }
 3238 
 3239 void
 3240 KeClearEvent(kevent)
 3241         nt_kevent               *kevent;
 3242 {
 3243         kevent->k_header.dh_sigstate = FALSE;
 3244         return;
 3245 }
 3246 
 3247 uint32_t
 3248 KeReadStateEvent(kevent)
 3249         nt_kevent               *kevent;
 3250 {
 3251         return(kevent->k_header.dh_sigstate);
 3252 }
 3253 
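      /*
       * Illustrative sketch (editor's addition): the event pattern
       * served by the routines above, mirroring how this file itself
       * uses its DPC-queue events.  A synchronization event wakes a
       * single waiter and auto-clears; a notification event stays
       * signalled until KeResetEvent()/KeClearEvent() and wakes all
       * waiters.  Hypothetical NTOSKRNL_EXAMPLES guard.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static nt_kevent        example_ev;

      static void
      example_ev_init(void)
      {
              KeInitializeEvent(&example_ev, EVENT_TYPE_SYNC, FALSE);
      }

      /* Consumer side: sleeps until a producer signals the event. */
      static void
      example_ev_consumer(void)
      {
              KeWaitForSingleObject(&example_ev, 0, 0, TRUE, NULL);
      }

      /* Producer side: wakes exactly one waiter (see KeSetEvent()). */
      static void
      example_ev_producer(void)
      {
              KeSetEvent(&example_ev, IO_NO_INCREMENT, FALSE);
      }
      #endif /* NTOSKRNL_EXAMPLES */
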
 3254 /*
 3255  * The object manager in Windows is responsible for managing
 3256  * references and access to various types of objects, including
 3257  * device_objects, events, threads, timers and so on. However,
 3258  * there's a difference in the way objects are handled in user
 3259  * mode versus kernel mode.
 3260  *
 3261  * In user mode (i.e. Win32 applications), all objects are
 3262  * managed by the object manager. For example, when you create
 3263  * a timer or event object, you actually end up with an 
 3264  * object_header (for the object manager's bookkeeping
 3265  * purposes) and an object body (which contains the actual object
 3266  * structure, e.g. ktimer, kevent, etc...). This allows Windows
 3267  * to manage resource quotas and to enforce access restrictions
 3268  * on basically every kind of system object handled by the kernel.
 3269  *
 3270  * However, in kernel mode, you only end up using the object
 3271  * manager some of the time. For example, in a driver, you create
 3272  * a timer object by simply allocating the memory for a ktimer
 3273  * structure and initializing it with KeInitializeTimer(). Hence,
 3274  * the timer has no object_header and no reference counting or
 3275  * security/resource checks are done on it. The assumption in
 3276  * this case is that if you're running in kernel mode, you know
 3277  * what you're doing, and you're already at an elevated privilege
 3278  * anyway.
 3279  *
 3280  * There are some exceptions to this. The two most important ones
 3281  * for our purposes are device_objects and threads. We need to use
 3282  * the object manager to do reference counting on device_objects,
 3283  * and for threads, you can only get a pointer to a thread's
 3284  * dispatch header by using ObReferenceObjectByHandle() on the
 3285  * handle returned by PsCreateSystemThread().
 3286  */
 3287 
 3288 static ndis_status
 3289 ObReferenceObjectByHandle(handle, reqaccess, otype,
 3290     accessmode, object, handleinfo)
 3291         ndis_handle             handle;
 3292         uint32_t                reqaccess;
 3293         void                    *otype;
 3294         uint8_t                 accessmode;
 3295         void                    **object;
 3296         void                    **handleinfo;
 3297 {
 3298         nt_objref               *nr;
 3299 
 3300         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
 3301         if (nr == NULL)
 3302                 return(NDIS_STATUS_FAILURE);
 3303 
 3304         InitializeListHead((&nr->no_dh.dh_waitlisthead));
 3305         nr->no_obj = handle;
 3306         nr->no_dh.dh_type = DISP_TYPE_THREAD;
 3307         nr->no_dh.dh_sigstate = 0;
 3308         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
 3309             sizeof(uint32_t));
 3310         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
 3311         *object = nr;
 3312 
 3313         return(NDIS_STATUS_SUCCESS);
 3314 }
 3315 
 3316 static void
 3317 ObfDereferenceObject(object)
 3318         void                    *object;
 3319 {
 3320         nt_objref               *nr;
 3321 
 3322         nr = object;
 3323         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
 3324         free(nr, M_DEVBUF);
 3325 
 3326         return;
 3327 }
 3328 
 3329 static uint32_t
 3330 ZwClose(handle)
 3331         ndis_handle             handle;
 3332 {
 3333         return(STATUS_SUCCESS);
 3334 }
 3335 
 3336 /*
 3337  * This is here just in case the thread returns without calling
 3338  * PsTerminateSystemThread().
 3339  */
 3340 static void
 3341 ntoskrnl_thrfunc(arg)
 3342         void                    *arg;
 3343 {
 3344         thread_context          *thrctx;
 3345         uint32_t (*tfunc)(void *);
 3346         void                    *tctx;
 3347         uint32_t                rval;
 3348 
 3349         thrctx = arg;
 3350         tfunc = thrctx->tc_thrfunc;
 3351         tctx = thrctx->tc_thrctx;
 3352         free(thrctx, M_TEMP);
 3353 
 3354         rval = MSCALL1(tfunc, tctx);
 3355 
 3356         PsTerminateSystemThread(rval);
 3357         return; /* notreached */
 3358 }
 3359 
 3360 static ndis_status
 3361 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
 3362         clientid, thrfunc, thrctx)
 3363         ndis_handle             *handle;
 3364         uint32_t                reqaccess;
 3365         void                    *objattrs;
 3366         ndis_handle             phandle;
 3367         void                    *clientid;
 3368         void                    *thrfunc;
 3369         void                    *thrctx;
 3370 {
 3371         int                     error;
 3372         char                    tname[128];
 3373         thread_context          *tc;
 3374         struct proc             *p;
 3375 
 3376         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
 3377         if (tc == NULL)
 3378                 return(NDIS_STATUS_FAILURE);
 3379 
 3380         tc->tc_thrctx = thrctx;
 3381         tc->tc_thrfunc = thrfunc;
 3382 
 3383         snprintf(tname, sizeof(tname), "windows kthread %d", ntoskrnl_kth);
 3384         error = kthread_create(ntoskrnl_thrfunc, tc, &p,
 3385             RFHIGHPID, NDIS_KSTACK_PAGES, tname);
 3386         *handle = p;
 3387 
 3388         ntoskrnl_kth++;
 3389 
 3390         return(error);
 3391 }
 3392 
 3393 /*
 3394  * In Windows, the exit of a thread is an event that you're allowed
 3395  * to wait on, assuming you've obtained a reference to the thread using
 3396  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 3397  * simulate this behavior is to register each thread we create in a
 3398  * reference list, and if someone holds a reference to us, we poke
 3399  * them.
 3400  */
 3401 static ndis_status
 3402 PsTerminateSystemThread(status)
 3403         ndis_status             status;
 3404 {
 3405         struct nt_objref        *nr;
 3406 
 3407         mtx_lock(&ntoskrnl_dispatchlock);
 3408         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
 3409                 if (nr->no_obj != curthread->td_proc)
 3410                         continue;
 3411                 nr->no_dh.dh_sigstate = 1;
 3412                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
 3413                 break;
 3414         }
 3415         mtx_unlock(&ntoskrnl_dispatchlock);
 3416 
 3417         ntoskrnl_kth--;
 3418 
 3419 #if __FreeBSD_version < 502113
 3420         mtx_lock(&Giant);
 3421 #endif
 3422         kthread_exit(0);
 3423         return(0);      /* notreached */
 3424 }
 3425 
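      /*
       * Illustrative sketch (editor's addition): the create/reference/
       * wait pattern the two comments above describe.  This emulation
       * ignores the access/type/mode arguments of
       * ObReferenceObjectByHandle(), so zeros suffice.  NB: thread
       * functions are really invoked through the Windows calling-
       * convention wrapper (MSCALL1 in ntoskrnl_thrfunc() above);
       * this native-code sketch glosses over that.  Hypothetical
       * NTOSKRNL_EXAMPLES guard.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static uint32_t
      example_thread_func(ctx)
              void                    *ctx;
      {
              /*
               * Returning is safe: ntoskrnl_thrfunc() above calls
               * PsTerminateSystemThread() on our behalf.
               */
              return(0);
      }

      static void
      example_thread_join(void)
      {
              ndis_handle             th;
              void                    *thobj;

              if (PsCreateSystemThread(&th, 0, NULL, NULL, NULL,
                  (void *)example_thread_func, NULL) != 0)
                      return;

              if (ObReferenceObjectByHandle(th, 0, NULL, 0, &thobj,
                  NULL) == NDIS_STATUS_SUCCESS) {
                      /* Blocks until the thread signals its exit. */
                      KeWaitForSingleObject(thobj, 0, 0, TRUE, NULL);
                      ObfDereferenceObject(thobj);
              }
              ZwClose(th);
      }
      #endif /* NTOSKRNL_EXAMPLES */
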
 3426 static uint32_t
 3427 DbgPrint(char *fmt, ...)
 3428 {
 3429         va_list                 ap;
 3430 
 3431         if (bootverbose) {
 3432                 va_start(ap, fmt);
 3433                 vprintf(fmt, ap);
                      va_end(ap);
 3434         }
 3435 
 3436         return(STATUS_SUCCESS);
 3437 }
 3438 
 3439 static void
 3440 DbgBreakPoint(void)
 3441 {
 3442 
 3443 #if __FreeBSD_version < 502113
 3444         Debugger("DbgBreakPoint(): breakpoint");
 3445 #else
 3446         kdb_enter("DbgBreakPoint(): breakpoint");
 3447 #endif
 3448 }
 3449 
 3450 static void
 3451 ntoskrnl_timercall(arg)
 3452         void                    *arg;
 3453 {
 3454         ktimer                  *timer;
 3455         struct timeval          tv;
 3456         kdpc                    *dpc;
 3457 
 3458         mtx_lock(&ntoskrnl_dispatchlock);
 3459 
 3460         timer = arg;
 3461 
 3462 #ifdef NTOSKRNL_DEBUG_TIMERS
 3463         ntoskrnl_timer_fires++;
 3464 #endif
 3465         ntoskrnl_remove_timer(timer);
 3466 
 3467         /*
 3468          * This should never happen, but complain
 3469          * if it does.
 3470          */
 3471 
 3472         if (timer->k_header.dh_inserted == FALSE) {
 3473                 mtx_unlock(&ntoskrnl_dispatchlock);
 3474                 printf("NTOS: timer %p fired even though "
 3475                     "it was canceled\n", timer);
 3476                 return;
 3477         }
 3478 
 3479         /* Mark the timer as no longer being on the timer queue. */
 3480 
 3481         timer->k_header.dh_inserted = FALSE;
 3482 
 3483         /* Now signal the object and satisfy any waits on it. */
 3484 
 3485         timer->k_header.dh_sigstate = 1;
 3486         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
 3487 
 3488         /*
 3489          * If this is a periodic timer, re-arm it
 3490          * so it will fire again. We do this before
 3491          * calling any deferred procedure calls because
 3492          * it's possible the DPC might cancel the timer,
 3493          * in which case it would be wrong for us to
 3494          * re-arm it again afterwards.
 3495          */
 3496 
 3497         if (timer->k_period) {
 3498                 tv.tv_sec = 0;
 3499                 tv.tv_usec = timer->k_period * 1000;
 3500                 timer->k_header.dh_inserted = TRUE;
 3501                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
 3502 #ifdef NTOSKRNL_DEBUG_TIMERS
 3503                 ntoskrnl_timer_reloads++;
 3504 #endif
 3505         }
 3506 
 3507         dpc = timer->k_dpc;
 3508 
 3509         mtx_unlock(&ntoskrnl_dispatchlock);
 3510 
 3511         /* If there's a DPC associated with the timer, queue it up. */
 3512 
 3513         if (dpc != NULL)
 3514                 KeInsertQueueDpc(dpc, NULL, NULL);
 3515 
 3516         return;
 3517 }
 3518 
 3519 #ifdef NTOSKRNL_DEBUG_TIMERS
 3520 static int
 3521 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
 3522 {
 3523         int                     ret;
 3524 
 3525         ret = 0;
 3526         ntoskrnl_show_timers();
 3527         return (sysctl_handle_int(oidp, &ret, 0, req));
 3528 }
 3529 
 3530 static void
 3531 ntoskrnl_show_timers()
 3532 {
 3533         int                     i = 0;
 3534         list_entry              *l;
 3535 
 3536         mtx_lock_spin(&ntoskrnl_calllock);
 3537         l = ntoskrnl_calllist.nle_flink;
 3538         while(l != &ntoskrnl_calllist) {
 3539                 i++;
 3540                 l = l->nle_flink;
 3541         }
 3542         mtx_unlock_spin(&ntoskrnl_calllock);
 3543 
 3544         printf("\n");
 3545         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
 3546         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
 3547         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
 3548         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
 3549         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
 3550         printf("\n");
 3551 
 3552         return;
 3553 }
 3554 #endif
 3555 
 3556 /*
 3557  * Must be called with dispatcher lock held.
 3558  */
 3559 
 3560 static void
 3561 ntoskrnl_insert_timer(timer, ticks)
 3562         ktimer                  *timer;
 3563         int                     ticks;
 3564 {
 3565         callout_entry           *e;
 3566         list_entry              *l;
 3567         struct callout          *c;
 3568 
 3569         /*
 3570          * Try to allocate a timer from the preallocated pool.
 3571          */
 3572         mtx_lock_spin(&ntoskrnl_calllock);
 3573         if (IsListEmpty(&ntoskrnl_calllist)) {
 3574                 mtx_unlock_spin(&ntoskrnl_calllock);
 3575 #ifdef NTOSKRNL_DEBUG_TIMERS
 3576                 ntoskrnl_show_timers();
 3577 #endif
 3578                 panic("out of timers!");
 3579         }
 3580         l = RemoveHeadList(&ntoskrnl_calllist);
 3581         mtx_unlock_spin(&ntoskrnl_calllock);
 3582 
 3583         e = CONTAINING_RECORD(l, callout_entry, ce_list);
 3584         c = &e->ce_callout;
 3585 
 3586         timer->k_callout = c;
 3587 
 3588         callout_init(c, CALLOUT_MPSAFE);
 3589         callout_reset(c, ticks, ntoskrnl_timercall, timer);
 3590 
 3591         return;
 3592 }
 3593 
 3594 static void
 3595 ntoskrnl_remove_timer(timer)
 3596         ktimer                  *timer;
 3597 {
 3598         callout_entry           *e;
 3599 
 3600         e = (callout_entry *)timer->k_callout;
 3601         callout_stop(timer->k_callout);
 3602 
 3603         mtx_lock_spin(&ntoskrnl_calllock);
 3604         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
 3605         mtx_unlock_spin(&ntoskrnl_calllock);
 3606 
 3607         return;
 3608 }
 3609 
 3610 void
 3611 KeInitializeTimer(timer)
 3612         ktimer                  *timer;
 3613 {
 3614         if (timer == NULL)
 3615                 return;
 3616 
 3617         KeInitializeTimerEx(timer,  EVENT_TYPE_NOTIFY);
 3618 
 3619         return;
 3620 }
 3621 
 3622 void
 3623 KeInitializeTimerEx(timer, type)
 3624         ktimer                  *timer;
 3625         uint32_t                type;
 3626 {
 3627         if (timer == NULL)
 3628                 return;
 3629 
 3630         bzero((char *)timer, sizeof(ktimer));
 3631         InitializeListHead((&timer->k_header.dh_waitlisthead));
 3632         timer->k_header.dh_sigstate = FALSE;
 3633         timer->k_header.dh_inserted = FALSE;
 3634         if (type == EVENT_TYPE_NOTIFY)
 3635                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
 3636         else
 3637                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
 3638         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
 3639 
 3640         return;
 3641 }
 3642 
 3643 /*
 3644  * DPC subsystem. A Windows Deferred Procedure Call has the following
 3645  * properties:
 3646  * - It runs at DISPATCH_LEVEL.
 3647  * - It can have one of three importance values that control when it
 3648  *   runs relative to other DPCs in the queue.
 3649  * - On SMP systems, it can be set to run on a specific processor.
 3650  * In order to satisfy the last property, we create a DPC thread for
 3651  * each CPU in the system and bind it to that CPU. Each thread
 3652  * maintains a single dispatch queue: low-importance DPCs are queued
 3653  * at the tail, higher-importance DPCs at the head, so they run first.
 3654  *
 3655  * In Windows, interrupt handlers run as DPCs. (Not to be confused
 3656  * with ISRs, which run in interrupt context and can preempt DPCs.)
 3657  * Interrupt-handler DPCs are given the highest importance so that
 3658  * they'll take precedence over timers and other things.
 3659  */
 3660 
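      /*
       * Illustrative sketch (editor's addition): how a driver arms a
       * DPC against the queues managed below.  The deferred function
       * is really reached via MSCALL4 with (dpc, ctx, sysarg1,
       * sysarg2), i.e. in the driver's own calling convention; this
       * native sketch glosses over that.  Hypothetical
       * NTOSKRNL_EXAMPLES guard.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static void
      example_dpc_func(dpc, ctx, sysarg1, sysarg2)
              kdpc                    *dpc;
              void                    *ctx;
              void                    *sysarg1;
              void                    *sysarg2;
      {
              /* Runs in a DPC thread, at elevated priority. */
      }

      static kdpc             example_dpc;

      static void
      example_dpc_arm(void)
      {
              KeInitializeDpc(&example_dpc, (void *)example_dpc_func, NULL);
              KeSetImportanceDpc(&example_dpc, KDPC_IMPORTANCE_HIGH);
              KeSetTargetProcessorDpc(&example_dpc, 0); /* CPU 0 */
              KeInsertQueueDpc(&example_dpc, NULL, NULL);
      }
      #endif /* NTOSKRNL_EXAMPLES */
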
 3661 static void
 3662 ntoskrnl_dpc_thread(arg)
 3663         void                    *arg;
 3664 {
 3665         kdpc_queue              *kq;
 3666         kdpc                    *d;
 3667         list_entry              *l;
 3668         uint8_t                 irql;
 3669 
 3670         kq = arg;
 3671 
 3672         InitializeListHead(&kq->kq_disp);
 3673         kq->kq_td = curthread;
 3674         kq->kq_exit = 0;
 3675         kq->kq_running = FALSE;
 3676         KeInitializeSpinLock(&kq->kq_lock);
 3677         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 3678         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
 3679 
 3680         /*
 3681          * Elevate our priority. DPCs are used to run interrupt
 3682          * handlers, and they should trigger as soon as possible
 3683          * once scheduled by an ISR.
 3684          */
 3685 
 3686         mtx_lock_spin(&sched_lock);
 3687 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3688 #if __FreeBSD_version >= 502102
 3689         sched_bind(curthread, kq->kq_cpu);
 3690 #endif
 3691 #endif
 3692         sched_prio(curthread, PRI_MIN_KERN);
 3693 #if __FreeBSD_version < 600000
 3694         curthread->td_base_pri = PRI_MIN_KERN;
 3695 #endif
 3696         mtx_unlock_spin(&sched_lock);
 3697 
 3698         while (1) {
 3699                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 3700 
 3701                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 3702 
 3703                 if (kq->kq_exit) {
 3704                         KeReleaseSpinLock(&kq->kq_lock, irql);
 3705                         break;
 3706                 }
 3707 
 3708                 kq->kq_running = TRUE;
 3709 
 3710                 while (!IsListEmpty(&kq->kq_disp)) {
 3711                         l = RemoveHeadList((&kq->kq_disp));
 3712                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3713                         InitializeListHead((&d->k_dpclistentry));
 3714                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 3715                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
 3716                             d->k_sysarg1, d->k_sysarg2);
 3717                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3718                 }
 3719 
 3720                 kq->kq_running = FALSE;
 3721 
 3722                 KeReleaseSpinLock(&kq->kq_lock, irql);
 3723 
 3724                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
 3725         }
 3726 
 3727 #if __FreeBSD_version < 502113
 3728         mtx_lock(&Giant);
 3729 #endif
 3730         kthread_exit(0);
 3731         return; /* notreached */
 3732 }
 3733 
 3734 static void
 3735 ntoskrnl_destroy_dpc_threads(void)
 3736 {
 3737         kdpc_queue              *kq;
 3738         kdpc                    dpc;
 3739         int                     i;
 3740 
 3741         kq = kq_queues;
 3742 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3743         for (i = 0; i < mp_ncpus; i++) {
 3744 #else
 3745         for (i = 0; i < 1; i++) {
 3746 #endif
 3747                 kq = kq_queues + i;
 3748 
 3749                 kq->kq_exit = 1;
 3750                 KeInitializeDpc(&dpc, NULL, NULL);
 3751                 KeSetTargetProcessorDpc(&dpc, i);
 3752                 KeInsertQueueDpc(&dpc, NULL, NULL);
 3753                 tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", 0);
 3754         }
 3755 
 3756         return;
 3757 }
 3758 
 3759 static uint8_t
 3760 ntoskrnl_insert_dpc(head, dpc)
 3761         list_entry              *head;
 3762         kdpc                    *dpc;
 3763 {
 3764         list_entry              *l;
 3765         kdpc                    *d;
 3766 
 3767         l = head->nle_flink;
 3768         while (l != head) {
 3769                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3770                 if (d == dpc)
 3771                         return(FALSE);
 3772                 l = l->nle_flink;
 3773         }
 3774 
 3775         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
 3776                 InsertTailList((head), (&dpc->k_dpclistentry));
 3777         else
 3778                 InsertHeadList((head), (&dpc->k_dpclistentry));
 3779 
 3780         return (TRUE);
 3781 }
 3782 
 3783 void
 3784 KeInitializeDpc(dpc, dpcfunc, dpcctx)
 3785         kdpc                    *dpc;
 3786         void                    *dpcfunc;
 3787         void                    *dpcctx;
 3788 {
 3789 
 3790         if (dpc == NULL)
 3791                 return;
 3792 
 3793         dpc->k_deferedfunc = dpcfunc;
 3794         dpc->k_deferredctx = dpcctx;
 3795         dpc->k_num = KDPC_CPU_DEFAULT;
 3796         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
 3797         InitializeListHead((&dpc->k_dpclistentry));
 3798 
 3799         return;
 3800 }
 3801 
 3802 uint8_t
 3803 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
 3804         kdpc                    *dpc;
 3805         void                    *sysarg1;
 3806         void                    *sysarg2;
 3807 {
 3808         kdpc_queue              *kq;
 3809         uint8_t                 r;
 3810         uint8_t                 irql;
 3811 
 3812         if (dpc == NULL)
 3813                 return(FALSE);
 3814 
 3815         kq = kq_queues;
 3816 
 3817 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3818         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 3819 
 3820         /*
 3821          * By default, the DPC is queued to run on the same CPU
 3822          * that scheduled it.
 3823          */
 3824 
 3825         if (dpc->k_num == KDPC_CPU_DEFAULT)
 3826                 kq += curthread->td_oncpu;
 3827         else
 3828                 kq += dpc->k_num;
 3829         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3830 #else
 3831         KeAcquireSpinLock(&kq->kq_lock, &irql);
 3832 #endif
 3833 
 3834         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
 3835         if (r == TRUE) {
 3836                 dpc->k_sysarg1 = sysarg1;
 3837                 dpc->k_sysarg2 = sysarg2;
 3838         }
 3839         KeReleaseSpinLock(&kq->kq_lock, irql);
 3840 
 3841         if (r == FALSE)
 3842                 return(r);
 3843 
 3844         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 3845 
 3846         return(r);
 3847 }
 3848 
 3849 uint8_t
 3850 KeRemoveQueueDpc(dpc)
 3851         kdpc                    *dpc;
 3852 {
 3853         kdpc_queue              *kq;
 3854         uint8_t                 irql;
 3855 
 3856         if (dpc == NULL)
 3857                 return(FALSE);
 3858 
 3859 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3860         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 3861 
 3862         kq = kq_queues + dpc->k_num;
 3863 
 3864         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3865 #else
 3866         kq = kq_queues;
 3867         KeAcquireSpinLock(&kq->kq_lock, &irql);
 3868 #endif
 3869 
 3870         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
 3871                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 3872                 KeLowerIrql(irql);
 3873                 return(FALSE);
 3874         }
 3875 
 3876         RemoveEntryList((&dpc->k_dpclistentry));
 3877         InitializeListHead((&dpc->k_dpclistentry));
 3878 
 3879         KeReleaseSpinLock(&kq->kq_lock, irql);
 3880 
 3881         return(TRUE);
 3882 }
 3883 
 3884 void
 3885 KeSetImportanceDpc(dpc, imp)
 3886         kdpc                    *dpc;
 3887         uint32_t                imp;
 3888 {
 3889         if (imp != KDPC_IMPORTANCE_LOW &&
 3890             imp != KDPC_IMPORTANCE_MEDIUM &&
 3891             imp != KDPC_IMPORTANCE_HIGH)
 3892                 return;
 3893 
 3894         dpc->k_importance = (uint8_t)imp;
 3895         return;
 3896 }
 3897 
 3898 void
 3899 KeSetTargetProcessorDpc(dpc, cpu)
 3900         kdpc                    *dpc;
 3901         uint8_t                 cpu;
 3902 {
 3903         if (cpu >= mp_ncpus)
 3904                 return;
 3905 
 3906         dpc->k_num = cpu;
 3907         return;
 3908 }
 3909 
 3910 void
 3911 KeFlushQueuedDpcs(void)
 3912 {
 3913         kdpc_queue              *kq;
 3914         int                     i;
 3915 
 3916         /*
 3917          * Poke each DPC queue and wait
 3918          * for them to drain.
 3919          */
 3920 
 3921 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3922         for (i = 0; i < mp_ncpus; i++) {
 3923 #else
 3924         for (i = 0; i < 1; i++) {
 3925 #endif
 3926                 kq = kq_queues + i;
 3927                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 3928                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
 3929         }
 3930 
 3931         return;
 3932 }
 3933 
 3934 uint32_t
 3935 KeGetCurrentProcessorNumber(void)
 3936 {
 3937         return((uint32_t)curthread->td_oncpu);
 3938 }
 3939 
 3940 uint8_t
 3941 KeSetTimerEx(timer, duetime, period, dpc)
 3942         ktimer                  *timer;
 3943         int64_t                 duetime;
 3944         uint32_t                period;
 3945         kdpc                    *dpc;
 3946 {
 3947         struct timeval          tv;
 3948         uint64_t                curtime;
 3949         uint8_t                 pending;
 3950 
 3951         if (timer == NULL)
 3952                 return(FALSE);
 3953 
 3954         mtx_lock(&ntoskrnl_dispatchlock);
 3955 
 3956         if (timer->k_header.dh_inserted == TRUE) {
 3957                 ntoskrnl_remove_timer(timer);
 3958 #ifdef NTOSKRNL_DEBUG_TIMERS
 3959                 ntoskrnl_timer_cancels++;
 3960 #endif
 3961                 timer->k_header.dh_inserted = FALSE;
 3962                 pending = TRUE;
 3963         } else
 3964                 pending = FALSE;
 3965 
 3966         timer->k_duetime = duetime;
 3967         timer->k_period = period;
 3968         timer->k_header.dh_sigstate = FALSE;
 3969         timer->k_dpc = dpc;
 3970 
 3971         if (duetime < 0) {
 3972                 tv.tv_sec = - (duetime) / 10000000;
 3973                 tv.tv_usec = (- (duetime) / 10) -
 3974                     (tv.tv_sec * 1000000);
 3975         } else {
 3976                 ntoskrnl_time(&curtime);
 3977                 if (duetime < curtime)
 3978                         tv.tv_sec = tv.tv_usec = 0;
 3979                 else {
 3980                         tv.tv_sec = ((duetime) - curtime) / 10000000;
 3981                         tv.tv_usec = ((duetime) - curtime) / 10 -
 3982                             (tv.tv_sec * 1000000);
 3983                 }
 3984         }
 3985 
 3986         timer->k_header.dh_inserted = TRUE;
 3987         ntoskrnl_insert_timer(timer, tvtohz(&tv));
 3988 #ifdef NTOSKRNL_DEBUG_TIMERS
 3989         ntoskrnl_timer_sets++;
 3990 #endif
 3991 
 3992         mtx_unlock(&ntoskrnl_dispatchlock);
 3993 
 3994         return(pending);
 3995 }
 3996 
 3997 uint8_t
 3998 KeSetTimer(timer, duetime, dpc)
 3999         ktimer                  *timer;
 4000         int64_t                 duetime;
 4001         kdpc                    *dpc;
 4002 {
 4003         return (KeSetTimerEx(timer, duetime, 0, dpc));
 4004 }
 4005 
 4006 /*
 4007  * The Windows DDK documentation seems to say that cancelling
 4008  * a timer that has a DPC will result in the DPC also being
 4009  * cancelled, but this isn't really the case.
 4010  */
 4011 
 4012 uint8_t
 4013 KeCancelTimer(timer)
 4014         ktimer                  *timer;
 4015 {
 4016         uint8_t                 pending;
 4017 
 4018         if (timer == NULL)
 4019                 return(FALSE);
 4020 
 4021         mtx_lock(&ntoskrnl_dispatchlock);
 4022 
 4023         pending = timer->k_header.dh_inserted;
 4024 
 4025         if (timer->k_header.dh_inserted == TRUE) {
 4026                 timer->k_header.dh_inserted = FALSE;
 4027                 ntoskrnl_remove_timer(timer);
 4028 #ifdef NTOSKRNL_DEBUG_TIMERS
 4029                 ntoskrnl_timer_cancels++;
 4030 #endif
 4031         }
 4032 
 4033         mtx_unlock(&ntoskrnl_dispatchlock);
 4034 
 4035         return(pending);
 4036 }
 4037 
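      /*
       * Illustrative sketch (editor's addition): a periodic timer
       * with a DPC, including the teardown the note above calls for --
       * cancelling the timer does not dequeue an already-queued DPC,
       * so a careful driver also calls KeRemoveQueueDpc() and
       * KeFlushQueuedDpcs().  Duetime is in 100ns units and negative
       * means relative (see KeSetTimerEx() above); the period is in
       * milliseconds.  Hypothetical NTOSKRNL_EXAMPLES guard;
       * example_dpc comes from the DPC sketch earlier in this file.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static ktimer           example_timer;

      static void
      example_timer_start(void)
      {
              KeInitializeTimerEx(&example_timer, EVENT_TYPE_NOTIFY);
              /* First expiry in 1 second, then every 500ms. */
              KeSetTimerEx(&example_timer, -10000000LL, 500, &example_dpc);
      }

      static void
      example_timer_stop(void)
      {
              KeCancelTimer(&example_timer);
              KeRemoveQueueDpc(&example_dpc);
              KeFlushQueuedDpcs();
      }
      #endif /* NTOSKRNL_EXAMPLES */
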
 4038 uint8_t
 4039 KeReadStateTimer(timer)
 4040         ktimer                  *timer;
 4041 {
 4042         return(timer->k_header.dh_sigstate);
 4043 }
 4044 
 4045 static void
 4046 dummy()
 4047 {
 4048         printf ("ntoskrnl dummy called...\n");
 4049         return;
 4050 }
 4051 
 4052 
 4053 image_patch_table ntoskrnl_functbl[] = {
 4054         IMPORT_SFUNC(RtlCompareMemory, 3),
 4055         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
 4056         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
 4057         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
 4058         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
 4059         IMPORT_SFUNC(RtlInitAnsiString, 2),
 4060         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
 4061         IMPORT_SFUNC(RtlInitUnicodeString, 2),
 4062         IMPORT_SFUNC(RtlFreeAnsiString, 1),
 4063         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
 4064         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
 4065         IMPORT_CFUNC(sprintf, 0),
 4066         IMPORT_CFUNC(vsprintf, 0),
 4067         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
 4068         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
 4069         IMPORT_CFUNC(DbgPrint, 0),
 4070         IMPORT_SFUNC(DbgBreakPoint, 0),
 4071         IMPORT_CFUNC(strncmp, 0),
 4072         IMPORT_CFUNC(strcmp, 0),
 4073         IMPORT_CFUNC(strncpy, 0),
 4074         IMPORT_CFUNC(strcpy, 0),
 4075         IMPORT_CFUNC(strlen, 0),
 4076         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
 4077         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
 4078         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
 4079         IMPORT_CFUNC_MAP(strchr, index, 0),
 4080         IMPORT_CFUNC(memcpy, 0),
 4081         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
 4082         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
 4083         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
 4084         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
 4085         IMPORT_FFUNC(IofCallDriver, 2),
 4086         IMPORT_FFUNC(IofCompleteRequest, 2),
 4087         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
 4088         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
 4089         IMPORT_SFUNC(IoCancelIrp, 1),
 4090         IMPORT_SFUNC(IoConnectInterrupt, 11),
 4091         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
 4092         IMPORT_SFUNC(IoCreateDevice, 7),
 4093         IMPORT_SFUNC(IoDeleteDevice, 1),
 4094         IMPORT_SFUNC(IoGetAttachedDevice, 1),
 4095         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
 4096         IMPORT_SFUNC(IoDetachDevice, 1),
 4097         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
 4098         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
 4099         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
 4100         IMPORT_SFUNC(IoAllocateIrp, 2),
 4101         IMPORT_SFUNC(IoReuseIrp, 2),
 4102         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
 4103         IMPORT_SFUNC(IoFreeIrp, 1),
 4104         IMPORT_SFUNC(IoInitializeIrp, 3),
 4105         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
 4106         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
 4107         IMPORT_SFUNC(KeSynchronizeExecution, 3),
 4108         IMPORT_SFUNC(KeWaitForSingleObject, 5),
 4109         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
 4110         IMPORT_SFUNC(_allmul, 4),
 4111         IMPORT_SFUNC(_alldiv, 4),
 4112         IMPORT_SFUNC(_allrem, 4),
 4113         IMPORT_RFUNC(_allshr, 0),
 4114         IMPORT_RFUNC(_allshl, 0),
 4115         IMPORT_SFUNC(_aullmul, 4),
 4116         IMPORT_SFUNC(_aulldiv, 4),
 4117         IMPORT_SFUNC(_aullrem, 4),
 4118         IMPORT_RFUNC(_aullshr, 0),
 4119         IMPORT_RFUNC(_aullshl, 0),
 4120         IMPORT_CFUNC(atoi, 0),
 4121         IMPORT_CFUNC(atol, 0),
 4122         IMPORT_CFUNC(rand, 0),
 4123         IMPORT_CFUNC(srand, 0),
 4124         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
 4125         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
 4126         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
 4127         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
 4128         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
 4129         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
 4130         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
 4131         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
 4132         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
 4133         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
 4134         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
 4135         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
 4136         IMPORT_SFUNC(ExQueryDepthSList, 1),
 4137         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
 4138                 InterlockedPopEntrySList, 1),
 4139         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
 4140                 InterlockedPushEntrySList, 2),
 4141         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
 4142         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
 4143         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
 4144         IMPORT_SFUNC(ExFreePool, 1),
 4145 #ifdef __i386__
 4146         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
 4147         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
 4148         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
 4149 #else
 4150         /*
 4151          * For AMD64, we can get away with just mapping
 4152          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
 4153          * because the calling conventions end up being the same.
 4154          * On i386, we have to be careful because KfAcquireSpinLock()
 4155          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
 4156          */
 4157         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
 4158         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
 4159         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
 4160 #endif
 4161         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
 4162         IMPORT_FFUNC(InterlockedIncrement, 1),
 4163         IMPORT_FFUNC(InterlockedDecrement, 1),
 4164         IMPORT_FFUNC(InterlockedExchange, 2),
 4165         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
 4166         IMPORT_SFUNC(IoAllocateMdl, 5),
 4167         IMPORT_SFUNC(IoFreeMdl, 1),
 4168         IMPORT_SFUNC(MmSizeOfMdl, 1),
 4169         IMPORT_SFUNC(MmMapLockedPages, 2),
 4170         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
 4171         IMPORT_SFUNC(MmUnmapLockedPages, 2),
 4172         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
 4173         IMPORT_SFUNC(MmIsAddressValid, 1),
 4174         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
 4175         IMPORT_SFUNC(MmUnmapIoSpace, 2),
 4176         IMPORT_SFUNC(KeInitializeSpinLock, 1),
 4177         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
 4178         IMPORT_SFUNC(IoGetDeviceProperty, 5),
 4179         IMPORT_SFUNC(IoAllocateWorkItem, 1),
 4180         IMPORT_SFUNC(IoFreeWorkItem, 1),
 4181         IMPORT_SFUNC(IoQueueWorkItem, 4),
 4182         IMPORT_SFUNC(ExQueueWorkItem, 2),
 4183         IMPORT_SFUNC(ntoskrnl_workitem, 2),
 4184         IMPORT_SFUNC(KeInitializeMutex, 2),
 4185         IMPORT_SFUNC(KeReleaseMutex, 2),
 4186         IMPORT_SFUNC(KeReadStateMutex, 1),
 4187         IMPORT_SFUNC(KeInitializeEvent, 3),
 4188         IMPORT_SFUNC(KeSetEvent, 3),
 4189         IMPORT_SFUNC(KeResetEvent, 1),
 4190         IMPORT_SFUNC(KeClearEvent, 1),
 4191         IMPORT_SFUNC(KeReadStateEvent, 1),
 4192         IMPORT_SFUNC(KeInitializeTimer, 1),
 4193         IMPORT_SFUNC(KeInitializeTimerEx, 2),
 4194         IMPORT_SFUNC(KeSetTimer, 3),
 4195         IMPORT_SFUNC(KeSetTimerEx, 4),
 4196         IMPORT_SFUNC(KeCancelTimer, 1),
 4197         IMPORT_SFUNC(KeReadStateTimer, 1),
 4198         IMPORT_SFUNC(KeInitializeDpc, 3),
 4199         IMPORT_SFUNC(KeInsertQueueDpc, 3),
 4200         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
 4201         IMPORT_SFUNC(KeSetImportanceDpc, 2),
 4202         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
 4203         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
 4204         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
 4205         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
 4206         IMPORT_FFUNC(ObfDereferenceObject, 1),
 4207         IMPORT_SFUNC(ZwClose, 1),
 4208         IMPORT_SFUNC(PsCreateSystemThread, 7),
 4209         IMPORT_SFUNC(PsTerminateSystemThread, 1),
 4210 
 4211         /*
 4212          * This last entry is a catch-all for any function we haven't
 4213          * implemented yet. The PE import list patching routine will
 4214          * use it for any function that doesn't have an explicit match
 4215          * in this table.
 4216          */
 4217 
 4218         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
 4219 
 4220         /* End of list. */
 4221 
 4222         { NULL, NULL, NULL }
 4223 };
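
      /*
       * Illustrative sketch (editor's addition): how a PE import-
       * patching loop can consume the table above.  An import name
       * with no explicit match falls through to the NULL-named
       * catch-all entry, which routes the call to dummy().  The
       * field name ipt_name is an assumption about image_patch_table
       * based on the initializers above; the real patch loop lives
       * in the windrv/PE loader code, not in this file.  Hypothetical
       * NTOSKRNL_EXAMPLES guard.
       */
      #ifdef NTOSKRNL_EXAMPLES
      static image_patch_table *
      example_find_import(name)
              const char              *name;
      {
              image_patch_table       *p;

              for (p = ntoskrnl_functbl; p->ipt_name != NULL; p++)
                      if (strcmp(p->ipt_name, name) == 0)
                              return(p);
              return(p);              /* the catch-all entry */
      }
      #endif /* NTOSKRNL_EXAMPLES */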
