The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/compat/ndis/subr_ntoskrnl.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2003
    3  *      Bill Paul <wpaul@windriver.com>.  All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 3. All advertising materials mentioning features or use of this software
   14  *    must display the following acknowledgement:
   15  *      This product includes software developed by Bill Paul.
   16  * 4. Neither the name of the author nor the names of any co-contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   30  * THE POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 #include <sys/ctype.h>
   37 #include <sys/unistd.h>
   38 #include <sys/param.h>
   39 #include <sys/types.h>
   40 #include <sys/errno.h>
   41 #include <sys/systm.h>
   42 #include <sys/malloc.h>
   43 #include <sys/lock.h>
   44 #include <sys/mutex.h>
   45 
   46 #include <sys/callout.h>
   47 #if __FreeBSD_version > 502113
   48 #include <sys/kdb.h>
   49 #endif
   50 #include <sys/kernel.h>
   51 #include <sys/proc.h>
   52 #include <sys/condvar.h>
   53 #include <sys/kthread.h>
   54 #include <sys/module.h>
   55 #include <sys/smp.h>
   56 #include <sys/sched.h>
   57 #include <sys/sysctl.h>
   58 
   59 #include <machine/atomic.h>
   60 #include <machine/clock.h>
   61 #include <machine/bus.h>
   62 #include <machine/stdarg.h>
   63 #include <machine/resource.h>
   64 
   65 #include <sys/bus.h>
   66 #include <sys/rman.h>
   67 
   68 #include <vm/vm.h>
   69 #include <vm/vm_param.h>
   70 #include <vm/pmap.h>
   71 #include <vm/uma.h>
   72 #include <vm/vm_kern.h>
   73 #include <vm/vm_map.h>
   74 
   75 #include <compat/ndis/pe_var.h>
   76 #include <compat/ndis/cfg_var.h>
   77 #include <compat/ndis/resource_var.h>
   78 #include <compat/ndis/ntoskrnl_var.h>
   79 #include <compat/ndis/hal_var.h>
   80 #include <compat/ndis/ndis_var.h>
   81 
   82 #ifdef NTOSKRNL_DEBUG_TIMERS
   83 static int sysctl_show_timers(SYSCTL_HANDLER_ARGS);
   84 
   85 SYSCTL_PROC(_debug, OID_AUTO, ntoskrnl_timers, CTLFLAG_RW, 0, 0,
   86         sysctl_show_timers, "I", "Show ntoskrnl timer stats");
   87 #endif
   88 
   89 struct kdpc_queue {
   90         list_entry              kq_disp;
   91         struct thread           *kq_td;
   92         int                     kq_cpu;
   93         int                     kq_exit;
   94         int                     kq_running;
   95         kspin_lock              kq_lock;
   96         nt_kevent               kq_proc;
   97         nt_kevent               kq_done;
   98 };
   99 
  100 typedef struct kdpc_queue kdpc_queue;
  101 
  102 struct wb_ext {
  103         struct cv               we_cv;
  104         struct thread           *we_td;
  105 };
  106 
  107 typedef struct wb_ext wb_ext;
  108 
  109 #define NTOSKRNL_TIMEOUTS       256
  110 #ifdef NTOSKRNL_DEBUG_TIMERS
  111 static uint64_t ntoskrnl_timer_fires;
  112 static uint64_t ntoskrnl_timer_sets;
  113 static uint64_t ntoskrnl_timer_reloads;
  114 static uint64_t ntoskrnl_timer_cancels;
  115 #endif
  116 
  117 struct callout_entry {
  118         struct callout          ce_callout;
  119         list_entry              ce_list;
  120 };
  121 
  122 typedef struct callout_entry callout_entry;
  123 
  124 static struct list_entry ntoskrnl_calllist;
  125 static struct mtx ntoskrnl_calllock;
  126 
  127 static struct list_entry ntoskrnl_intlist;
  128 static kspin_lock ntoskrnl_intlock;
  129 
  130 static uint8_t RtlEqualUnicodeString(unicode_string *,
  131         unicode_string *, uint8_t);
  132 static void RtlCopyUnicodeString(unicode_string *,
  133         unicode_string *);
  134 static irp *IoBuildSynchronousFsdRequest(uint32_t, device_object *,
  135          void *, uint32_t, uint64_t *, nt_kevent *, io_status_block *);
  136 static irp *IoBuildAsynchronousFsdRequest(uint32_t,
  137         device_object *, void *, uint32_t, uint64_t *, io_status_block *);
  138 static irp *IoBuildDeviceIoControlRequest(uint32_t,
  139         device_object *, void *, uint32_t, void *, uint32_t,
  140         uint8_t, nt_kevent *, io_status_block *);
  141 static irp *IoAllocateIrp(uint8_t, uint8_t);
  142 static void IoReuseIrp(irp *, uint32_t);
  143 static void IoFreeIrp(irp *);
  144 static void IoInitializeIrp(irp *, uint16_t, uint8_t);
  145 static irp *IoMakeAssociatedIrp(irp *, uint8_t);
  146 static uint32_t KeWaitForMultipleObjects(uint32_t,
  147         nt_dispatch_header **, uint32_t, uint32_t, uint32_t, uint8_t,
  148         int64_t *, wait_block *);
  149 static void ntoskrnl_waittest(nt_dispatch_header *, uint32_t);
  150 static void ntoskrnl_satisfy_wait(nt_dispatch_header *, struct thread *);
  151 static void ntoskrnl_satisfy_multiple_waits(wait_block *);
  152 static int ntoskrnl_is_signalled(nt_dispatch_header *, struct thread *);
  153 static void ntoskrnl_insert_timer(ktimer *, int);
  154 static void ntoskrnl_remove_timer(ktimer *);
  155 #ifdef NTOSKRNL_DEBUG_TIMERS
  156 static void ntoskrnl_show_timers(void);
  157 #endif
  158 static void ntoskrnl_timercall(void *);
  159 static void ntoskrnl_dpc_thread(void *);
  160 static void ntoskrnl_destroy_dpc_threads(void);
  161 static void ntoskrnl_destroy_workitem_threads(void);
  162 static void ntoskrnl_workitem_thread(void *);
  163 static void ntoskrnl_workitem(device_object *, void *);
  164 static void ntoskrnl_unicode_to_ascii(uint16_t *, char *, int);
  165 static void ntoskrnl_ascii_to_unicode(char *, uint16_t *, int);
  166 static uint8_t ntoskrnl_insert_dpc(list_entry *, kdpc *);
  167 static void WRITE_REGISTER_USHORT(uint16_t *, uint16_t);
  168 static uint16_t READ_REGISTER_USHORT(uint16_t *);
  169 static void WRITE_REGISTER_ULONG(uint32_t *, uint32_t);
  170 static uint32_t READ_REGISTER_ULONG(uint32_t *);
  171 static void WRITE_REGISTER_UCHAR(uint8_t *, uint8_t);
  172 static uint8_t READ_REGISTER_UCHAR(uint8_t *);
  173 static int64_t _allmul(int64_t, int64_t);
  174 static int64_t _alldiv(int64_t, int64_t);
  175 static int64_t _allrem(int64_t, int64_t);
  176 static int64_t _allshr(int64_t, uint8_t);
  177 static int64_t _allshl(int64_t, uint8_t);
  178 static uint64_t _aullmul(uint64_t, uint64_t);
  179 static uint64_t _aulldiv(uint64_t, uint64_t);
  180 static uint64_t _aullrem(uint64_t, uint64_t);
  181 static uint64_t _aullshr(uint64_t, uint8_t);
  182 static uint64_t _aullshl(uint64_t, uint8_t);
  183 static slist_entry *ntoskrnl_pushsl(slist_header *, slist_entry *);
  184 static slist_entry *ntoskrnl_popsl(slist_header *);
  185 static void ExInitializePagedLookasideList(paged_lookaside_list *,
  186         lookaside_alloc_func *, lookaside_free_func *,
  187         uint32_t, size_t, uint32_t, uint16_t);
  188 static void ExDeletePagedLookasideList(paged_lookaside_list *);
  189 static void ExInitializeNPagedLookasideList(npaged_lookaside_list *,
  190         lookaside_alloc_func *, lookaside_free_func *,
  191         uint32_t, size_t, uint32_t, uint16_t);
  192 static void ExDeleteNPagedLookasideList(npaged_lookaside_list *);
  193 static slist_entry
  194         *ExInterlockedPushEntrySList(slist_header *,
  195         slist_entry *, kspin_lock *);
  196 static slist_entry
  197         *ExInterlockedPopEntrySList(slist_header *, kspin_lock *);
  198 static uint32_t InterlockedIncrement(volatile uint32_t *);
  199 static uint32_t InterlockedDecrement(volatile uint32_t *);
  200 static void ExInterlockedAddLargeStatistic(uint64_t *, uint32_t);
  201 static uint32_t MmSizeOfMdl(void *, size_t);
  202 static void *MmMapLockedPages(mdl *, uint8_t);
  203 static void *MmMapLockedPagesSpecifyCache(mdl *,
  204         uint8_t, uint32_t, void *, uint32_t, uint32_t);
  205 static void MmUnmapLockedPages(void *, mdl *);
  206 static uint8_t MmIsAddressValid(void *);
  207 static device_t ntoskrnl_finddev(device_t, uint64_t, struct resource **);
  208 static void RtlZeroMemory(void *, size_t);
  209 static void RtlCopyMemory(void *, const void *, size_t);
  210 static size_t RtlCompareMemory(const void *, const void *, size_t);
  211 static ndis_status RtlUnicodeStringToInteger(unicode_string *,
  212         uint32_t, uint32_t *);
  213 static int atoi (const char *);
  214 static long atol (const char *);
  215 static int rand(void);
  216 static void srand(unsigned int);
  217 static void ntoskrnl_time(uint64_t *);
  218 static uint8_t IoIsWdmVersionAvailable(uint8_t, uint8_t);
  219 static void ntoskrnl_thrfunc(void *);
  220 static ndis_status PsCreateSystemThread(ndis_handle *,
  221         uint32_t, void *, ndis_handle, void *, void *, void *);
  222 static ndis_status PsTerminateSystemThread(ndis_status);
  223 static ndis_status IoGetDeviceProperty(device_object *, uint32_t,
  224         uint32_t, void *, uint32_t *);
  225 static void KeInitializeMutex(kmutant *, uint32_t);
  226 static uint32_t KeReleaseMutex(kmutant *, uint8_t);
  227 static uint32_t KeReadStateMutex(kmutant *);
  228 static ndis_status ObReferenceObjectByHandle(ndis_handle,
  229         uint32_t, void *, uint8_t, void **, void **);
  230 static void ObfDereferenceObject(void *);
  231 static uint32_t ZwClose(ndis_handle);
  232 static uint32_t WmiQueryTraceInformation(uint32_t, void *, uint32_t,
  233         uint32_t, void *);
  234 static uint32_t WmiTraceMessage(uint64_t, uint32_t, void *, uint16_t, ...);
  235 static uint32_t IoWMIRegistrationControl(device_object *, uint32_t);
  236 static void *ntoskrnl_memset(void *, int, size_t);
  237 static void *ntoskrnl_memmove(void *, void *, size_t);
  238 static char *ntoskrnl_strstr(char *, char *);
  239 static int ntoskrnl_toupper(int);
  240 static int ntoskrnl_tolower(int);
  241 static funcptr ntoskrnl_findwrap(funcptr);
  242 static uint32_t DbgPrint(char *, ...);
  243 static void DbgBreakPoint(void);
  244 static void dummy(void);
  245 
  246 static struct mtx ntoskrnl_dispatchlock;
  247 static struct mtx ntoskrnl_interlock;
  248 static kspin_lock ntoskrnl_cancellock;
  249 static int ntoskrnl_kth = 0;
  250 static struct nt_objref_head ntoskrnl_reflist;
  251 static uma_zone_t mdl_zone;
  252 static uma_zone_t iw_zone;
  253 static struct kdpc_queue *kq_queues;
  254 static struct kdpc_queue *wq_queues;
  255 static int wq_idx = 0;
  256 
  257 int
  258 ntoskrnl_libinit()
  259 {
  260         image_patch_table       *patch;
  261         int                     error;
  262         struct proc             *p;
  263         kdpc_queue              *kq;
  264         callout_entry           *e;
  265         int                     i;
  266         char                    name[64];
  267 
  268         mtx_init(&ntoskrnl_dispatchlock,
  269             "ntoskrnl dispatch lock", MTX_NDIS_LOCK, MTX_DEF|MTX_RECURSE);
  270         mtx_init(&ntoskrnl_interlock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
  271         KeInitializeSpinLock(&ntoskrnl_cancellock);
  272         KeInitializeSpinLock(&ntoskrnl_intlock);
  273         TAILQ_INIT(&ntoskrnl_reflist);
  274 
  275         InitializeListHead(&ntoskrnl_calllist);
  276         InitializeListHead(&ntoskrnl_intlist);
  277         mtx_init(&ntoskrnl_calllock, MTX_NTOSKRNL_SPIN_LOCK, NULL, MTX_SPIN);
  278 
  279         kq_queues = ExAllocatePoolWithTag(NonPagedPool,
  280 #ifdef NTOSKRNL_MULTIPLE_DPCS
  281             sizeof(kdpc_queue) * mp_ncpus, 0);
  282 #else
  283             sizeof(kdpc_queue), 0);
  284 #endif
  285 
  286         if (kq_queues == NULL)
  287                 return(ENOMEM);
  288 
  289         wq_queues = ExAllocatePoolWithTag(NonPagedPool,
  290             sizeof(kdpc_queue) * WORKITEM_THREADS, 0);
  291 
  292         if (wq_queues == NULL)
  293                 return(ENOMEM);
  294 
  295 #ifdef NTOSKRNL_MULTIPLE_DPCS
  296         bzero((char *)kq_queues, sizeof(kdpc_queue) * mp_ncpus);
  297 #else
  298         bzero((char *)kq_queues, sizeof(kdpc_queue));
  299 #endif
  300         bzero((char *)wq_queues, sizeof(kdpc_queue) * WORKITEM_THREADS);
  301 
  302         /*
  303          * Launch the DPC threads.
  304          */
  305 
  306 #ifdef NTOSKRNL_MULTIPLE_DPCS
  307         for (i = 0; i < mp_ncpus; i++) {
  308 #else
  309         for (i = 0; i < 1; i++) {
  310 #endif
  311                 kq = kq_queues + i;
  312                 kq->kq_cpu = i;
  313                 sprintf(name, "Windows DPC %d", i);
  314                 error = kthread_create(ntoskrnl_dpc_thread, kq, &p,
  315                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
  316                 if (error)
  317                         panic("failed to launch DPC thread");
  318         }
  319 
  320         /*
  321          * Launch the workitem threads.
  322          */
  323 
  324         for (i = 0; i < WORKITEM_THREADS; i++) {
  325                 kq = wq_queues + i;
  326                 sprintf(name, "Windows Workitem %d", i);
  327                 error = kthread_create(ntoskrnl_workitem_thread, kq, &p,
  328                     RFHIGHPID, NDIS_KSTACK_PAGES, name);
  329                 if (error)
  330                         panic("failed to launch workitem thread");
  331         }
  332 
  333         patch = ntoskrnl_functbl;
  334         while (patch->ipt_func != NULL) {
  335                 windrv_wrap((funcptr)patch->ipt_func,
  336                     (funcptr *)&patch->ipt_wrap,
  337                     patch->ipt_argcnt, patch->ipt_ftype);
  338                 patch++;
  339         }
  340 
  341         for (i = 0; i < NTOSKRNL_TIMEOUTS; i++) {
  342                 e = ExAllocatePoolWithTag(NonPagedPool,
  343                     sizeof(callout_entry), 0);
  344                 if (e == NULL)
  345                         panic("failed to allocate timeouts");
  346                 mtx_lock_spin(&ntoskrnl_calllock);
  347                 InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
  348                 mtx_unlock_spin(&ntoskrnl_calllock);
  349         }
  350 
  351         /*
  352          * MDLs are supposed to be variable size (they describe
  353          * buffers containing some number of pages, but we don't
  354          * know ahead of time how many pages that will be). But
  355          * always allocating them off the heap is very slow. As
  356          * a compromise, we create an MDL UMA zone big enough to
  357          * handle any buffer requiring up to 16 pages, and we
  358          * use those for any MDLs for buffers of 16 pages or less
  359          * in size. For buffers larger than that (which we assume
  360          * will be few and far between, we allocate the MDLs off
  361          * the heap.
  362          */
  363 
  364         mdl_zone = uma_zcreate("Windows MDL", MDL_ZONE_SIZE,
  365             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  366 
  367         iw_zone = uma_zcreate("Windows WorkItem", sizeof(io_workitem),
  368             NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
  369 
  370         return(0);
  371 }
  372 
  373 int
  374 ntoskrnl_libfini()
  375 {
  376         image_patch_table       *patch;
  377         callout_entry           *e;
  378         list_entry              *l;
  379 
  380         patch = ntoskrnl_functbl;
  381         while (patch->ipt_func != NULL) {
  382                 windrv_unwrap(patch->ipt_wrap);
  383                 patch++;
  384         }
  385 
  386         /* Stop the workitem queues. */
  387         ntoskrnl_destroy_workitem_threads();
  388         /* Stop the DPC queues. */
  389         ntoskrnl_destroy_dpc_threads();
  390 
  391         ExFreePool(kq_queues);
  392         ExFreePool(wq_queues);
  393 
  394         uma_zdestroy(mdl_zone);
  395         uma_zdestroy(iw_zone);
  396 
  397         mtx_lock_spin(&ntoskrnl_calllock);
  398         while(!IsListEmpty(&ntoskrnl_calllist)) {
  399                 l = RemoveHeadList(&ntoskrnl_calllist);
  400                 e = CONTAINING_RECORD(l, callout_entry, ce_list);
  401                 mtx_unlock_spin(&ntoskrnl_calllock);
  402                 ExFreePool(e);
  403                 mtx_lock_spin(&ntoskrnl_calllock);
  404         }
  405         mtx_unlock_spin(&ntoskrnl_calllock);
  406 
  407         mtx_destroy(&ntoskrnl_dispatchlock);
  408         mtx_destroy(&ntoskrnl_interlock);
  409         mtx_destroy(&ntoskrnl_calllock);
  410 
  411         return(0);
  412 }
  413 
  414 /*
  415  * We need to be able to reference this externally from the wrapper;
  416  * GCC only generates a local implementation of memset.
  417  */
  418 static void *
  419 ntoskrnl_memset(buf, ch, size)
  420         void                    *buf;
  421         int                     ch;
  422         size_t                  size;
  423 {
  424         return(memset(buf, ch, size));
  425 }
  426 
  427 static void *
  428 ntoskrnl_memmove(dst, src, size)
  429         void                    *src;
  430         void                    *dst;
  431         size_t                  size;
  432 {
  433         bcopy(src, dst, size);
  434         return(dst);
  435 }
  436 
  437 static char *
  438 ntoskrnl_strstr(s, find)
  439         char *s, *find;
  440 {
  441         char c, sc;
  442         size_t len;
  443 
  444         if ((c = *find++) != 0) {
  445                 len = strlen(find);
  446                 do {
  447                         do {
  448                                 if ((sc = *s++) == 0)
  449                                         return (NULL);
  450                         } while (sc != c);
  451                 } while (strncmp(s, find, len) != 0);
  452                 s--;
  453         }
  454         return ((char *)s);
  455 }
  456 
  457 static int
  458 ntoskrnl_toupper(c)
  459         int                     c;
  460 {
  461         return(toupper(c));
  462 }
  463 
  464 static int
  465 ntoskrnl_tolower(c)
  466         int                     c;
  467 {
  468         return(tolower(c));
  469 }
  470 
  471 static uint8_t 
  472 RtlEqualUnicodeString(str1, str2, caseinsensitive)
  473         unicode_string          *str1;
  474         unicode_string          *str2;
  475         uint8_t                 caseinsensitive;
  476 {
  477         int                     i;
  478 
  479         if (str1->us_len != str2->us_len)
  480                 return(FALSE);
  481 
  482         for (i = 0; i < str1->us_len; i++) {
  483                 if (caseinsensitive == TRUE) {
  484                         if (toupper((char)(str1->us_buf[i] & 0xFF)) !=
  485                             toupper((char)(str2->us_buf[i] & 0xFF)))
  486                                 return(FALSE);
  487                 } else {
  488                         if (str1->us_buf[i] != str2->us_buf[i])
  489                                 return(FALSE);
  490                 }
  491         }
  492 
  493         return(TRUE);
  494 }
  495 
  496 static void
  497 RtlCopyUnicodeString(dest, src)
  498         unicode_string          *dest;
  499         unicode_string          *src;
  500 {
  501 
  502         if (dest->us_maxlen >= src->us_len)
  503                 dest->us_len = src->us_len;
  504         else
  505                 dest->us_len = dest->us_maxlen;
  506         memcpy(dest->us_buf, src->us_buf, dest->us_len);
  507         return;
  508 }
  509 
  510 static void
  511 ntoskrnl_ascii_to_unicode(ascii, unicode, len)
  512         char                    *ascii;
  513         uint16_t                *unicode;
  514         int                     len;
  515 {
  516         int                     i;
  517         uint16_t                *ustr;
  518 
  519         ustr = unicode;
  520         for (i = 0; i < len; i++) {
  521                 *ustr = (uint16_t)ascii[i];
  522                 ustr++;
  523         }
  524 
  525         return;
  526 }
  527 
  528 static void
  529 ntoskrnl_unicode_to_ascii(unicode, ascii, len)
  530         uint16_t                *unicode;
  531         char                    *ascii;
  532         int                     len;
  533 {
  534         int                     i;
  535         uint8_t                 *astr;
  536 
  537         astr = ascii;
  538         for (i = 0; i < len / 2; i++) {
  539                 *astr = (uint8_t)unicode[i];
  540                 astr++;
  541         }
  542 
  543         return;
  544 }
  545 
  546 uint32_t
  547 RtlUnicodeStringToAnsiString(dest, src, allocate)
  548         ansi_string             *dest;
  549         unicode_string          *src;
  550         uint8_t                 allocate;
  551 {
  552         if (dest == NULL || src == NULL)
  553                 return(STATUS_INVALID_PARAMETER);
  554 
  555         dest->as_len = src->us_len / 2;
  556         if (dest->as_maxlen < dest->as_len)
  557                 dest->as_len = dest->as_maxlen;
  558 
  559         if (allocate == TRUE) {
  560                 dest->as_buf = ExAllocatePoolWithTag(NonPagedPool,
  561                     (src->us_len / 2) + 1, 0);
  562                 if (dest->as_buf == NULL)
  563                         return(STATUS_INSUFFICIENT_RESOURCES);
  564                 dest->as_len = dest->as_maxlen = src->us_len / 2;
  565         } else {
  566                 dest->as_len = src->us_len / 2; /* XXX */
  567                 if (dest->as_maxlen < dest->as_len)
  568                         dest->as_len = dest->as_maxlen;
  569         }
  570 
  571         ntoskrnl_unicode_to_ascii(src->us_buf, dest->as_buf,
  572             dest->as_len * 2);
  573 
  574         return (STATUS_SUCCESS);
  575 }
  576 
  577 uint32_t
  578 RtlAnsiStringToUnicodeString(dest, src, allocate)
  579         unicode_string          *dest;
  580         ansi_string             *src;
  581         uint8_t                 allocate;
  582 {
  583         if (dest == NULL || src == NULL)
  584                 return(STATUS_INVALID_PARAMETER);
  585 
  586         if (allocate == TRUE) {
  587                 dest->us_buf = ExAllocatePoolWithTag(NonPagedPool,
  588                     src->as_len * 2, 0);
  589                 if (dest->us_buf == NULL)
  590                         return(STATUS_INSUFFICIENT_RESOURCES);
  591                 dest->us_len = dest->us_maxlen = strlen(src->as_buf) * 2;
  592         } else {
  593                 dest->us_len = src->as_len * 2; /* XXX */
  594                 if (dest->us_maxlen < dest->us_len)
  595                         dest->us_len = dest->us_maxlen;
  596         }
  597 
  598         ntoskrnl_ascii_to_unicode(src->as_buf, dest->us_buf,
  599             dest->us_len / 2);
  600 
  601         return (STATUS_SUCCESS);
  602 }
  603 
  604 void *
  605 ExAllocatePoolWithTag(pooltype, len, tag)
  606         uint32_t                pooltype;
  607         size_t                  len;
  608         uint32_t                tag;
  609 {
  610         void                    *buf;
  611 
  612         buf = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
  613         if (buf == NULL)
  614                 return(NULL);
  615 
  616         return(buf);
  617 }
  618 
  619 void
  620 ExFreePool(buf)
  621         void                    *buf;
  622 {
  623         free(buf, M_DEVBUF);
  624         return;
  625 }
  626 
  627 uint32_t
  628 IoAllocateDriverObjectExtension(drv, clid, extlen, ext)
  629         driver_object           *drv;
  630         void                    *clid;
  631         uint32_t                extlen;
  632         void                    **ext;
  633 {
  634         custom_extension        *ce;
  635 
  636         ce = ExAllocatePoolWithTag(NonPagedPool, sizeof(custom_extension)
  637             + extlen, 0);
  638 
  639         if (ce == NULL)
  640                 return(STATUS_INSUFFICIENT_RESOURCES);
  641 
  642         ce->ce_clid = clid;
  643         InsertTailList((&drv->dro_driverext->dre_usrext), (&ce->ce_list));
  644 
  645         *ext = (void *)(ce + 1);
  646 
  647         return(STATUS_SUCCESS);
  648 }
  649 
  650 void *
  651 IoGetDriverObjectExtension(drv, clid)
  652         driver_object           *drv;
  653         void                    *clid;
  654 {
  655         list_entry              *e;
  656         custom_extension        *ce;
  657 
  658         /*
  659          * Sanity check. Our dummy bus drivers don't have
  660          * any driver extentions.
  661          */
  662 
  663         if (drv->dro_driverext == NULL)
  664                 return(NULL);
  665 
  666         e = drv->dro_driverext->dre_usrext.nle_flink;
  667         while (e != &drv->dro_driverext->dre_usrext) {
  668                 ce = (custom_extension *)e;
  669                 if (ce->ce_clid == clid)
  670                         return((void *)(ce + 1));
  671                 e = e->nle_flink;
  672         }
  673 
  674         return(NULL);
  675 }
  676 
  677 
  678 uint32_t
  679 IoCreateDevice(drv, devextlen, devname, devtype, devchars, exclusive, newdev)
  680         driver_object           *drv;
  681         uint32_t                devextlen;
  682         unicode_string          *devname;
  683         uint32_t                devtype;
  684         uint32_t                devchars;
  685         uint8_t                 exclusive;
  686         device_object           **newdev;
  687 {
  688         device_object           *dev;
  689 
  690         dev = ExAllocatePoolWithTag(NonPagedPool, sizeof(device_object), 0);
  691         if (dev == NULL)
  692                 return(STATUS_INSUFFICIENT_RESOURCES);
  693 
  694         dev->do_type = devtype;
  695         dev->do_drvobj = drv;
  696         dev->do_currirp = NULL;
  697         dev->do_flags = 0;
  698 
  699         if (devextlen) {
  700                 dev->do_devext = ExAllocatePoolWithTag(NonPagedPool,
  701                     devextlen, 0);
  702 
  703                 if (dev->do_devext == NULL) {
  704                         ExFreePool(dev);
  705                         return(STATUS_INSUFFICIENT_RESOURCES);
  706                 }
  707 
  708                 bzero(dev->do_devext, devextlen);
  709         } else
  710                 dev->do_devext = NULL;
  711 
  712         dev->do_size = sizeof(device_object) + devextlen;
  713         dev->do_refcnt = 1;
  714         dev->do_attacheddev = NULL;
  715         dev->do_nextdev = NULL;
  716         dev->do_devtype = devtype;
  717         dev->do_stacksize = 1;
  718         dev->do_alignreq = 1;
  719         dev->do_characteristics = devchars;
  720         dev->do_iotimer = NULL;
  721         KeInitializeEvent(&dev->do_devlock, EVENT_TYPE_SYNC, TRUE);
  722 
  723         /*
  724          * Vpd is used for disk/tape devices,
  725          * but we don't support those. (Yet.)
  726          */
  727         dev->do_vpb = NULL;
  728 
  729         dev->do_devobj_ext = ExAllocatePoolWithTag(NonPagedPool,
  730             sizeof(devobj_extension), 0);
  731 
  732         if (dev->do_devobj_ext == NULL) {
  733                 if (dev->do_devext != NULL)
  734                         ExFreePool(dev->do_devext);
  735                 ExFreePool(dev);
  736                 return(STATUS_INSUFFICIENT_RESOURCES);
  737         }
  738 
  739         dev->do_devobj_ext->dve_type = 0;
  740         dev->do_devobj_ext->dve_size = sizeof(devobj_extension);
  741         dev->do_devobj_ext->dve_devobj = dev;
  742 
  743         /*
  744          * Attach this device to the driver object's list
  745          * of devices. Note: this is not the same as attaching
  746          * the device to the device stack. The driver's AddDevice
  747          * routine must explicitly call IoAddDeviceToDeviceStack()
  748          * to do that.
  749          */
  750 
  751         if (drv->dro_devobj == NULL) {
  752                 drv->dro_devobj = dev;
  753                 dev->do_nextdev = NULL;
  754         } else {
  755                 dev->do_nextdev = drv->dro_devobj;
  756                 drv->dro_devobj = dev;
  757         }
  758 
  759         *newdev = dev;
  760 
  761         return(STATUS_SUCCESS);
  762 }
  763 
  764 void
  765 IoDeleteDevice(dev)
  766         device_object           *dev;
  767 {
  768         device_object           *prev;
  769 
  770         if (dev == NULL)
  771                 return;
  772 
  773         if (dev->do_devobj_ext != NULL)
  774                 ExFreePool(dev->do_devobj_ext);
  775 
  776         if (dev->do_devext != NULL)
  777                 ExFreePool(dev->do_devext);
  778 
  779         /* Unlink the device from the driver's device list. */
  780 
  781         prev = dev->do_drvobj->dro_devobj;
  782         if (prev == dev)
  783                 dev->do_drvobj->dro_devobj = dev->do_nextdev;
  784         else {
  785                 while (prev->do_nextdev != dev)
  786                         prev = prev->do_nextdev;
  787                 prev->do_nextdev = dev->do_nextdev;
  788         }
  789 
  790         ExFreePool(dev);
  791 
  792         return;
  793 }
  794 
  795 device_object *
  796 IoGetAttachedDevice(dev)
  797         device_object           *dev;
  798 {
  799         device_object           *d;
  800 
  801         if (dev == NULL)
  802                 return (NULL);
  803 
  804         d = dev;
  805 
  806         while (d->do_attacheddev != NULL)
  807                 d = d->do_attacheddev;
  808 
  809         return (d);
  810 }
  811 
  812 static irp *
  813 IoBuildSynchronousFsdRequest(func, dobj, buf, len, off, event, status)
  814         uint32_t                func;
  815         device_object           *dobj;
  816         void                    *buf;
  817         uint32_t                len;
  818         uint64_t                *off;
  819         nt_kevent               *event;
  820         io_status_block         *status;
  821 {
  822         irp                     *ip;
  823 
  824         ip = IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status);
  825         if (ip == NULL)
  826                 return(NULL);
  827         ip->irp_usrevent = event;
  828 
  829         return(ip);
  830 }
  831 
  832 static irp *
  833 IoBuildAsynchronousFsdRequest(func, dobj, buf, len, off, status)
  834         uint32_t                func;
  835         device_object           *dobj;
  836         void                    *buf;
  837         uint32_t                len;
  838         uint64_t                *off;
  839         io_status_block         *status;
  840 {
  841         irp                     *ip;
  842         io_stack_location       *sl;
  843 
  844         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
  845         if (ip == NULL)
  846                 return(NULL);
  847 
  848         ip->irp_usriostat = status;
  849         ip->irp_tail.irp_overlay.irp_thread = NULL;
  850 
  851         sl = IoGetNextIrpStackLocation(ip);
  852         sl->isl_major = func;
  853         sl->isl_minor = 0;
  854         sl->isl_flags = 0;
  855         sl->isl_ctl = 0;
  856         sl->isl_devobj = dobj;
  857         sl->isl_fileobj = NULL;
  858         sl->isl_completionfunc = NULL;
  859 
  860         ip->irp_userbuf = buf;
  861 
  862         if (dobj->do_flags & DO_BUFFERED_IO) {
  863                 ip->irp_assoc.irp_sysbuf =
  864                     ExAllocatePoolWithTag(NonPagedPool, len, 0);
  865                 if (ip->irp_assoc.irp_sysbuf == NULL) {
  866                         IoFreeIrp(ip);
  867                         return(NULL);
  868                 }
  869                 bcopy(buf, ip->irp_assoc.irp_sysbuf, len);
  870         }
  871 
  872         if (dobj->do_flags & DO_DIRECT_IO) {
  873                 ip->irp_mdl = IoAllocateMdl(buf, len, FALSE, FALSE, ip);
  874                 if (ip->irp_mdl == NULL) {
  875                         if (ip->irp_assoc.irp_sysbuf != NULL)
  876                                 ExFreePool(ip->irp_assoc.irp_sysbuf);
  877                         IoFreeIrp(ip);
  878                         return(NULL);
  879                 }
  880                 ip->irp_userbuf = NULL;
  881                 ip->irp_assoc.irp_sysbuf = NULL;
  882         }
  883 
  884         if (func == IRP_MJ_READ) {
  885                 sl->isl_parameters.isl_read.isl_len = len;
  886                 if (off != NULL)
  887                         sl->isl_parameters.isl_read.isl_byteoff = *off;
  888                 else
  889                         sl->isl_parameters.isl_read.isl_byteoff = 0;
  890         }
  891 
  892         if (func == IRP_MJ_WRITE) {
  893                 sl->isl_parameters.isl_write.isl_len = len;
  894                 if (off != NULL)
  895                         sl->isl_parameters.isl_write.isl_byteoff = *off;
  896                 else
  897                         sl->isl_parameters.isl_write.isl_byteoff = 0;
  898         }       
  899 
  900         return(ip);
  901 }
  902 
  903 static irp *
  904 IoBuildDeviceIoControlRequest(iocode, dobj, ibuf, ilen, obuf, olen,
  905     isinternal, event, status)
  906         uint32_t                iocode;
  907         device_object           *dobj;
  908         void                    *ibuf;
  909         uint32_t                ilen;
  910         void                    *obuf;
  911         uint32_t                olen;
  912         uint8_t                 isinternal;
  913         nt_kevent               *event;
  914         io_status_block         *status;
  915 {
  916         irp                     *ip;
  917         io_stack_location       *sl;
  918         uint32_t                buflen;
  919 
  920         ip = IoAllocateIrp(dobj->do_stacksize, TRUE);
  921         if (ip == NULL)
  922                 return(NULL);
  923         ip->irp_usrevent = event;
  924         ip->irp_usriostat = status;
  925         ip->irp_tail.irp_overlay.irp_thread = NULL;
  926 
  927         sl = IoGetNextIrpStackLocation(ip);
  928         sl->isl_major = isinternal == TRUE ?
  929             IRP_MJ_INTERNAL_DEVICE_CONTROL : IRP_MJ_DEVICE_CONTROL;
  930         sl->isl_minor = 0;
  931         sl->isl_flags = 0;
  932         sl->isl_ctl = 0;
  933         sl->isl_devobj = dobj;
  934         sl->isl_fileobj = NULL;
  935         sl->isl_completionfunc = NULL;
  936         sl->isl_parameters.isl_ioctl.isl_iocode = iocode;
  937         sl->isl_parameters.isl_ioctl.isl_ibuflen = ilen;
  938         sl->isl_parameters.isl_ioctl.isl_obuflen = olen;
  939 
  940         switch(IO_METHOD(iocode)) {
  941         case METHOD_BUFFERED:
  942                 if (ilen > olen)
  943                         buflen = ilen;
  944                 else
  945                         buflen = olen;
  946                 if (buflen) {
  947                         ip->irp_assoc.irp_sysbuf =
  948                             ExAllocatePoolWithTag(NonPagedPool, buflen, 0);
  949                         if (ip->irp_assoc.irp_sysbuf == NULL) {
  950                                 IoFreeIrp(ip);
  951                                 return(NULL);
  952                         }
  953                 }
  954                 if (ilen && ibuf != NULL) {
  955                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
  956                         bzero((char *)ip->irp_assoc.irp_sysbuf + ilen,
  957                             buflen - ilen);
  958                 } else
  959                         bzero(ip->irp_assoc.irp_sysbuf, ilen);
  960                 ip->irp_userbuf = obuf;
  961                 break;
  962         case METHOD_IN_DIRECT:
  963         case METHOD_OUT_DIRECT:
  964                 if (ilen && ibuf != NULL) {
  965                         ip->irp_assoc.irp_sysbuf =
  966                             ExAllocatePoolWithTag(NonPagedPool, ilen, 0);
  967                         if (ip->irp_assoc.irp_sysbuf == NULL) {
  968                                 IoFreeIrp(ip);
  969                                 return(NULL);
  970                         }
  971                         bcopy(ibuf, ip->irp_assoc.irp_sysbuf, ilen);
  972                 }
  973                 if (olen && obuf != NULL) {
  974                         ip->irp_mdl = IoAllocateMdl(obuf, olen,
  975                             FALSE, FALSE, ip);
  976                         /*
  977                          * Normally we would MmProbeAndLockPages()
  978                          * here, but we don't have to in our
  979                          * imlementation.
  980                          */
  981                 }
  982                 break;
  983         case METHOD_NEITHER:
  984                 ip->irp_userbuf = obuf;
  985                 sl->isl_parameters.isl_ioctl.isl_type3ibuf = ibuf;
  986                 break;
  987         default:
  988                 break;
  989         }
  990 
  991         /*
  992          * Ideally, we should associate this IRP with the calling
  993          * thread here.
  994          */
  995 
  996         return (ip);
  997 }
  998 
  999 static irp *
 1000 IoAllocateIrp(stsize, chargequota)
 1001         uint8_t                 stsize;
 1002         uint8_t                 chargequota;
 1003 {
 1004         irp                     *i;
 1005 
 1006         i = ExAllocatePoolWithTag(NonPagedPool, IoSizeOfIrp(stsize), 0);
 1007         if (i == NULL)
 1008                 return (NULL);
 1009 
 1010         IoInitializeIrp(i, IoSizeOfIrp(stsize), stsize);
 1011 
 1012         return (i);
 1013 }
 1014 
 1015 static irp *
 1016 IoMakeAssociatedIrp(ip, stsize)
 1017         irp                     *ip;
 1018         uint8_t                 stsize;
 1019 {
 1020         irp                     *associrp;
 1021 
 1022         associrp = IoAllocateIrp(stsize, FALSE);
 1023         if (associrp == NULL)
 1024                 return(NULL);
 1025 
 1026         mtx_lock(&ntoskrnl_dispatchlock);
 1027         associrp->irp_flags |= IRP_ASSOCIATED_IRP;
 1028         associrp->irp_tail.irp_overlay.irp_thread =
 1029             ip->irp_tail.irp_overlay.irp_thread;
 1030         associrp->irp_assoc.irp_master = ip;
 1031         mtx_unlock(&ntoskrnl_dispatchlock);
 1032 
 1033         return(associrp);
 1034 }
 1035 
 1036 static void
 1037 IoFreeIrp(ip)
 1038         irp                     *ip;
 1039 {
 1040         ExFreePool(ip);
 1041         return;
 1042 }
 1043 
 1044 static void
 1045 IoInitializeIrp(io, psize, ssize)
 1046         irp                     *io;
 1047         uint16_t                psize;
 1048         uint8_t                 ssize;
 1049 {
 1050         bzero((char *)io, IoSizeOfIrp(ssize));
 1051         io->irp_size = psize;
 1052         io->irp_stackcnt = ssize;
 1053         io->irp_currentstackloc = ssize;
 1054         InitializeListHead(&io->irp_thlist);
 1055         io->irp_tail.irp_overlay.irp_csl =
 1056             (io_stack_location *)(io + 1) + ssize;
 1057 
 1058         return;
 1059 }
 1060 
 1061 static void
 1062 IoReuseIrp(ip, status)
 1063         irp                     *ip;
 1064         uint32_t                status;
 1065 {
 1066         uint8_t                 allocflags;
 1067 
 1068         allocflags = ip->irp_allocflags;
 1069         IoInitializeIrp(ip, ip->irp_size, ip->irp_stackcnt);
 1070         ip->irp_iostat.isb_status = status;
 1071         ip->irp_allocflags = allocflags;
 1072 
 1073         return;
 1074 }
 1075 
 1076 void
 1077 IoAcquireCancelSpinLock(irql)
 1078         uint8_t                 *irql;
 1079 {
 1080         KeAcquireSpinLock(&ntoskrnl_cancellock, irql);
 1081         return;
 1082 }
 1083 
 1084 void
 1085 IoReleaseCancelSpinLock(irql)
 1086         uint8_t                 irql;
 1087 {
 1088         KeReleaseSpinLock(&ntoskrnl_cancellock, irql);
 1089         return;
 1090 }
 1091 
 1092 uint8_t
 1093 IoCancelIrp(irp *ip)
 1094 {
 1095         cancel_func             cfunc;
 1096 
 1097         IoAcquireCancelSpinLock(&ip->irp_cancelirql);
 1098         cfunc = IoSetCancelRoutine(ip, NULL);
 1099         ip->irp_cancel = TRUE;
 1100         if (ip->irp_cancelfunc == NULL) {
 1101                 IoReleaseCancelSpinLock(ip->irp_cancelirql);
 1102                 return(FALSE);
 1103         }
 1104         MSCALL2(cfunc, IoGetCurrentIrpStackLocation(ip)->isl_devobj, ip);
 1105         return(TRUE);
 1106 }
 1107 
 1108 uint32_t
 1109 IofCallDriver(dobj, ip)
 1110         device_object           *dobj;
 1111         irp                     *ip;
 1112 {
 1113         driver_object           *drvobj;
 1114         io_stack_location       *sl;
 1115         uint32_t                status;
 1116         driver_dispatch         disp;
 1117 
 1118         drvobj = dobj->do_drvobj;
 1119 
 1120         if (ip->irp_currentstackloc <= 0)
 1121                 panic("IoCallDriver(): out of stack locations");
 1122 
 1123         IoSetNextIrpStackLocation(ip);
 1124         sl = IoGetCurrentIrpStackLocation(ip);
 1125 
 1126         sl->isl_devobj = dobj;
 1127 
 1128         disp = drvobj->dro_dispatch[sl->isl_major];
 1129         status = MSCALL2(disp, dobj, ip);
 1130 
 1131         return(status);
 1132 }
 1133 
 1134 void
 1135 IofCompleteRequest(ip, prioboost)
 1136         irp                     *ip;
 1137         uint8_t                 prioboost;
 1138 {
 1139         uint32_t                i;
 1140         uint32_t                status;
 1141         device_object           *dobj;
 1142         io_stack_location       *sl;
 1143         completion_func         cf;
 1144 
 1145         ip->irp_pendingreturned =
 1146             IoGetCurrentIrpStackLocation(ip)->isl_ctl & SL_PENDING_RETURNED;
 1147         sl = (io_stack_location *)(ip + 1);
 1148 
 1149         for (i = ip->irp_currentstackloc; i < (uint32_t)ip->irp_stackcnt; i++) {
 1150                 if (ip->irp_currentstackloc < ip->irp_stackcnt - 1) {
 1151                         IoSkipCurrentIrpStackLocation(ip);
 1152                         dobj = IoGetCurrentIrpStackLocation(ip)->isl_devobj;
 1153                 } else
 1154                         dobj = NULL;
 1155 
 1156                 if (sl[i].isl_completionfunc != NULL &&
 1157                     ((ip->irp_iostat.isb_status == STATUS_SUCCESS &&
 1158                     sl->isl_ctl & SL_INVOKE_ON_SUCCESS) ||
 1159                     (ip->irp_iostat.isb_status != STATUS_SUCCESS &&
 1160                     sl->isl_ctl & SL_INVOKE_ON_ERROR) ||
 1161                     (ip->irp_cancel == TRUE &&
 1162                     sl->isl_ctl & SL_INVOKE_ON_CANCEL))) {
 1163                         cf = sl->isl_completionfunc;
 1164                         status = MSCALL3(cf, dobj, ip, sl->isl_completionctx);
 1165                         if (status == STATUS_MORE_PROCESSING_REQUIRED)
 1166                                 return;
 1167                 }
 1168 
 1169                 if (IoGetCurrentIrpStackLocation(ip)->isl_ctl &
 1170                     SL_PENDING_RETURNED)
 1171                         ip->irp_pendingreturned = TRUE;
 1172         }
 1173 
 1174         /* Handle any associated IRPs. */
 1175 
 1176         if (ip->irp_flags & IRP_ASSOCIATED_IRP) {
 1177                 uint32_t                masterirpcnt;
 1178                 irp                     *masterirp;
 1179                 mdl                     *m;
 1180 
 1181                 masterirp = ip->irp_assoc.irp_master;
 1182                 masterirpcnt =
 1183                     InterlockedDecrement(&masterirp->irp_assoc.irp_irpcnt);
 1184 
 1185                 while ((m = ip->irp_mdl) != NULL) {
 1186                         ip->irp_mdl = m->mdl_next;
 1187                         IoFreeMdl(m);
 1188                 }
 1189                 IoFreeIrp(ip);
 1190                 if (masterirpcnt == 0)
 1191                         IoCompleteRequest(masterirp, IO_NO_INCREMENT);
 1192                 return;
 1193         }
 1194 
 1195         /* With any luck, these conditions will never arise. */
 1196 
 1197         if (ip->irp_flags & (IRP_PAGING_IO|IRP_CLOSE_OPERATION)) {
 1198                 if (ip->irp_usriostat != NULL)
 1199                         *ip->irp_usriostat = ip->irp_iostat;
 1200                 if (ip->irp_usrevent != NULL)
 1201                         KeSetEvent(ip->irp_usrevent, prioboost, FALSE);
 1202                 if (ip->irp_flags & IRP_PAGING_IO) {
 1203                         if (ip->irp_mdl != NULL)
 1204                                 IoFreeMdl(ip->irp_mdl);
 1205                         IoFreeIrp(ip);
 1206                 }
 1207         }
 1208 
 1209         return;
 1210 }
 1211 
 1212 void
 1213 ntoskrnl_intr(arg)
 1214         void                    *arg;
 1215 {
 1216         kinterrupt              *iobj;
 1217         uint8_t                 irql;
 1218         uint8_t                 claimed;
 1219         list_entry              *l;
 1220 
 1221         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1222         l = ntoskrnl_intlist.nle_flink;
 1223         while (l != &ntoskrnl_intlist) {
 1224                 iobj = CONTAINING_RECORD(l, kinterrupt, ki_list);
 1225                 claimed = MSCALL2(iobj->ki_svcfunc, iobj, iobj->ki_svcctx);
 1226                 if (claimed == TRUE)
 1227                         break;
 1228                 l = l->nle_flink;
 1229         }
 1230         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1231 
 1232         return;
 1233 }
 1234 
 1235 uint8_t
 1236 KeAcquireInterruptSpinLock(iobj)
 1237         kinterrupt              *iobj;
 1238 {
 1239         uint8_t                 irql;
 1240         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1241         return(irql);
 1242 }
 1243 
 1244 void
 1245 KeReleaseInterruptSpinLock(iobj, irql)
 1246         kinterrupt              *iobj;
 1247         uint8_t                 irql;
 1248 {
 1249         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1250         return;
 1251 }
 1252 
 1253 uint8_t
 1254 KeSynchronizeExecution(iobj, syncfunc, syncctx)
 1255         kinterrupt              *iobj;
 1256         void                    *syncfunc;
 1257         void                    *syncctx;
 1258 {
 1259         uint8_t                 irql;
 1260         
 1261         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1262         MSCALL1(syncfunc, syncctx);
 1263         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1264 
 1265         return(TRUE);
 1266 }
 1267 
 1268 /*
 1269  * IoConnectInterrupt() is passed only the interrupt vector and
 1270  * irql that a device wants to use, but no device-specific tag
 1271  * of any kind. This conflicts rather badly with FreeBSD's
 1272  * bus_setup_intr(), which needs the device_t for the device
 1273  * requesting interrupt delivery. In order to bypass this
 1274  * inconsistency, we implement a second level of interrupt
 1275  * dispatching on top of bus_setup_intr(). All devices use
 1276  * ntoskrnl_intr() as their ISR, and any device requesting
 1277  * interrupts will be registered with ntoskrnl_intr()'s interrupt
 1278  * dispatch list. When an interrupt arrives, we walk the list
 1279  * and invoke all the registered ISRs. This effectively makes all
 1280  * interrupts shared, but it's the only way to duplicate the
 1281  * semantics of IoConnectInterrupt() and IoDisconnectInterrupt() properly.
 1282  */
 1283 
 1284 uint32_t
 1285 IoConnectInterrupt(iobj, svcfunc, svcctx, lock, vector, irql,
 1286         syncirql, imode, shared, affinity, savefloat)
 1287         kinterrupt              **iobj;
 1288         void                    *svcfunc;
 1289         void                    *svcctx;
 1290         uint32_t                vector;
 1291         kspin_lock              *lock;
 1292         uint8_t                 irql;
 1293         uint8_t                 syncirql;
 1294         uint8_t                 imode;
 1295         uint8_t                 shared;
 1296         uint32_t                affinity;
 1297         uint8_t                 savefloat;
 1298 {
 1299         uint8_t                 curirql;
 1300 
 1301         *iobj = ExAllocatePoolWithTag(NonPagedPool, sizeof(kinterrupt), 0);
 1302         if (*iobj == NULL)
 1303                 return(STATUS_INSUFFICIENT_RESOURCES);
 1304 
 1305         (*iobj)->ki_svcfunc = svcfunc;
 1306         (*iobj)->ki_svcctx = svcctx;
 1307 
 1308         if (lock == NULL) {
 1309                 KeInitializeSpinLock(&(*iobj)->ki_lock_priv);
 1310                 (*iobj)->ki_lock = &(*iobj)->ki_lock_priv;
 1311         } else
 1312                 (*iobj)->ki_lock = lock;
 1313 
 1314         KeAcquireSpinLock(&ntoskrnl_intlock, &curirql);
 1315         InsertHeadList((&ntoskrnl_intlist), (&(*iobj)->ki_list));
 1316         KeReleaseSpinLock(&ntoskrnl_intlock, curirql);
 1317 
 1318         return(STATUS_SUCCESS);
 1319 }
 1320 
 1321 void
 1322 IoDisconnectInterrupt(iobj)
 1323         kinterrupt              *iobj;
 1324 {
 1325         uint8_t                 irql;
 1326 
 1327         if (iobj == NULL)
 1328                 return;
 1329 
 1330         KeAcquireSpinLock(&ntoskrnl_intlock, &irql);
 1331         RemoveEntryList((&iobj->ki_list));
 1332         KeReleaseSpinLock(&ntoskrnl_intlock, irql);
 1333 
 1334         ExFreePool(iobj);
 1335 
 1336         return;
 1337 }
 1338 
 1339 device_object *
 1340 IoAttachDeviceToDeviceStack(src, dst)
 1341         device_object           *src;
 1342         device_object           *dst;
 1343 {
 1344         device_object           *attached;
 1345 
 1346         mtx_lock(&ntoskrnl_dispatchlock);
 1347         attached = IoGetAttachedDevice(dst);
 1348         attached->do_attacheddev = src;
 1349         src->do_attacheddev = NULL;
 1350         src->do_stacksize = attached->do_stacksize + 1;
 1351         mtx_unlock(&ntoskrnl_dispatchlock);
 1352 
 1353         return(attached);
 1354 }
 1355 
 1356 void
 1357 IoDetachDevice(topdev)
 1358         device_object           *topdev;
 1359 {
 1360         device_object           *tail;
 1361 
 1362         mtx_lock(&ntoskrnl_dispatchlock);
 1363 
 1364         /* First, break the chain. */
 1365         tail = topdev->do_attacheddev;
 1366         if (tail == NULL) {
 1367                 mtx_unlock(&ntoskrnl_dispatchlock);
 1368                 return;
 1369         }
 1370         topdev->do_attacheddev = tail->do_attacheddev;
 1371         topdev->do_refcnt--;
 1372 
 1373         /* Now reduce the stacksize count for the takm_il objects. */
 1374 
 1375         tail = topdev->do_attacheddev;
 1376         while (tail != NULL) {
 1377                 tail->do_stacksize--;
 1378                 tail = tail->do_attacheddev;
 1379         }
 1380 
 1381         mtx_unlock(&ntoskrnl_dispatchlock);
 1382 
 1383         return;
 1384 }
 1385 
 1386 /*
 1387  * For the most part, an object is considered signalled if
 1388  * dh_sigstate == TRUE. The exception is for mutant objects
 1389  * (mutexes), where the logic works like this:
 1390  *
 1391  * - If the thread already owns the object and sigstate is
 1392  *   less than or equal to 0, then the object is considered
 1393  *   signalled (recursive acquisition).
 1394  * - If dh_sigstate == 1, the object is also considered
 1395  *   signalled.
 1396  */
 1397 
 1398 static int
 1399 ntoskrnl_is_signalled(obj, td)
 1400         nt_dispatch_header      *obj;
 1401         struct thread           *td;
 1402 {
 1403         kmutant                 *km;
 1404         
 1405         if (obj->dh_type == DISP_TYPE_MUTANT) {
 1406                 km = (kmutant *)obj;
 1407                 if ((obj->dh_sigstate <= 0 && km->km_ownerthread == td) ||
 1408                     obj->dh_sigstate == 1)
 1409                         return(TRUE);
 1410                 return(FALSE);
 1411         }
 1412 
 1413         if (obj->dh_sigstate > 0)
 1414                 return(TRUE);
 1415         return(FALSE);
 1416 }
 1417 
 1418 static void
 1419 ntoskrnl_satisfy_wait(obj, td)
 1420         nt_dispatch_header      *obj;
 1421         struct thread           *td;
 1422 {
 1423         kmutant                 *km;
 1424 
 1425         switch (obj->dh_type) {
 1426         case DISP_TYPE_MUTANT:
 1427                 km = (struct kmutant *)obj;
 1428                 obj->dh_sigstate--;
 1429                 /*
 1430                  * If sigstate reaches 0, the mutex is now
 1431                  * non-signalled (the new thread owns it).
 1432                  */
 1433                 if (obj->dh_sigstate == 0) {
 1434                         km->km_ownerthread = td;
 1435                         if (km->km_abandoned == TRUE)
 1436                                 km->km_abandoned = FALSE;
 1437                 }
 1438                 break;
 1439         /* Synchronization objects get reset to unsignalled. */
 1440         case DISP_TYPE_SYNCHRONIZATION_EVENT:
 1441         case DISP_TYPE_SYNCHRONIZATION_TIMER:
 1442                 obj->dh_sigstate = 0;
 1443                 break;
 1444         case DISP_TYPE_SEMAPHORE:
 1445                 obj->dh_sigstate--;
 1446                 break;
 1447         default:
 1448                 break;
 1449         }
 1450 
 1451         return;
 1452 }
 1453 
 1454 static void
 1455 ntoskrnl_satisfy_multiple_waits(wb)
 1456         wait_block              *wb;
 1457 {
 1458         wait_block              *cur;
 1459         struct thread           *td;
 1460 
 1461         cur = wb;
 1462         td = wb->wb_kthread;
 1463 
 1464         do {
 1465                 ntoskrnl_satisfy_wait(wb->wb_object, td);
 1466                 cur->wb_awakened = TRUE;
 1467                 cur = cur->wb_next;
 1468         } while (cur != wb);
 1469 
 1470         return;
 1471 }
 1472 
 1473 /* Always called with dispatcher lock held. */
 1474 static void
 1475 ntoskrnl_waittest(obj, increment)
 1476         nt_dispatch_header      *obj;
 1477         uint32_t                increment;
 1478 {
 1479         wait_block              *w, *next;
 1480         list_entry              *e;
 1481         struct thread           *td;
 1482         wb_ext                  *we;
 1483         int                     satisfied;
 1484 
 1485         /*
 1486          * Once an object has been signalled, we walk its list of
 1487          * wait blocks. If a wait block can be awakened, then satisfy
 1488          * waits as necessary and wake the thread.
 1489          *
 1490          * The rules work like this:
 1491          *
 1492          * If a wait block is marked as WAITTYPE_ANY, then
 1493          * we can satisfy the wait conditions on the current
 1494          * object and wake the thread right away. Satisfying
 1495          * the wait also has the effect of breaking us out
 1496          * of the search loop.
 1497          *
 1498          * If the object is marked as WAITTYLE_ALL, then the
 1499          * wait block will be part of a circularly linked
 1500          * list of wait blocks belonging to a waiting thread
 1501          * that's sleeping in KeWaitForMultipleObjects(). In
 1502          * order to wake the thread, all the objects in the
 1503          * wait list must be in the signalled state. If they
 1504          * are, we then satisfy all of them and wake the
 1505          * thread.
 1506          *
 1507          */
 1508 
 1509         e = obj->dh_waitlisthead.nle_flink;
 1510 
 1511         while (e != &obj->dh_waitlisthead && obj->dh_sigstate > 0) {
 1512                 w = CONTAINING_RECORD(e, wait_block, wb_waitlist);
 1513                 we = w->wb_ext;
 1514                 td = we->we_td;
 1515                 satisfied = FALSE;
 1516                 if (w->wb_waittype == WAITTYPE_ANY) {
 1517                         /*
 1518                          * Thread can be awakened if
 1519                          * any wait is satisfied.
 1520                          */
 1521                         ntoskrnl_satisfy_wait(obj, td);
 1522                         satisfied = TRUE;
 1523                         w->wb_awakened = TRUE;
 1524                 } else {
 1525                         /*
 1526                          * Thread can only be woken up
 1527                          * if all waits are satisfied.
 1528                          * If the thread is waiting on multiple
 1529                          * objects, they should all be linked
 1530                          * through the wb_next pointers in the
 1531                          * wait blocks.
 1532                          */
 1533                         satisfied = TRUE;
 1534                         next = w->wb_next;
 1535                         while (next != w) {
 1536                                 if (ntoskrnl_is_signalled(obj, td) == FALSE) {
 1537                                         satisfied = FALSE;
 1538                                         break;
 1539                                 }
 1540                                 next = next->wb_next;
 1541                         }
 1542                         ntoskrnl_satisfy_multiple_waits(w);
 1543                 }
 1544 
 1545                 if (satisfied == TRUE)
 1546                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
 1547                             (increment * 4));
 1548 
 1549                 e = e->nle_flink;
 1550         }
 1551 
 1552         return;
 1553 }
 1554 
 1555 static void 
 1556 ntoskrnl_time(tval)
 1557         uint64_t                *tval;
 1558 {
 1559         struct timespec         ts;
 1560 
 1561         nanotime(&ts);
 1562         *tval = (uint64_t)ts.tv_nsec / 100 + (uint64_t)ts.tv_sec * 10000000 +
 1563             11644473600;
 1564 
 1565         return;
 1566 }
 1567 
 1568 /*
 1569  * KeWaitForSingleObject() is a tricky beast, because it can be used
 1570  * with several different object types: semaphores, timers, events,
 1571  * mutexes and threads. Semaphores don't appear very often, but the
 1572  * other object types are quite common. KeWaitForSingleObject() is
 1573  * what's normally used to acquire a mutex, and it can be used to
 1574  * wait for a thread termination.
 1575  *
 1576  * The Windows NDIS API is implemented in terms of Windows kernel
 1577  * primitives, and some of the object manipulation is duplicated in
 1578  * NDIS. For example, NDIS has timers and events, which are actually
 1579  * Windows kevents and ktimers. Now, you're supposed to only use the
 1580  * NDIS variants of these objects within the confines of the NDIS API,
 1581  * but there are some naughty developers out there who will use
 1582  * KeWaitForSingleObject() on NDIS timer and event objects, so we
 1583  * have to support that as well. Conseqently, our NDIS timer and event
 1584  * code has to be closely tied into our ntoskrnl timer and event code,
 1585  * just as it is in Windows.
 1586  *
 1587  * KeWaitForSingleObject() may do different things for different kinds
 1588  * of objects:
 1589  *
 1590  * - For events, we check if the event has been signalled. If the
 1591  *   event is already in the signalled state, we just return immediately,
 1592  *   otherwise we wait for it to be set to the signalled state by someone
 1593  *   else calling KeSetEvent(). Events can be either synchronization or
 1594  *   notification events.
 1595  *
 1596  * - For timers, if the timer has already fired and the timer is in
 1597  *   the signalled state, we just return, otherwise we wait on the
 1598  *   timer. Unlike an event, timers get signalled automatically when
 1599  *   they expire rather than someone having to trip them manually.
 1600  *   Timers initialized with KeInitializeTimer() are always notification
 1601  *   events: KeInitializeTimerEx() lets you initialize a timer as
 1602  *   either a notification or synchronization event.
 1603  *
 1604  * - For mutexes, we try to acquire the mutex and if we can't, we wait
 1605  *   on the mutex until it's available and then grab it. When a mutex is
 1606  *   released, it enters the signalled state, which wakes up one of the
 1607  *   threads waiting to acquire it. Mutexes are always synchronization
 1608  *   events.
 1609  *
 1610  * - For threads, the only thing we do is wait until the thread object
 1611  *   enters a signalled state, which occurs when the thread terminates.
 1612  *   Threads are always notification events.
 1613  *
 1614  * A notification event wakes up all threads waiting on an object. A
 1615  * synchronization event wakes up just one. Also, a synchronization event
 1616  * is auto-clearing, which means we automatically set the event back to
 1617  * the non-signalled state once the wakeup is done.
 1618  */
 1619 
 1620 uint32_t
 1621 KeWaitForSingleObject(arg, reason, mode, alertable, duetime)
 1622         void                    *arg;
 1623         uint32_t                reason;
 1624         uint32_t                mode;
 1625         uint8_t                 alertable;
 1626         int64_t                 *duetime;
 1627 {
 1628         wait_block              w;
 1629         struct thread           *td = curthread;
 1630         struct timeval          tv;
 1631         int                     error = 0;
 1632         uint64_t                curtime;
 1633         wb_ext                  we;
 1634         nt_dispatch_header      *obj;
 1635 
 1636         obj = arg;
 1637 
 1638         if (obj == NULL)
 1639                 return(STATUS_INVALID_PARAMETER);
 1640 
 1641         mtx_lock(&ntoskrnl_dispatchlock);
 1642 
 1643         cv_init(&we.we_cv, "KeWFS");
 1644         we.we_td = td;
 1645 
 1646         /*
 1647          * Check to see if this object is already signalled,
 1648          * and just return without waiting if it is.
 1649          */
 1650         if (ntoskrnl_is_signalled(obj, td) == TRUE) {
 1651                 /* Sanity check the signal state value. */
 1652                 if (obj->dh_sigstate != INT32_MIN) {
 1653                         ntoskrnl_satisfy_wait(obj, curthread);
 1654                         mtx_unlock(&ntoskrnl_dispatchlock);
 1655                         return (STATUS_SUCCESS);
 1656                 } else {
 1657                         /*
 1658                          * There's a limit to how many times we can
 1659                          * recursively acquire a mutant. If we hit
 1660                          * the limit, something is very wrong.
 1661                          */
 1662                         if (obj->dh_type == DISP_TYPE_MUTANT) {
 1663                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1664                                 panic("mutant limit exceeded");
 1665                         }
 1666                 }
 1667         }
 1668 
 1669         bzero((char *)&w, sizeof(wait_block));
 1670         w.wb_object = obj;
 1671         w.wb_ext = &we;
 1672         w.wb_waittype = WAITTYPE_ANY;
 1673         w.wb_next = &w;
 1674         w.wb_waitkey = 0;
 1675         w.wb_awakened = FALSE;
 1676         w.wb_oldpri = td->td_priority;
 1677 
 1678         InsertTailList((&obj->dh_waitlisthead), (&w.wb_waitlist));
 1679 
 1680         /*
 1681          * The timeout value is specified in 100 nanosecond units
 1682          * and can be a positive or negative number. If it's positive,
 1683          * then the duetime is absolute, and we need to convert it
 1684          * to an absolute offset relative to now in order to use it.
 1685          * If it's negative, then the duetime is relative and we
 1686          * just have to convert the units.
 1687          */
 1688 
 1689         if (duetime != NULL) {
 1690                 if (*duetime < 0) {
 1691                         tv.tv_sec = - (*duetime) / 10000000;
 1692                         tv.tv_usec = (- (*duetime) / 10) -
 1693                             (tv.tv_sec * 1000000);
 1694                 } else {
 1695                         ntoskrnl_time(&curtime);
 1696                         if (*duetime < curtime)
 1697                                 tv.tv_sec = tv.tv_usec = 0;
 1698                         else {
 1699                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1700                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1701                                     (tv.tv_sec * 1000000);
 1702                         }
 1703                 }
 1704         }
 1705 
 1706         if (duetime == NULL)
 1707                 cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1708         else
 1709                 error = cv_timedwait(&we.we_cv,
 1710                     &ntoskrnl_dispatchlock, tvtohz(&tv));
 1711 
 1712         RemoveEntryList(&w.wb_waitlist);
 1713 
 1714         cv_destroy(&we.we_cv);
 1715 
 1716         /* We timed out. Leave the object alone and return status. */
 1717 
 1718         if (error == EWOULDBLOCK) {
 1719                 mtx_unlock(&ntoskrnl_dispatchlock);
 1720                 return(STATUS_TIMEOUT);
 1721         }
 1722 
 1723         mtx_unlock(&ntoskrnl_dispatchlock);
 1724 
 1725         return(STATUS_SUCCESS);
 1726 /*
 1727         return(KeWaitForMultipleObjects(1, &obj, WAITTYPE_ALL, reason,
 1728             mode, alertable, duetime, &w));
 1729 */
 1730 }
 1731 
 1732 static uint32_t
 1733 KeWaitForMultipleObjects(cnt, obj, wtype, reason, mode,
 1734         alertable, duetime, wb_array)
 1735         uint32_t                cnt;
 1736         nt_dispatch_header      *obj[];
 1737         uint32_t                wtype;
 1738         uint32_t                reason;
 1739         uint32_t                mode;
 1740         uint8_t                 alertable;
 1741         int64_t                 *duetime;
 1742         wait_block              *wb_array;
 1743 {
 1744         struct thread           *td = curthread;
 1745         wait_block              *whead, *w;
 1746         wait_block              _wb_array[MAX_WAIT_OBJECTS];
 1747         nt_dispatch_header      *cur;
 1748         struct timeval          tv;
 1749         int                     i, wcnt = 0, error = 0;
 1750         uint64_t                curtime;
 1751         struct timespec         t1, t2;
 1752         uint32_t                status = STATUS_SUCCESS;
 1753         wb_ext                  we;
 1754 
 1755         if (cnt > MAX_WAIT_OBJECTS)
 1756                 return(STATUS_INVALID_PARAMETER);
 1757         if (cnt > THREAD_WAIT_OBJECTS && wb_array == NULL)
 1758                 return(STATUS_INVALID_PARAMETER);
 1759 
 1760         mtx_lock(&ntoskrnl_dispatchlock);
 1761 
 1762         cv_init(&we.we_cv, "KeWFM");
 1763         we.we_td = td;
 1764 
 1765         if (wb_array == NULL)
 1766                 whead = _wb_array;
 1767         else
 1768                 whead = wb_array;
 1769 
 1770         bzero((char *)whead, sizeof(wait_block) * cnt);
 1771 
 1772         /* First pass: see if we can satisfy any waits immediately. */
 1773 
 1774         wcnt = 0;
 1775         w = whead;
 1776 
 1777         for (i = 0; i < cnt; i++) {
 1778                 InsertTailList((&obj[i]->dh_waitlisthead),
 1779                     (&w->wb_waitlist));
 1780                 w->wb_ext = &we;
 1781                 w->wb_object = obj[i];
 1782                 w->wb_waittype = wtype;
 1783                 w->wb_waitkey = i;
 1784                 w->wb_awakened = FALSE;
 1785                 w->wb_oldpri = td->td_priority;
 1786                 w->wb_next = w + 1;
 1787                 w++;
 1788                 wcnt++;
 1789                 if (ntoskrnl_is_signalled(obj[i], td)) {
 1790                         /*
 1791                          * There's a limit to how many times
 1792                          * we can recursively acquire a mutant.
 1793                          * If we hit the limit, something
 1794                          * is very wrong.
 1795                          */
 1796                         if (obj[i]->dh_sigstate == INT32_MIN &&
 1797                             obj[i]->dh_type == DISP_TYPE_MUTANT) {
 1798                                 mtx_unlock(&ntoskrnl_dispatchlock);
 1799                                 panic("mutant limit exceeded");
 1800                         }
 1801 
 1802                         /*
 1803                          * If this is a WAITTYPE_ANY wait, then
 1804                          * satisfy the waited object and exit
 1805                          * right now.
 1806                          */
 1807 
 1808                         if (wtype == WAITTYPE_ANY) {
 1809                                 ntoskrnl_satisfy_wait(obj[i], td);
 1810                                 status = STATUS_WAIT_0 + i;
 1811                                 goto wait_done;
 1812                         } else {
 1813                                 w--;
 1814                                 wcnt--;
 1815                                 w->wb_object = NULL;
 1816                                 RemoveEntryList(&w->wb_waitlist);
 1817                         }
 1818                 }
 1819         }
 1820 
 1821         /*
 1822          * If this is a WAITTYPE_ALL wait and all objects are
 1823          * already signalled, satisfy the waits and exit now.
 1824          */
 1825 
 1826         if (wtype == WAITTYPE_ALL && wcnt == 0) {
 1827                 for (i = 0; i < cnt; i++)
 1828                         ntoskrnl_satisfy_wait(obj[i], td);
 1829                 status = STATUS_SUCCESS;
 1830                 goto wait_done;
 1831         }
 1832 
 1833         /*
 1834          * Create a circular waitblock list. The waitcount
 1835          * must always be non-zero when we get here.
 1836          */
 1837 
 1838         (w - 1)->wb_next = whead;
 1839 
 1840         /* Wait on any objects that aren't yet signalled. */
 1841 
 1842         /* Calculate timeout, if any. */
 1843 
 1844         if (duetime != NULL) {
 1845                 if (*duetime < 0) {
 1846                         tv.tv_sec = - (*duetime) / 10000000;
 1847                         tv.tv_usec = (- (*duetime) / 10) -
 1848                             (tv.tv_sec * 1000000);
 1849                 } else {
 1850                         ntoskrnl_time(&curtime);
 1851                         if (*duetime < curtime)
 1852                                 tv.tv_sec = tv.tv_usec = 0;
 1853                         else {
 1854                                 tv.tv_sec = ((*duetime) - curtime) / 10000000;
 1855                                 tv.tv_usec = ((*duetime) - curtime) / 10 -
 1856                                     (tv.tv_sec * 1000000);
 1857                         }
 1858                 }
 1859         }
 1860 
 1861         while (wcnt) {
 1862                 nanotime(&t1);
 1863 
 1864                 if (duetime == NULL)
 1865                         cv_wait(&we.we_cv, &ntoskrnl_dispatchlock);
 1866                 else
 1867                         error = cv_timedwait(&we.we_cv,
 1868                             &ntoskrnl_dispatchlock, tvtohz(&tv));
 1869 
 1870                 /* Wait with timeout expired. */
 1871 
 1872                 if (error) {
 1873                         status = STATUS_TIMEOUT;
 1874                         goto wait_done;
 1875                 }
 1876 
 1877                 nanotime(&t2);
 1878 
 1879                 /* See what's been signalled. */
 1880 
 1881                 w = whead;
 1882                 do {
 1883                         cur = w->wb_object;
 1884                         if (ntoskrnl_is_signalled(cur, td) == TRUE ||
 1885                             w->wb_awakened == TRUE) {
 1886                                 /* Sanity check the signal state value. */
 1887                                 if (cur->dh_sigstate == INT32_MIN &&
 1888                                     cur->dh_type == DISP_TYPE_MUTANT) {
 1889                                         mtx_unlock(&ntoskrnl_dispatchlock);
 1890                                         panic("mutant limit exceeded");
 1891                                 }
 1892                                 wcnt--;
 1893                                 if (wtype == WAITTYPE_ANY) {
 1894                                         status = w->wb_waitkey &
 1895                                             STATUS_WAIT_0;
 1896                                         goto wait_done;
 1897                                 }
 1898                         }
 1899                         w = w->wb_next;
 1900                 } while (w != whead);
 1901 
 1902                 /*
 1903                  * If all objects have been signalled, or if this
 1904                  * is a WAITTYPE_ANY wait and we were woke up by
 1905                  * someone, we can bail.
 1906                  */
 1907 
 1908                 if (wcnt == 0) {
 1909                         status = STATUS_SUCCESS;
 1910                         goto wait_done;
 1911                 }
 1912 
 1913                 /*
 1914                  * If this is WAITTYPE_ALL wait, and there's still
 1915                  * objects that haven't been signalled, deduct the
 1916                  * time that's elapsed so far from the timeout and
 1917                  * wait again (or continue waiting indefinitely if
 1918                  * there's no timeout).
 1919                  */
 1920 
 1921                 if (duetime != NULL) {
 1922                         tv.tv_sec -= (t2.tv_sec - t1.tv_sec);
 1923                         tv.tv_usec -= (t2.tv_nsec - t1.tv_nsec) / 1000;
 1924                 }
 1925         }
 1926 
 1927 
 1928 wait_done:
 1929 
 1930         cv_destroy(&we.we_cv);
 1931 
 1932         for (i = 0; i < cnt; i++) {
 1933                 if (whead[i].wb_object != NULL)
 1934                         RemoveEntryList(&whead[i].wb_waitlist);
 1935 
 1936         }
 1937         mtx_unlock(&ntoskrnl_dispatchlock);
 1938 
 1939         return(status);
 1940 }
 1941 
 1942 static void
 1943 WRITE_REGISTER_USHORT(reg, val)
 1944         uint16_t                *reg;
 1945         uint16_t                val;
 1946 {
 1947         bus_space_write_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1948         return;
 1949 }
 1950 
 1951 static uint16_t
 1952 READ_REGISTER_USHORT(reg)
 1953         uint16_t                *reg;
 1954 {
 1955         return(bus_space_read_2(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1956 }
 1957 
 1958 static void
 1959 WRITE_REGISTER_ULONG(reg, val)
 1960         uint32_t                *reg;
 1961         uint32_t                val;
 1962 {
 1963         bus_space_write_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1964         return;
 1965 }
 1966 
 1967 static uint32_t
 1968 READ_REGISTER_ULONG(reg)
 1969         uint32_t                *reg;
 1970 {
 1971         return(bus_space_read_4(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1972 }
 1973 
 1974 static uint8_t
 1975 READ_REGISTER_UCHAR(reg)
 1976         uint8_t                 *reg;
 1977 {
 1978         return(bus_space_read_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg));
 1979 }
 1980 
 1981 static void
 1982 WRITE_REGISTER_UCHAR(reg, val)
 1983         uint8_t                 *reg;
 1984         uint8_t                 val;
 1985 {
 1986         bus_space_write_1(NDIS_BUS_SPACE_MEM, 0x0, (bus_size_t)reg, val);
 1987         return;
 1988 }
 1989 
 1990 static int64_t
 1991 _allmul(a, b)
 1992         int64_t                 a;
 1993         int64_t                 b;
 1994 {
 1995         return (a * b);
 1996 }
 1997 
 1998 static int64_t
 1999 _alldiv(a, b)
 2000         int64_t                 a;
 2001         int64_t                 b;
 2002 {
 2003         return (a / b);
 2004 }
 2005 
 2006 static int64_t
 2007 _allrem(a, b)
 2008         int64_t                 a;
 2009         int64_t                 b;
 2010 {
 2011         return (a % b);
 2012 }
 2013 
 2014 static uint64_t
 2015 _aullmul(a, b)
 2016         uint64_t                a;
 2017         uint64_t                b;
 2018 {
 2019         return (a * b);
 2020 }
 2021 
 2022 static uint64_t
 2023 _aulldiv(a, b)
 2024         uint64_t                a;
 2025         uint64_t                b;
 2026 {
 2027         return (a / b);
 2028 }
 2029 
 2030 static uint64_t
 2031 _aullrem(a, b)
 2032         uint64_t                a;
 2033         uint64_t                b;
 2034 {
 2035         return (a % b);
 2036 }
 2037 
 2038 static int64_t
 2039 _allshl(a, b)
 2040         int64_t                 a;
 2041         uint8_t                 b;
 2042 {
 2043         return (a << b);
 2044 }
 2045 
 2046 static uint64_t
 2047 _aullshl(a, b)
 2048         uint64_t                a;
 2049         uint8_t                 b;
 2050 {
 2051         return (a << b);
 2052 }
 2053 
 2054 static int64_t
 2055 _allshr(a, b)
 2056         int64_t                 a;
 2057         uint8_t                 b;
 2058 {
 2059         return (a >> b);
 2060 }
 2061 
 2062 static uint64_t
 2063 _aullshr(a, b)
 2064         uint64_t                a;
 2065         uint8_t                 b;
 2066 {
 2067         return (a >> b);
 2068 }
 2069 
 2070 static slist_entry *
 2071 ntoskrnl_pushsl(head, entry)
 2072         slist_header            *head;
 2073         slist_entry             *entry;
 2074 {
 2075         slist_entry             *oldhead;
 2076 
 2077         oldhead = head->slh_list.slh_next;
 2078         entry->sl_next = head->slh_list.slh_next;
 2079         head->slh_list.slh_next = entry;
 2080         head->slh_list.slh_depth++;
 2081         head->slh_list.slh_seq++;
 2082 
 2083         return(oldhead);
 2084 }
 2085 
 2086 static slist_entry *
 2087 ntoskrnl_popsl(head)
 2088         slist_header            *head;
 2089 {
 2090         slist_entry             *first;
 2091 
 2092         first = head->slh_list.slh_next;
 2093         if (first != NULL) {
 2094                 head->slh_list.slh_next = first->sl_next;
 2095                 head->slh_list.slh_depth--;
 2096                 head->slh_list.slh_seq++;
 2097         }
 2098 
 2099         return(first);
 2100 }
 2101 
 2102 /*
 2103  * We need this to make lookaside lists work for amd64.
 2104  * We pass a pointer to ExAllocatePoolWithTag() the lookaside
 2105  * list structure. For amd64 to work right, this has to be a
 2106  * pointer to the wrapped version of the routine, not the
 2107  * original. Letting the Windows driver invoke the original
 2108  * function directly will result in a convention calling
 2109  * mismatch and a pretty crash. On x86, this effectively
 2110  * becomes a no-op since ipt_func and ipt_wrap are the same.
 2111  */
 2112 
 2113 static funcptr
 2114 ntoskrnl_findwrap(func)
 2115         funcptr                 func;
 2116 {
 2117         image_patch_table       *patch;
 2118 
 2119         patch = ntoskrnl_functbl;
 2120         while (patch->ipt_func != NULL) {
 2121                 if ((funcptr)patch->ipt_func == func)
 2122                         return((funcptr)patch->ipt_wrap);
 2123                 patch++;
 2124         }
 2125 
 2126         return(NULL);
 2127 }
 2128 
 2129 static void
 2130 ExInitializePagedLookasideList(lookaside, allocfunc, freefunc,
 2131     flags, size, tag, depth)
 2132         paged_lookaside_list    *lookaside;
 2133         lookaside_alloc_func    *allocfunc;
 2134         lookaside_free_func     *freefunc;
 2135         uint32_t                flags;
 2136         size_t                  size;
 2137         uint32_t                tag;
 2138         uint16_t                depth;
 2139 {
 2140         bzero((char *)lookaside, sizeof(paged_lookaside_list));
 2141 
 2142         if (size < sizeof(slist_entry))
 2143                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2144         else
 2145                 lookaside->nll_l.gl_size = size;
 2146         lookaside->nll_l.gl_tag = tag;
 2147         if (allocfunc == NULL)
 2148                 lookaside->nll_l.gl_allocfunc =
 2149                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2150         else
 2151                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2152 
 2153         if (freefunc == NULL)
 2154                 lookaside->nll_l.gl_freefunc =
 2155                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2156         else
 2157                 lookaside->nll_l.gl_freefunc = freefunc;
 2158 
 2159 #ifdef __i386__
 2160         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2161 #endif
 2162 
 2163         lookaside->nll_l.gl_type = NonPagedPool;
 2164         lookaside->nll_l.gl_depth = depth;
 2165         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2166 
 2167         return;
 2168 }
 2169 
 2170 static void
 2171 ExDeletePagedLookasideList(lookaside)
 2172         paged_lookaside_list   *lookaside;
 2173 {
 2174         void                    *buf;
 2175         void            (*freefunc)(void *);
 2176 
 2177         freefunc = lookaside->nll_l.gl_freefunc;
 2178         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2179                 MSCALL1(freefunc, buf);
 2180 
 2181         return;
 2182 }
 2183 
 2184 static void
 2185 ExInitializeNPagedLookasideList(lookaside, allocfunc, freefunc,
 2186     flags, size, tag, depth)
 2187         npaged_lookaside_list   *lookaside;
 2188         lookaside_alloc_func    *allocfunc;
 2189         lookaside_free_func     *freefunc;
 2190         uint32_t                flags;
 2191         size_t                  size;
 2192         uint32_t                tag;
 2193         uint16_t                depth;
 2194 {
 2195         bzero((char *)lookaside, sizeof(npaged_lookaside_list));
 2196 
 2197         if (size < sizeof(slist_entry))
 2198                 lookaside->nll_l.gl_size = sizeof(slist_entry);
 2199         else
 2200                 lookaside->nll_l.gl_size = size;
 2201         lookaside->nll_l.gl_tag = tag;
 2202         if (allocfunc == NULL)
 2203                 lookaside->nll_l.gl_allocfunc =
 2204                     ntoskrnl_findwrap((funcptr)ExAllocatePoolWithTag);
 2205         else
 2206                 lookaside->nll_l.gl_allocfunc = allocfunc;
 2207 
 2208         if (freefunc == NULL)
 2209                 lookaside->nll_l.gl_freefunc =
 2210                     ntoskrnl_findwrap((funcptr)ExFreePool);
 2211         else
 2212                 lookaside->nll_l.gl_freefunc = freefunc;
 2213 
 2214 #ifdef __i386__
 2215         KeInitializeSpinLock(&lookaside->nll_obsoletelock);
 2216 #endif
 2217 
 2218         lookaside->nll_l.gl_type = NonPagedPool;
 2219         lookaside->nll_l.gl_depth = depth;
 2220         lookaside->nll_l.gl_maxdepth = LOOKASIDE_DEPTH;
 2221 
 2222         return;
 2223 }
 2224 
 2225 static void
 2226 ExDeleteNPagedLookasideList(lookaside)
 2227         npaged_lookaside_list   *lookaside;
 2228 {
 2229         void                    *buf;
 2230         void            (*freefunc)(void *);
 2231 
 2232         freefunc = lookaside->nll_l.gl_freefunc;
 2233         while((buf = ntoskrnl_popsl(&lookaside->nll_l.gl_listhead)) != NULL)
 2234                 MSCALL1(freefunc, buf);
 2235 
 2236         return;
 2237 }
 2238 
 2239 slist_entry *
 2240 InterlockedPushEntrySList(head, entry)
 2241         slist_header            *head;
 2242         slist_entry             *entry;
 2243 {
 2244         slist_entry             *oldhead;
 2245 
 2246         mtx_lock_spin(&ntoskrnl_interlock);
 2247         oldhead = ntoskrnl_pushsl(head, entry);
 2248         mtx_unlock_spin(&ntoskrnl_interlock);
 2249 
 2250         return(oldhead);
 2251 }
 2252 
 2253 slist_entry *
 2254 InterlockedPopEntrySList(head)
 2255         slist_header            *head;
 2256 {
 2257         slist_entry             *first;
 2258 
 2259         mtx_lock_spin(&ntoskrnl_interlock);
 2260         first = ntoskrnl_popsl(head);
 2261         mtx_unlock_spin(&ntoskrnl_interlock);
 2262 
 2263         return(first);
 2264 }
 2265 
 2266 static slist_entry *
 2267 ExInterlockedPushEntrySList(head, entry, lock)
 2268         slist_header            *head;
 2269         slist_entry             *entry;
 2270         kspin_lock              *lock;
 2271 {
 2272         return(InterlockedPushEntrySList(head, entry));
 2273 }
 2274 
 2275 static slist_entry *
 2276 ExInterlockedPopEntrySList(head, lock)
 2277         slist_header            *head;
 2278         kspin_lock              *lock;
 2279 {
 2280         return(InterlockedPopEntrySList(head));
 2281 }
 2282 
 2283 uint16_t
 2284 ExQueryDepthSList(head)
 2285         slist_header            *head;
 2286 {
 2287         uint16_t                depth;
 2288 
 2289         mtx_lock_spin(&ntoskrnl_interlock);
 2290         depth = head->slh_list.slh_depth;
 2291         mtx_unlock_spin(&ntoskrnl_interlock);
 2292 
 2293         return(depth);
 2294 }
 2295 
 2296 void
 2297 KeInitializeSpinLock(lock)
 2298         kspin_lock              *lock;
 2299 {
 2300         *lock = 0;
 2301 
 2302         return;
 2303 }
 2304 
 2305 #ifdef __i386__
 2306 void
 2307 KefAcquireSpinLockAtDpcLevel(lock)
 2308         kspin_lock              *lock;
 2309 {
 2310 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2311         int                     i = 0;
 2312 #endif
 2313 
 2314         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0) {
 2315                 /* sit and spin */;
 2316 #ifdef NTOSKRNL_DEBUG_SPINLOCKS
 2317                 i++;
 2318                 if (i > 200000000)
 2319                         panic("DEADLOCK!");
 2320 #endif
 2321         }
 2322 
 2323         return;
 2324 }
 2325 
 2326 void
 2327 KefReleaseSpinLockFromDpcLevel(lock)
 2328         kspin_lock              *lock;
 2329 {
 2330         atomic_store_rel_int((volatile u_int *)lock, 0);
 2331 
 2332         return;
 2333 }
 2334 
 2335 uint8_t
 2336 KeAcquireSpinLockRaiseToDpc(kspin_lock *lock)
 2337 {
 2338         uint8_t                 oldirql;
 2339 
 2340         if (KeGetCurrentIrql() > DISPATCH_LEVEL)
 2341                 panic("IRQL_NOT_LESS_THAN_OR_EQUAL");
 2342 
 2343         KeRaiseIrql(DISPATCH_LEVEL, &oldirql);
 2344         KeAcquireSpinLockAtDpcLevel(lock);
 2345 
 2346         return(oldirql);
 2347 }
 2348 #else
 2349 void
 2350 KeAcquireSpinLockAtDpcLevel(kspin_lock *lock)
 2351 {
 2352         while (atomic_cmpset_acq_int((volatile u_int *)lock, 0, 1) == 0)
 2353                 /* sit and spin */;
 2354 
 2355         return;
 2356 }
 2357 
 2358 void
 2359 KeReleaseSpinLockFromDpcLevel(kspin_lock *lock)
 2360 {
 2361         atomic_store_rel_int((volatile u_int *)lock, 0);
 2362 
 2363         return;
 2364 }
 2365 #endif /* __i386__ */
 2366 
 2367 uintptr_t
 2368 InterlockedExchange(dst, val)
 2369         volatile uint32_t       *dst;
 2370         uintptr_t               val;
 2371 {
 2372         uintptr_t               r;
 2373 
 2374         mtx_lock_spin(&ntoskrnl_interlock);
 2375         r = *dst;
 2376         *dst = val;
 2377         mtx_unlock_spin(&ntoskrnl_interlock);
 2378 
 2379         return(r);
 2380 }
 2381 
 2382 static uint32_t
 2383 InterlockedIncrement(addend)
 2384         volatile uint32_t       *addend;
 2385 {
 2386         atomic_add_long((volatile u_long *)addend, 1);
 2387         return(*addend);
 2388 }
 2389 
 2390 static uint32_t
 2391 InterlockedDecrement(addend)
 2392         volatile uint32_t       *addend;
 2393 {
 2394         atomic_subtract_long((volatile u_long *)addend, 1);
 2395         return(*addend);
 2396 }
 2397 
 2398 static void
 2399 ExInterlockedAddLargeStatistic(addend, inc)
 2400         uint64_t                *addend;
 2401         uint32_t                inc;
 2402 {
 2403         mtx_lock_spin(&ntoskrnl_interlock);
 2404         *addend += inc;
 2405         mtx_unlock_spin(&ntoskrnl_interlock);
 2406 
 2407         return;
 2408 };
 2409 
 2410 mdl *
 2411 IoAllocateMdl(vaddr, len, secondarybuf, chargequota, iopkt)
 2412         void                    *vaddr;
 2413         uint32_t                len;
 2414         uint8_t                 secondarybuf;
 2415         uint8_t                 chargequota;
 2416         irp                     *iopkt;
 2417 {
 2418         mdl                     *m;
 2419         int                     zone = 0;
 2420 
 2421         if (MmSizeOfMdl(vaddr, len) > MDL_ZONE_SIZE)
 2422                 m = ExAllocatePoolWithTag(NonPagedPool,
 2423                     MmSizeOfMdl(vaddr, len), 0);
 2424         else {
 2425                 m = uma_zalloc(mdl_zone, M_NOWAIT | M_ZERO);
 2426                 zone++;
 2427         }
 2428 
 2429         if (m == NULL)
 2430                 return (NULL);
 2431 
 2432         MmInitializeMdl(m, vaddr, len);
 2433 
 2434         /*
 2435          * MmInitializMdl() clears the flags field, so we
 2436          * have to set this here. If the MDL came from the
 2437          * MDL UMA zone, tag it so we can release it to
 2438          * the right place later.
 2439          */
 2440         if (zone)
 2441                 m->mdl_flags = MDL_ZONE_ALLOCED;
 2442 
 2443         if (iopkt != NULL) {
 2444                 if (secondarybuf == TRUE) {
 2445                         mdl                     *last;
 2446                         last = iopkt->irp_mdl;
 2447                         while (last->mdl_next != NULL)
 2448                                 last = last->mdl_next;
 2449                         last->mdl_next = m;
 2450                 } else {
 2451                         if (iopkt->irp_mdl != NULL)
 2452                                 panic("leaking an MDL in IoAllocateMdl()");
 2453                         iopkt->irp_mdl = m;
 2454                 }
 2455         }
 2456 
 2457         return (m);
 2458 }
 2459 
 2460 void
 2461 IoFreeMdl(m)
 2462         mdl                     *m;
 2463 {
 2464         if (m == NULL)
 2465                 return;
 2466 
 2467         if (m->mdl_flags & MDL_ZONE_ALLOCED)
 2468                 uma_zfree(mdl_zone, m);
 2469         else
 2470                 ExFreePool(m);
 2471 
 2472         return;
 2473 }
 2474 
 2475 static uint32_t
 2476 MmSizeOfMdl(vaddr, len)
 2477         void                    *vaddr;
 2478         size_t                  len;
 2479 {
 2480         uint32_t                l;
 2481 
 2482         l = sizeof(struct mdl) +
 2483             (sizeof(vm_offset_t *) * SPAN_PAGES(vaddr, len));
 2484 
 2485         return(l);
 2486 }
 2487 
 2488 /*
 2489  * The Microsoft documentation says this routine fills in the
 2490  * page array of an MDL with the _physical_ page addresses that
 2491  * comprise the buffer, but we don't really want to do that here.
 2492  * Instead, we just fill in the page array with the kernel virtual
 2493  * addresses of the buffers.
 2494  */
 2495 void
 2496 MmBuildMdlForNonPagedPool(m)
 2497         mdl                     *m;
 2498 {
 2499         vm_offset_t             *mdl_pages;
 2500         int                     pagecnt, i;
 2501 
 2502         pagecnt = SPAN_PAGES(m->mdl_byteoffset, m->mdl_bytecount);
 2503 
 2504         if (pagecnt > (m->mdl_size - sizeof(mdl)) / sizeof(vm_offset_t *))
 2505                 panic("not enough pages in MDL to describe buffer");
 2506 
 2507         mdl_pages = MmGetMdlPfnArray(m);
 2508 
 2509         for (i = 0; i < pagecnt; i++)
 2510                 *mdl_pages = (vm_offset_t)m->mdl_startva + (i * PAGE_SIZE);
 2511 
 2512         m->mdl_flags |= MDL_SOURCE_IS_NONPAGED_POOL;
 2513         m->mdl_mappedsystemva = MmGetMdlVirtualAddress(m);
 2514 
 2515         return;
 2516 }
 2517 
 2518 static void *
 2519 MmMapLockedPages(buf, accessmode)
 2520         mdl                     *buf;
 2521         uint8_t                 accessmode;
 2522 {
 2523         buf->mdl_flags |= MDL_MAPPED_TO_SYSTEM_VA;
 2524         return(MmGetMdlVirtualAddress(buf));
 2525 }
 2526 
 2527 static void *
 2528 MmMapLockedPagesSpecifyCache(buf, accessmode, cachetype, vaddr,
 2529     bugcheck, prio)
 2530         mdl                     *buf;
 2531         uint8_t                 accessmode;
 2532         uint32_t                cachetype;
 2533         void                    *vaddr;
 2534         uint32_t                bugcheck;
 2535         uint32_t                prio;
 2536 {
 2537         return(MmMapLockedPages(buf, accessmode));
 2538 }
 2539 
 2540 static void
 2541 MmUnmapLockedPages(vaddr, buf)
 2542         void                    *vaddr;
 2543         mdl                     *buf;
 2544 {
 2545         buf->mdl_flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
 2546         return;
 2547 }
 2548 
 2549 /*
 2550  * This function has a problem in that it will break if you
 2551  * compile this module without PAE and try to use it on a PAE
 2552  * kernel. Unfortunately, there's no way around this at the
 2553  * moment. It's slightly less broken that using pmap_kextract().
 2554  * You'd think the virtual memory subsystem would help us out
 2555  * here, but it doesn't.
 2556  */
 2557 
 2558 static uint8_t
 2559 MmIsAddressValid(vaddr)
 2560         void                    *vaddr;
 2561 {
 2562         if (pmap_extract(kernel_map->pmap, (vm_offset_t)vaddr))
 2563                 return(TRUE);
 2564 
 2565         return(FALSE);
 2566 }
 2567 
 2568 void *
 2569 MmMapIoSpace(paddr, len, cachetype)
 2570         uint64_t                paddr;
 2571         uint32_t                len;
 2572         uint32_t                cachetype;
 2573 {
 2574         devclass_t              nexus_class;
 2575         device_t                *nexus_devs, devp;
 2576         int                     nexus_count = 0;
 2577         device_t                matching_dev = NULL;
 2578         struct resource         *res;
 2579         int                     i;
 2580         vm_offset_t             v;
 2581 
 2582         /* There will always be at least one nexus. */
 2583 
 2584         nexus_class = devclass_find("nexus");
 2585         devclass_get_devices(nexus_class, &nexus_devs, &nexus_count);
 2586 
 2587         for (i = 0; i < nexus_count; i++) {
 2588                 devp = nexus_devs[i];
 2589                 matching_dev = ntoskrnl_finddev(devp, paddr, &res);
 2590                 if (matching_dev)
 2591                         break;
 2592         }
 2593 
 2594         free(nexus_devs, M_TEMP);
 2595 
 2596         if (matching_dev == NULL)
 2597                 return(NULL);
 2598 
 2599         v = (vm_offset_t)rman_get_virtual(res);
 2600         if (paddr > rman_get_start(res))
 2601                 v += paddr - rman_get_start(res);
 2602 
 2603         return((void *)v);
 2604 }
 2605 
 2606 void
 2607 MmUnmapIoSpace(vaddr, len)
 2608         void                    *vaddr;
 2609         size_t                  len;
 2610 {
 2611         return;
 2612 }
 2613 
 2614 
 2615 static device_t
 2616 ntoskrnl_finddev(dev, paddr, res)
 2617         device_t                dev;
 2618         uint64_t                paddr;
 2619         struct resource         **res;
 2620 {
 2621         device_t                *children = NULL;
 2622         device_t                matching_dev;
 2623         int                     childcnt;
 2624         struct resource         *r;
 2625         struct resource_list    *rl;
 2626         struct resource_list_entry      *rle;
 2627         uint32_t                flags;
 2628         int                     i;
 2629 
 2630         /* We only want devices that have been successfully probed. */
 2631 
 2632         if (device_is_alive(dev) == FALSE)
 2633                 return(NULL);
 2634 
 2635         rl = BUS_GET_RESOURCE_LIST(device_get_parent(dev), dev);
 2636         if (rl != NULL) {
 2637 #if __FreeBSD_version < 600022
 2638                 SLIST_FOREACH(rle, rl, link) {
 2639 #else
 2640                 STAILQ_FOREACH(rle, rl, link) {
 2641 #endif
 2642                         r = rle->res;
 2643 
 2644                         if (r == NULL)
 2645                                 continue;
 2646 
 2647                         flags = rman_get_flags(r);
 2648 
 2649                         if (rle->type == SYS_RES_MEMORY &&
 2650                             paddr >= rman_get_start(r) &&
 2651                             paddr <= rman_get_end(r)) {
 2652                                 if (!(flags & RF_ACTIVE))
 2653                                         bus_activate_resource(dev,
 2654                                             SYS_RES_MEMORY, 0, r);
 2655                                 *res = r;
 2656                                 return(dev);
 2657                         }
 2658                 }
 2659         }
 2660 
 2661         /*
 2662          * If this device has children, do another
 2663          * level of recursion to inspect them.
 2664          */
 2665 
 2666         device_get_children(dev, &children, &childcnt);
 2667 
 2668         for (i = 0; i < childcnt; i++) {
 2669                 matching_dev = ntoskrnl_finddev(children[i], paddr, res);
 2670                 if (matching_dev != NULL) {
 2671                         free(children, M_TEMP);
 2672                         return(matching_dev);
 2673                 }
 2674         }
 2675 
 2676         
 2677         /* Won't somebody please think of the children! */
 2678 
 2679         if (children != NULL)
 2680                 free(children, M_TEMP);
 2681 
 2682         return(NULL);
 2683 }
 2684 
 2685 /*
 2686  * Workitems are unlike DPCs, in that they run in a user-mode thread
 2687  * context rather than at DISPATCH_LEVEL in kernel context. In our
 2688  * case we run them in kernel context anyway.
 2689  */
 2690 static void
 2691 ntoskrnl_workitem_thread(arg)
 2692         void                    *arg;
 2693 {
 2694         kdpc_queue              *kq;
 2695         list_entry              *l;
 2696         io_workitem             *iw;
 2697         uint8_t                 irql;
 2698 
 2699         kq = arg;
 2700 
 2701         InitializeListHead(&kq->kq_disp);
 2702         kq->kq_td = curthread;
 2703         kq->kq_exit = 0;
 2704         KeInitializeSpinLock(&kq->kq_lock);
 2705         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 2706 
 2707         while (1) {
 2708                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 2709 
 2710                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 2711 
 2712                 if (kq->kq_exit) {
 2713                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2714                         break;
 2715                 }
 2716 
 2717                 while (!IsListEmpty(&kq->kq_disp)) {
 2718                         l = RemoveHeadList(&kq->kq_disp);
 2719                         iw = CONTAINING_RECORD(l,
 2720                             io_workitem, iw_listentry);
 2721                         InitializeListHead((&iw->iw_listentry));
 2722                         if (iw->iw_func == NULL)
 2723                                 continue;
 2724                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2725                         MSCALL2(iw->iw_func, iw->iw_dobj, iw->iw_ctx);
 2726                         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2727                 }
 2728 
 2729                 KeReleaseSpinLock(&kq->kq_lock, irql);
 2730         }
 2731 
 2732 #if __FreeBSD_version < 502113
 2733         mtx_lock(&Giant);
 2734 #endif
 2735         kthread_exit(0);
 2736         return; /* notreached */
 2737 }
 2738 
 2739 static void
 2740 ntoskrnl_destroy_workitem_threads(void)
 2741 {
 2742         kdpc_queue              *kq;
 2743         int                     i;
 2744 
 2745         for (i = 0; i < WORKITEM_THREADS; i++) {
 2746                 kq = wq_queues + i;
 2747                 kq->kq_exit = 1;
 2748                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);       
 2749                 tsleep(kq->kq_td->td_proc, PWAIT, "waitiw", 0);
 2750         }
 2751 
 2752         return;
 2753 }
 2754 
 2755 io_workitem *
 2756 IoAllocateWorkItem(dobj)
 2757         device_object           *dobj;
 2758 {
 2759         io_workitem             *iw;
 2760 
 2761         iw = uma_zalloc(iw_zone, M_NOWAIT);
 2762         if (iw == NULL)
 2763                 return(NULL);
 2764 
 2765         InitializeListHead(&iw->iw_listentry);
 2766         iw->iw_dobj = dobj;
 2767 
 2768         mtx_lock(&ntoskrnl_dispatchlock);
 2769         iw->iw_idx = wq_idx;
 2770         WORKIDX_INC(wq_idx);
 2771         mtx_unlock(&ntoskrnl_dispatchlock);
 2772 
 2773         return(iw);
 2774 }
 2775 
 2776 void
 2777 IoFreeWorkItem(iw)
 2778         io_workitem             *iw;
 2779 {
 2780         uma_zfree(iw_zone, iw);
 2781         return;
 2782 }
 2783 
 2784 void
 2785 IoQueueWorkItem(iw, iw_func, qtype, ctx)
 2786         io_workitem             *iw;
 2787         io_workitem_func        iw_func;
 2788         uint32_t                qtype;
 2789         void                    *ctx;
 2790 {
 2791         kdpc_queue              *kq;
 2792         list_entry              *l;
 2793         io_workitem             *cur;
 2794         uint8_t                 irql;
 2795 
 2796         kq = wq_queues + iw->iw_idx;
 2797 
 2798         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2799 
 2800         /*
 2801          * Traverse the list and make sure this workitem hasn't
 2802          * already been inserted. Queuing the same workitem
 2803          * twice will hose the list but good.
 2804          */
 2805 
 2806         l = kq->kq_disp.nle_flink;
 2807         while (l != &kq->kq_disp) {
 2808                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2809                 if (cur == iw) {
 2810                         /* Already queued -- do nothing. */
 2811                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2812                         return;
 2813                 }
 2814                 l = l->nle_flink;
 2815         }
 2816 
 2817         iw->iw_func = iw_func;
 2818         iw->iw_ctx = ctx;
 2819 
 2820         InsertTailList((&kq->kq_disp), (&iw->iw_listentry));
 2821         KeReleaseSpinLock(&kq->kq_lock, irql);
 2822 
 2823         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 2824 
 2825         return;
 2826 }
 2827 
 2828 static void
 2829 ntoskrnl_workitem(dobj, arg)
 2830         device_object           *dobj;
 2831         void                    *arg;
 2832 {
 2833         io_workitem             *iw;
 2834         work_queue_item         *w;
 2835         work_item_func          f;
 2836 
 2837         iw = arg;
 2838         w = (work_queue_item *)dobj;
 2839         f = (work_item_func)w->wqi_func;
 2840         uma_zfree(iw_zone, iw);
 2841         MSCALL2(f, w, w->wqi_ctx);
 2842 
 2843         return;
 2844 }
 2845 
 2846 /*
 2847  * The ExQueueWorkItem() API is deprecated in Windows XP. Microsoft
 2848  * warns that it's unsafe and to use IoQueueWorkItem() instead. The
 2849  * problem with ExQueueWorkItem() is that it can't guard against
 2850  * the condition where a driver submits a job to the work queue and
 2851  * is then unloaded before the job is able to run. IoQueueWorkItem()
 2852  * acquires a reference to the device's device_object via the
 2853  * object manager and retains it until after the job has completed,
 2854  * which prevents the driver from being unloaded before the job
 2855  * runs. (We don't currently support this behavior, though hopefully
 2856  * that will change once the object manager API is fleshed out a bit.)
 2857  *
 2858  * Having said all that, the ExQueueWorkItem() API remains, because
 2859  * there are still other parts of Windows that use it, including
 2860  * NDIS itself: NdisScheduleWorkItem() calls ExQueueWorkItem().
 2861  * We fake up the ExQueueWorkItem() API on top of our implementation
 2862  * of IoQueueWorkItem(). Workitem thread #3 is reserved exclusively
 2863  * for ExQueueWorkItem() jobs, and we pass a pointer to the work
 2864  * queue item (provided by the caller) in to IoAllocateWorkItem()
 2865  * instead of the device_object. We need to save this pointer so
 2866  * we can apply a sanity check: as with the DPC queue and other
 2867  * workitem queues, we can't allow the same work queue item to
 2868  * be queued twice. If it's already pending, we silently return
 2869  */
 2870 
 2871 void
 2872 ExQueueWorkItem(w, qtype)
 2873         work_queue_item         *w;
 2874         uint32_t                qtype;
 2875 {
 2876         io_workitem             *iw;
 2877         io_workitem_func        iwf;
 2878         kdpc_queue              *kq;
 2879         list_entry              *l;
 2880         io_workitem             *cur;
 2881         uint8_t                 irql;
 2882 
 2883 
 2884         /*
 2885          * We need to do a special sanity test to make sure
 2886          * the ExQueueWorkItem() API isn't used to queue
 2887          * the same workitem twice. Rather than checking the
 2888          * io_workitem pointer itself, we test the attached
 2889          * device object, which is really a pointer to the
 2890          * legacy work queue item structure.
 2891          */
 2892 
 2893         kq = wq_queues + WORKITEM_LEGACY_THREAD;
 2894         KeAcquireSpinLock(&kq->kq_lock, &irql);
 2895         l = kq->kq_disp.nle_flink;
 2896         while (l != &kq->kq_disp) {
 2897                 cur = CONTAINING_RECORD(l, io_workitem, iw_listentry);
 2898                 if (cur->iw_dobj == (device_object *)w) {
 2899                         /* Already queued -- do nothing. */
 2900                         KeReleaseSpinLock(&kq->kq_lock, irql);
 2901                         return;
 2902                 }
 2903                 l = l->nle_flink;
 2904         }
 2905         KeReleaseSpinLock(&kq->kq_lock, irql);
 2906 
 2907         iw = IoAllocateWorkItem((device_object *)w);
 2908         if (iw == NULL)
 2909                 return;
 2910 
 2911         iw->iw_idx = WORKITEM_LEGACY_THREAD;
 2912         iwf = (io_workitem_func)ntoskrnl_findwrap((funcptr)ntoskrnl_workitem);
 2913         IoQueueWorkItem(iw, iwf, qtype, iw);
 2914 
 2915         return;
 2916 }
 2917 
 2918 static void
 2919 RtlZeroMemory(dst, len)
 2920         void                    *dst;
 2921         size_t                  len;
 2922 {
 2923         bzero(dst, len);
 2924         return;
 2925 }
 2926 
 2927 static void
 2928 RtlCopyMemory(dst, src, len)
 2929         void                    *dst;
 2930         const void              *src;
 2931         size_t                  len;
 2932 {
 2933         bcopy(src, dst, len);
 2934         return;
 2935 }
 2936 
 2937 static size_t
 2938 RtlCompareMemory(s1, s2, len)
 2939         const void              *s1;
 2940         const void              *s2;
 2941         size_t                  len;
 2942 {
 2943         size_t                  i, total = 0;
 2944         uint8_t                 *m1, *m2;
 2945 
 2946         m1 = __DECONST(char *, s1);
 2947         m2 = __DECONST(char *, s2);
 2948 
 2949         for (i = 0; i < len; i++) {
 2950                 if (m1[i] == m2[i])
 2951                         total++;
 2952         }
 2953         return(total);
 2954 }
 2955 
 2956 void
 2957 RtlInitAnsiString(dst, src)
 2958         ansi_string             *dst;
 2959         char                    *src;
 2960 {
 2961         ansi_string             *a;
 2962 
 2963         a = dst;
 2964         if (a == NULL)
 2965                 return;
 2966         if (src == NULL) {
 2967                 a->as_len = a->as_maxlen = 0;
 2968                 a->as_buf = NULL;
 2969         } else {
 2970                 a->as_buf = src;
 2971                 a->as_len = a->as_maxlen = strlen(src);
 2972         }
 2973 
 2974         return;
 2975 }
 2976 
 2977 void
 2978 RtlInitUnicodeString(dst, src)
 2979         unicode_string          *dst;
 2980         uint16_t                *src;
 2981 {
 2982         unicode_string          *u;
 2983         int                     i;
 2984 
 2985         u = dst;
 2986         if (u == NULL)
 2987                 return;
 2988         if (src == NULL) {
 2989                 u->us_len = u->us_maxlen = 0;
 2990                 u->us_buf = NULL;
 2991         } else {
 2992                 i = 0;
 2993                 while(src[i] != 0)
 2994                         i++;
 2995                 u->us_buf = src;
 2996                 u->us_len = u->us_maxlen = i * 2;
 2997         }
 2998 
 2999         return;
 3000 }
 3001 
 3002 ndis_status
 3003 RtlUnicodeStringToInteger(ustr, base, val)
 3004         unicode_string          *ustr;
 3005         uint32_t                base;
 3006         uint32_t                *val;
 3007 {
 3008         uint16_t                *uchr;
 3009         int                     len, neg = 0;
 3010         char                    abuf[64];
 3011         char                    *astr;
 3012 
 3013         uchr = ustr->us_buf;
 3014         len = ustr->us_len;
 3015         bzero(abuf, sizeof(abuf));
 3016 
 3017         if ((char)((*uchr) & 0xFF) == '-') {
 3018                 neg = 1;
 3019                 uchr++;
 3020                 len -= 2;
 3021         } else if ((char)((*uchr) & 0xFF) == '+') {
 3022                 neg = 0;
 3023                 uchr++;
 3024                 len -= 2;
 3025         }
 3026 
 3027         if (base == 0) {
 3028                 if ((char)((*uchr) & 0xFF) == 'b') {
 3029                         base = 2;
 3030                         uchr++;
 3031                         len -= 2;
 3032                 } else if ((char)((*uchr) & 0xFF) == 'o') {
 3033                         base = 8;
 3034                         uchr++;
 3035                         len -= 2;
 3036                 } else if ((char)((*uchr) & 0xFF) == 'x') {
 3037                         base = 16;
 3038                         uchr++;
 3039                         len -= 2;
 3040                 } else
 3041                         base = 10;
 3042         }
 3043 
 3044         astr = abuf;
 3045         if (neg) {
 3046                 strcpy(astr, "-");
 3047                 astr++;
 3048         }
 3049 
 3050         ntoskrnl_unicode_to_ascii(uchr, astr, len);
 3051         *val = strtoul(abuf, NULL, base);
 3052 
 3053         return(STATUS_SUCCESS);
 3054 }
 3055 
 3056 void
 3057 RtlFreeUnicodeString(ustr)
 3058         unicode_string          *ustr;
 3059 {
 3060         if (ustr->us_buf == NULL)
 3061                 return;
 3062         ExFreePool(ustr->us_buf);
 3063         ustr->us_buf = NULL;
 3064         return;
 3065 }
 3066 
 3067 void
 3068 RtlFreeAnsiString(astr)
 3069         ansi_string             *astr;
 3070 {
 3071         if (astr->as_buf == NULL)
 3072                 return;
 3073         ExFreePool(astr->as_buf);
 3074         astr->as_buf = NULL;
 3075         return;
 3076 }
 3077 
 3078 static int
 3079 atoi(str)
 3080         const char              *str;
 3081 {
 3082         return (int)strtol(str, (char **)NULL, 10);
 3083 }
 3084 
 3085 static long
 3086 atol(str)
 3087         const char              *str;
 3088 {
 3089         return strtol(str, (char **)NULL, 10);
 3090 }
 3091 
 3092 static int
 3093 rand(void)
 3094 {
 3095         struct timeval          tv;
 3096 
 3097         microtime(&tv);
 3098         srandom(tv.tv_usec);
 3099         return((int)random());
 3100 }
 3101 
 3102 static void
 3103 srand(seed)
 3104         unsigned int            seed;
 3105 {
 3106         srandom(seed);
 3107         return;
 3108 }
 3109 
 3110 static uint8_t
 3111 IoIsWdmVersionAvailable(major, minor)
 3112         uint8_t                 major;
 3113         uint8_t                 minor;
 3114 {
 3115         if (major == WDM_MAJOR && minor == WDM_MINOR_WINXP)
 3116                 return(TRUE);
 3117         return(FALSE);
 3118 }
 3119 
 3120 static ndis_status
 3121 IoGetDeviceProperty(devobj, regprop, buflen, prop, reslen)
 3122         device_object           *devobj;
 3123         uint32_t                regprop;
 3124         uint32_t                buflen;
 3125         void                    *prop;
 3126         uint32_t                *reslen;
 3127 {
 3128         driver_object           *drv;
 3129         uint16_t                **name;
 3130 
 3131         drv = devobj->do_drvobj;
 3132 
 3133         switch (regprop) {
 3134         case DEVPROP_DRIVER_KEYNAME:
 3135                 name = prop;
 3136                 *name = drv->dro_drivername.us_buf;
 3137                 *reslen = drv->dro_drivername.us_len;
 3138                 break;
 3139         default:
 3140                 return(STATUS_INVALID_PARAMETER_2);
 3141                 break;
 3142         }
 3143 
 3144         return(STATUS_SUCCESS);
 3145 }
 3146 
 3147 static void
 3148 KeInitializeMutex(kmutex, level)
 3149         kmutant                 *kmutex;
 3150         uint32_t                level;
 3151 {
 3152         InitializeListHead((&kmutex->km_header.dh_waitlisthead));
 3153         kmutex->km_abandoned = FALSE;
 3154         kmutex->km_apcdisable = 1;
 3155         kmutex->km_header.dh_sigstate = 1;
 3156         kmutex->km_header.dh_type = DISP_TYPE_MUTANT;
 3157         kmutex->km_header.dh_size = sizeof(kmutant) / sizeof(uint32_t);
 3158         kmutex->km_ownerthread = NULL;
 3159         return;
 3160 }
 3161 
 3162 static uint32_t
 3163 KeReleaseMutex(kmutex, kwait)
 3164         kmutant                 *kmutex;
 3165         uint8_t                 kwait;
 3166 {
 3167         uint32_t                prevstate;
 3168 
 3169         mtx_lock(&ntoskrnl_dispatchlock);
 3170         prevstate = kmutex->km_header.dh_sigstate;
 3171         if (kmutex->km_ownerthread != curthread) {
 3172                 mtx_unlock(&ntoskrnl_dispatchlock);
 3173                 return(STATUS_MUTANT_NOT_OWNED);
 3174         }
 3175 
 3176         kmutex->km_header.dh_sigstate++;
 3177         kmutex->km_abandoned = FALSE;
 3178 
 3179         if (kmutex->km_header.dh_sigstate == 1) {
 3180                 kmutex->km_ownerthread = NULL;
 3181                 ntoskrnl_waittest(&kmutex->km_header, IO_NO_INCREMENT);
 3182         }
 3183 
 3184         mtx_unlock(&ntoskrnl_dispatchlock);
 3185 
 3186         return(prevstate);
 3187 }
 3188 
 3189 static uint32_t
 3190 KeReadStateMutex(kmutex)
 3191         kmutant                 *kmutex;
 3192 {
 3193         return(kmutex->km_header.dh_sigstate);
 3194 }
 3195 
 3196 void
 3197 KeInitializeEvent(kevent, type, state)
 3198         nt_kevent               *kevent;
 3199         uint32_t                type;
 3200         uint8_t                 state;
 3201 {
 3202         InitializeListHead((&kevent->k_header.dh_waitlisthead));
 3203         kevent->k_header.dh_sigstate = state;
 3204         if (type == EVENT_TYPE_NOTIFY)
 3205                 kevent->k_header.dh_type = DISP_TYPE_NOTIFICATION_EVENT;
 3206         else
 3207                 kevent->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_EVENT;
 3208         kevent->k_header.dh_size = sizeof(nt_kevent) / sizeof(uint32_t);
 3209         return;
 3210 }
 3211 
 3212 uint32_t
 3213 KeResetEvent(kevent)
 3214         nt_kevent               *kevent;
 3215 {
 3216         uint32_t                prevstate;
 3217 
 3218         mtx_lock(&ntoskrnl_dispatchlock);
 3219         prevstate = kevent->k_header.dh_sigstate;
 3220         kevent->k_header.dh_sigstate = FALSE;
 3221         mtx_unlock(&ntoskrnl_dispatchlock);
 3222 
 3223         return(prevstate);
 3224 }
 3225 
 3226 uint32_t
 3227 KeSetEvent(kevent, increment, kwait)
 3228         nt_kevent               *kevent;
 3229         uint32_t                increment;
 3230         uint8_t                 kwait;
 3231 {
 3232         uint32_t                prevstate;
 3233         wait_block              *w;
 3234         nt_dispatch_header      *dh;
 3235         struct thread           *td;
 3236         wb_ext                  *we;
 3237 
 3238         mtx_lock(&ntoskrnl_dispatchlock);
 3239         prevstate = kevent->k_header.dh_sigstate;
 3240         dh = &kevent->k_header;
 3241 
 3242         if (IsListEmpty(&dh->dh_waitlisthead))
 3243                 /*
 3244                  * If there's nobody in the waitlist, just set
 3245                  * the state to signalled.
 3246                  */
 3247                 dh->dh_sigstate = 1;
 3248         else {
 3249                 /*
 3250                  * Get the first waiter. If this is a synchronization
 3251                  * event, just wake up that one thread (don't bother
 3252                  * setting the state to signalled since we're supposed
 3253                  * to automatically clear synchronization events anyway).
 3254                  *
 3255                  * If it's a notification event, or the the first
 3256                  * waiter is doing a WAITTYPE_ALL wait, go through
 3257                  * the full wait satisfaction process.
 3258                  */
 3259                 w = CONTAINING_RECORD(dh->dh_waitlisthead.nle_flink,
 3260                     wait_block, wb_waitlist);
 3261                 we = w->wb_ext;
 3262                 td = we->we_td;
 3263                 if (kevent->k_header.dh_type == DISP_TYPE_NOTIFICATION_EVENT ||
 3264                     w->wb_waittype == WAITTYPE_ALL) {
 3265                         if (prevstate == 0) {
 3266                                 dh->dh_sigstate = 1;
 3267                                 ntoskrnl_waittest(dh, increment);
 3268                         }
 3269                 } else {
 3270                         w->wb_awakened |= TRUE;
 3271                         cv_broadcastpri(&we->we_cv, w->wb_oldpri -
 3272                             (increment * 4));
 3273                 }
 3274         }
 3275 
 3276         mtx_unlock(&ntoskrnl_dispatchlock);
 3277 
 3278         return(prevstate);
 3279 }
 3280 
 3281 void
 3282 KeClearEvent(kevent)
 3283         nt_kevent               *kevent;
 3284 {
 3285         kevent->k_header.dh_sigstate = FALSE;
 3286         return;
 3287 }
 3288 
 3289 uint32_t
 3290 KeReadStateEvent(kevent)
 3291         nt_kevent               *kevent;
 3292 {
 3293         return(kevent->k_header.dh_sigstate);
 3294 }
 3295 
 3296 /*
 3297  * The object manager in Windows is responsible for managing
 3298  * references and access to various types of objects, including
 3299  * device_objects, events, threads, timers and so on. However,
 3300  * there's a difference in the way objects are handled in user
 3301  * mode versus kernel mode.
 3302  *
 3303  * In user mode (i.e. Win32 applications), all objects are
 3304  * managed by the object manager. For example, when you create
 3305  * a timer or event object, you actually end up with an 
 3306  * object_header (for the object manager's bookkeeping
 3307  * purposes) and an object body (which contains the actual object
 3308  * structure, e.g. ktimer, kevent, etc...). This allows Windows
 3309  * to manage resource quotas and to enforce access restrictions
 3310  * on basically every kind of system object handled by the kernel.
 3311  *
 3312  * However, in kernel mode, you only end up using the object
 3313  * manager some of the time. For example, in a driver, you create
 3314  * a timer object by simply allocating the memory for a ktimer
 3315  * structure and initializing it with KeInitializeTimer(). Hence,
 3316  * the timer has no object_header and no reference counting or
 3317  * security/resource checks are done on it. The assumption in
 3318  * this case is that if you're running in kernel mode, you know
 3319  * what you're doing, and you're already at an elevated privilege
 3320  * anyway.
 3321  *
 3322  * There are some exceptions to this. The two most important ones
 3323  * for our purposes are device_objects and threads. We need to use
 3324  * the object manager to do reference counting on device_objects,
 3325  * and for threads, you can only get a pointer to a thread's
 3326  * dispatch header by using ObReferenceObjectByHandle() on the
 3327  * handle returned by PsCreateSystemThread().
 3328  */
 3329 
 3330 static ndis_status
 3331 ObReferenceObjectByHandle(handle, reqaccess, otype,
 3332     accessmode, object, handleinfo)
 3333         ndis_handle             handle;
 3334         uint32_t                reqaccess;
 3335         void                    *otype;
 3336         uint8_t                 accessmode;
 3337         void                    **object;
 3338         void                    **handleinfo;
 3339 {
 3340         nt_objref               *nr;
 3341 
 3342         nr = malloc(sizeof(nt_objref), M_DEVBUF, M_NOWAIT|M_ZERO);
 3343         if (nr == NULL)
 3344                 return(STATUS_INSUFFICIENT_RESOURCES);
 3345 
 3346         InitializeListHead((&nr->no_dh.dh_waitlisthead));
 3347         nr->no_obj = handle;
 3348         nr->no_dh.dh_type = DISP_TYPE_THREAD;
 3349         nr->no_dh.dh_sigstate = 0;
 3350         nr->no_dh.dh_size = (uint8_t)(sizeof(struct thread) /
 3351             sizeof(uint32_t));
 3352         TAILQ_INSERT_TAIL(&ntoskrnl_reflist, nr, link);
 3353         *object = nr;
 3354 
 3355         return(STATUS_SUCCESS);
 3356 }
 3357 
 3358 static void
 3359 ObfDereferenceObject(object)
 3360         void                    *object;
 3361 {
 3362         nt_objref               *nr;
 3363 
 3364         nr = object;
 3365         TAILQ_REMOVE(&ntoskrnl_reflist, nr, link);
 3366         free(nr, M_DEVBUF);
 3367 
 3368         return;
 3369 }
 3370 
 3371 static uint32_t
 3372 ZwClose(handle)
 3373         ndis_handle             handle;
 3374 {
 3375         return(STATUS_SUCCESS);
 3376 }
 3377 
 3378 static uint32_t
 3379 WmiQueryTraceInformation(traceclass, traceinfo, infolen, reqlen, buf)
 3380         uint32_t                traceclass;
 3381         void                    *traceinfo;
 3382         uint32_t                infolen;
 3383         uint32_t                reqlen;
 3384         void                    *buf;
 3385 {
 3386         return(STATUS_NOT_FOUND);
 3387 }
 3388 
 3389 static uint32_t
 3390 WmiTraceMessage(uint64_t loghandle, uint32_t messageflags,
 3391         void *guid, uint16_t messagenum, ...)
 3392 {
 3393         return(STATUS_SUCCESS);
 3394 }
 3395 
 3396 static uint32_t
 3397 IoWMIRegistrationControl(dobj, action)
 3398         device_object           *dobj;
 3399         uint32_t                action;
 3400 {
 3401         return(STATUS_SUCCESS);
 3402 }
 3403 
 3404 /*
 3405  * This is here just in case the thread returns without calling
 3406  * PsTerminateSystemThread().
 3407  */
 3408 static void
 3409 ntoskrnl_thrfunc(arg)
 3410         void                    *arg;
 3411 {
 3412         thread_context          *thrctx;
 3413         uint32_t (*tfunc)(void *);
 3414         void                    *tctx;
 3415         uint32_t                rval;
 3416 
 3417         thrctx = arg;
 3418         tfunc = thrctx->tc_thrfunc;
 3419         tctx = thrctx->tc_thrctx;
 3420         free(thrctx, M_TEMP);
 3421 
 3422         rval = MSCALL1(tfunc, tctx);
 3423 
 3424         PsTerminateSystemThread(rval);
 3425         return; /* notreached */
 3426 }
 3427 
 3428 static ndis_status
 3429 PsCreateSystemThread(handle, reqaccess, objattrs, phandle,
 3430         clientid, thrfunc, thrctx)
 3431         ndis_handle             *handle;
 3432         uint32_t                reqaccess;
 3433         void                    *objattrs;
 3434         ndis_handle             phandle;
 3435         void                    *clientid;
 3436         void                    *thrfunc;
 3437         void                    *thrctx;
 3438 {
 3439         int                     error;
 3440         char                    tname[128];
 3441         thread_context          *tc;
 3442         struct proc             *p;
 3443 
 3444         tc = malloc(sizeof(thread_context), M_TEMP, M_NOWAIT);
 3445         if (tc == NULL)
 3446                 return(STATUS_INSUFFICIENT_RESOURCES);
 3447 
 3448         tc->tc_thrctx = thrctx;
 3449         tc->tc_thrfunc = thrfunc;
 3450 
 3451         sprintf(tname, "windows kthread %d", ntoskrnl_kth);
 3452         error = kthread_create(ntoskrnl_thrfunc, tc, &p,
 3453             RFHIGHPID, NDIS_KSTACK_PAGES, tname);
 3454 
 3455         if (error) {
 3456                 free(tc, M_TEMP);
 3457                 return(STATUS_INSUFFICIENT_RESOURCES);
 3458         }
 3459 
 3460         *handle = p;
 3461         ntoskrnl_kth++;
 3462 
 3463         return(STATUS_SUCCESS);
 3464 }
 3465 
 3466 /*
 3467  * In Windows, the exit of a thread is an event that you're allowed
 3468  * to wait on, assuming you've obtained a reference to the thread using
 3469  * ObReferenceObjectByHandle(). Unfortunately, the only way we can
 3470  * simulate this behavior is to register each thread we create in a
 3471  * reference list, and if someone holds a reference to us, we poke
 3472  * them.
 3473  */
 3474 static ndis_status
 3475 PsTerminateSystemThread(status)
 3476         ndis_status             status;
 3477 {
 3478         struct nt_objref        *nr;
 3479 
 3480         mtx_lock(&ntoskrnl_dispatchlock);
 3481         TAILQ_FOREACH(nr, &ntoskrnl_reflist, link) {
 3482                 if (nr->no_obj != curthread->td_proc)
 3483                         continue;
 3484                 nr->no_dh.dh_sigstate = 1;
 3485                 ntoskrnl_waittest(&nr->no_dh, IO_NO_INCREMENT);
 3486                 break;
 3487         }
 3488         mtx_unlock(&ntoskrnl_dispatchlock);
 3489 
 3490         ntoskrnl_kth--;
 3491 
 3492 #if __FreeBSD_version < 502113
 3493         mtx_lock(&Giant);
 3494 #endif
 3495         kthread_exit(0);
 3496         return(0);      /* notreached */
 3497 }
 3498 
 3499 static uint32_t
 3500 DbgPrint(char *fmt, ...)
 3501 {
 3502         va_list                 ap;
 3503 
 3504         if (bootverbose) {
 3505                 va_start(ap, fmt);
 3506                 vprintf(fmt, ap);
 3507         }
 3508 
 3509         return(STATUS_SUCCESS);
 3510 }
 3511 
 3512 static void
 3513 DbgBreakPoint(void)
 3514 {
 3515 
 3516 #if __FreeBSD_version < 502113
 3517         Debugger("DbgBreakPoint(): breakpoint");
 3518 #else
 3519         kdb_enter("DbgBreakPoint(): breakpoint");
 3520 #endif
 3521 }
 3522 
 3523 static void
 3524 ntoskrnl_timercall(arg)
 3525         void                    *arg;
 3526 {
 3527         ktimer                  *timer;
 3528         struct timeval          tv;
 3529         kdpc                    *dpc;
 3530 
 3531         mtx_lock(&ntoskrnl_dispatchlock);
 3532 
 3533         timer = arg;
 3534 
 3535 #ifdef NTOSKRNL_DEBUG_TIMERS
 3536         ntoskrnl_timer_fires++;
 3537 #endif
 3538         ntoskrnl_remove_timer(timer);
 3539 
 3540         /*
 3541          * This should never happen, but complain
 3542          * if it does.
 3543          */
 3544 
 3545         if (timer->k_header.dh_inserted == FALSE) {
 3546                 mtx_unlock(&ntoskrnl_dispatchlock);
 3547                 printf("NTOS: timer %p fired even though "
 3548                     "it was canceled\n", timer);
 3549                 return;
 3550         }
 3551 
 3552         /* Mark the timer as no longer being on the timer queue. */
 3553 
 3554         timer->k_header.dh_inserted = FALSE;
 3555 
 3556         /* Now signal the object and satisfy any waits on it. */
 3557 
 3558         timer->k_header.dh_sigstate = 1;
 3559         ntoskrnl_waittest(&timer->k_header, IO_NO_INCREMENT);
 3560 
 3561         /*
 3562          * If this is a periodic timer, re-arm it
 3563          * so it will fire again. We do this before
 3564          * calling any deferred procedure calls because
 3565          * it's possible the DPC might cancel the timer,
 3566          * in which case it would be wrong for us to
 3567          * re-arm it again afterwards.
 3568          */
 3569 
 3570         if (timer->k_period) {
 3571                 tv.tv_sec = 0;
 3572                 tv.tv_usec = timer->k_period * 1000;
 3573                 timer->k_header.dh_inserted = TRUE;
 3574                 ntoskrnl_insert_timer(timer, tvtohz(&tv));
 3575 #ifdef NTOSKRNL_DEBUG_TIMERS
 3576                 ntoskrnl_timer_reloads++;
 3577 #endif
 3578         }
 3579 
 3580         dpc = timer->k_dpc;
 3581 
 3582         mtx_unlock(&ntoskrnl_dispatchlock);
 3583 
 3584         /* If there's a DPC associated with the timer, queue it up. */
 3585 
 3586         if (dpc != NULL)
 3587                 KeInsertQueueDpc(dpc, NULL, NULL);
 3588 
 3589         return;
 3590 }
 3591 
 3592 #ifdef NTOSKRNL_DEBUG_TIMERS
 3593 static int
 3594 sysctl_show_timers(SYSCTL_HANDLER_ARGS)
 3595 {
 3596         int                     ret;
 3597 
 3598         ret = 0;
 3599         ntoskrnl_show_timers();
 3600         return (sysctl_handle_int(oidp, &ret, 0, req));
 3601 }
 3602 
 3603 static void
 3604 ntoskrnl_show_timers()
 3605 {
 3606         int                     i = 0;
 3607         list_entry              *l;
 3608 
 3609         mtx_lock_spin(&ntoskrnl_calllock);
 3610         l = ntoskrnl_calllist.nle_flink;
 3611         while(l != &ntoskrnl_calllist) {
 3612                 i++;
 3613                 l = l->nle_flink;
 3614         }
 3615         mtx_unlock_spin(&ntoskrnl_calllock);
 3616 
 3617         printf("\n");
 3618         printf("%d timers available (out of %d)\n", i, NTOSKRNL_TIMEOUTS);
 3619         printf("timer sets: %qu\n", ntoskrnl_timer_sets);
 3620         printf("timer reloads: %qu\n", ntoskrnl_timer_reloads);
 3621         printf("timer cancels: %qu\n", ntoskrnl_timer_cancels);
 3622         printf("timer fires: %qu\n", ntoskrnl_timer_fires);
 3623         printf("\n");
 3624 
 3625         return;
 3626 }
 3627 #endif
 3628 
 3629 /*
 3630  * Must be called with dispatcher lock held.
 3631  */
 3632 
 3633 static void
 3634 ntoskrnl_insert_timer(timer, ticks)
 3635         ktimer                  *timer;
 3636         int                     ticks;
 3637 {
 3638         callout_entry           *e;
 3639         list_entry              *l;
 3640         struct callout          *c;
 3641 
 3642         /*
 3643          * Try and allocate a timer.
 3644          */
 3645         mtx_lock_spin(&ntoskrnl_calllock);
 3646         if (IsListEmpty(&ntoskrnl_calllist)) {
 3647                 mtx_unlock_spin(&ntoskrnl_calllock);
 3648 #ifdef NTOSKRNL_DEBUG_TIMERS
 3649                 ntoskrnl_show_timers();
 3650 #endif
 3651                 panic("out of timers!");
 3652         }
 3653         l = RemoveHeadList(&ntoskrnl_calllist);
 3654         mtx_unlock_spin(&ntoskrnl_calllock);
 3655 
 3656         e = CONTAINING_RECORD(l, callout_entry, ce_list);
 3657         c = &e->ce_callout;
 3658 
 3659         timer->k_callout = c;
 3660 
 3661         callout_init(c, CALLOUT_MPSAFE);
 3662         callout_reset(c, ticks, ntoskrnl_timercall, timer);
 3663 
 3664         return;
 3665 }
 3666 
 3667 static void
 3668 ntoskrnl_remove_timer(timer)
 3669         ktimer                  *timer;
 3670 {
 3671         callout_entry           *e;
 3672 
 3673         e = (callout_entry *)timer->k_callout;
 3674         callout_stop(timer->k_callout);
 3675 
 3676         mtx_lock_spin(&ntoskrnl_calllock);
 3677         InsertHeadList((&ntoskrnl_calllist), (&e->ce_list));
 3678         mtx_unlock_spin(&ntoskrnl_calllock);
 3679 
 3680         return;
 3681 }
 3682 
 3683 void
 3684 KeInitializeTimer(timer)
 3685         ktimer                  *timer;
 3686 {
 3687         if (timer == NULL)
 3688                 return;
 3689 
 3690         KeInitializeTimerEx(timer,  EVENT_TYPE_NOTIFY);
 3691 
 3692         return;
 3693 }
 3694 
 3695 void
 3696 KeInitializeTimerEx(timer, type)
 3697         ktimer                  *timer;
 3698         uint32_t                type;
 3699 {
 3700         if (timer == NULL)
 3701                 return;
 3702 
 3703         bzero((char *)timer, sizeof(ktimer));
 3704         InitializeListHead((&timer->k_header.dh_waitlisthead));
 3705         timer->k_header.dh_sigstate = FALSE;
 3706         timer->k_header.dh_inserted = FALSE;
 3707         if (type == EVENT_TYPE_NOTIFY)
 3708                 timer->k_header.dh_type = DISP_TYPE_NOTIFICATION_TIMER;
 3709         else
 3710                 timer->k_header.dh_type = DISP_TYPE_SYNCHRONIZATION_TIMER;
 3711         timer->k_header.dh_size = sizeof(ktimer) / sizeof(uint32_t);
 3712 
 3713         return;
 3714 }
 3715 
 3716 /*
 3717  * DPC subsystem. A Windows Defered Procedure Call has the following
 3718  * properties:
 3719  * - It runs at DISPATCH_LEVEL.
 3720  * - It can have one of 3 importance values that control when it
 3721  *   runs relative to other DPCs in the queue.
 3722  * - On SMP systems, it can be set to run on a specific processor.
 3723  * In order to satisfy the last property, we create a DPC thread for
 3724  * each CPU in the system and bind it to that CPU. Each thread
 3725  * maintains three queues with different importance levels, which
 3726  * will be processed in order from lowest to highest.
 3727  *
 3728  * In Windows, interrupt handlers run as DPCs. (Not to be confused
 3729  * with ISRs, which run in interrupt context and can preempt DPCs.)
 3730  * ISRs are given the highest importance so that they'll take
 3731  * precedence over timers and other things.
 3732  */
 3733 
 3734 static void
 3735 ntoskrnl_dpc_thread(arg)
 3736         void                    *arg;
 3737 {
 3738         kdpc_queue              *kq;
 3739         kdpc                    *d;
 3740         list_entry              *l;
 3741         uint8_t                 irql;
 3742 
 3743         kq = arg;
 3744 
 3745         InitializeListHead(&kq->kq_disp);
 3746         kq->kq_td = curthread;
 3747         kq->kq_exit = 0;
 3748         kq->kq_running = FALSE;
 3749         KeInitializeSpinLock(&kq->kq_lock);
 3750         KeInitializeEvent(&kq->kq_proc, EVENT_TYPE_SYNC, FALSE);
 3751         KeInitializeEvent(&kq->kq_done, EVENT_TYPE_SYNC, FALSE);
 3752 
 3753         /*
 3754          * Elevate our priority. DPCs are used to run interrupt
 3755          * handlers, and they should trigger as soon as possible
 3756          * once scheduled by an ISR.
 3757          */
 3758 
 3759         mtx_lock_spin(&sched_lock);
 3760 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3761 #if __FreeBSD_version >= 502102
 3762         sched_bind(curthread, kq->kq_cpu);
 3763 #endif
 3764 #endif
 3765         sched_prio(curthread, PRI_MIN_KERN);
 3766 #if __FreeBSD_version < 600000
 3767         curthread->td_base_pri = PRI_MIN_KERN;
 3768 #endif
 3769         mtx_unlock_spin(&sched_lock);
 3770 
 3771         while (1) {
 3772                 KeWaitForSingleObject(&kq->kq_proc, 0, 0, TRUE, NULL);
 3773 
 3774                 KeAcquireSpinLock(&kq->kq_lock, &irql);
 3775 
 3776                 if (kq->kq_exit) {
 3777                         KeReleaseSpinLock(&kq->kq_lock, irql);
 3778                         break;
 3779                 }
 3780 
 3781                 kq->kq_running = TRUE;
 3782 
 3783                 while (!IsListEmpty(&kq->kq_disp)) {
 3784                         l = RemoveHeadList((&kq->kq_disp));
 3785                         d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3786                         InitializeListHead((&d->k_dpclistentry));
 3787                         KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 3788                         MSCALL4(d->k_deferedfunc, d, d->k_deferredctx,
 3789                             d->k_sysarg1, d->k_sysarg2);
 3790                         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3791                 }
 3792 
 3793                 kq->kq_running = FALSE;
 3794 
 3795                 KeReleaseSpinLock(&kq->kq_lock, irql);
 3796 
 3797                 KeSetEvent(&kq->kq_done, IO_NO_INCREMENT, FALSE);
 3798         }
 3799 
 3800 #if __FreeBSD_version < 502113
 3801         mtx_lock(&Giant);
 3802 #endif
 3803         kthread_exit(0);
 3804         return; /* notreached */
 3805 }
 3806 
 3807 static void
 3808 ntoskrnl_destroy_dpc_threads(void)
 3809 {
 3810         kdpc_queue              *kq;
 3811         kdpc                    dpc;
 3812         int                     i;
 3813 
 3814         kq = kq_queues;
 3815 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3816         for (i = 0; i < mp_ncpus; i++) {
 3817 #else
 3818         for (i = 0; i < 1; i++) {
 3819 #endif
 3820                 kq += i;
 3821 
 3822                 kq->kq_exit = 1;
 3823                 KeInitializeDpc(&dpc, NULL, NULL);
 3824                 KeSetTargetProcessorDpc(&dpc, i);
 3825                 KeInsertQueueDpc(&dpc, NULL, NULL);
 3826                 tsleep(kq->kq_td->td_proc, PWAIT, "dpcw", 0);
 3827         }
 3828 
 3829         return;
 3830 }
 3831 
 3832 static uint8_t
 3833 ntoskrnl_insert_dpc(head, dpc)
 3834         list_entry              *head;
 3835         kdpc                    *dpc;
 3836 {
 3837         list_entry              *l;
 3838         kdpc                    *d;
 3839 
 3840         l = head->nle_flink;
 3841         while (l != head) {
 3842                 d = CONTAINING_RECORD(l, kdpc, k_dpclistentry);
 3843                 if (d == dpc)
 3844                         return(FALSE);
 3845                 l = l->nle_flink;
 3846         }
 3847 
 3848         if (dpc->k_importance == KDPC_IMPORTANCE_LOW)
 3849                 InsertTailList((head), (&dpc->k_dpclistentry));
 3850         else
 3851                 InsertHeadList((head), (&dpc->k_dpclistentry));
 3852 
 3853         return (TRUE);
 3854 }
 3855 
 3856 void
 3857 KeInitializeDpc(dpc, dpcfunc, dpcctx)
 3858         kdpc                    *dpc;
 3859         void                    *dpcfunc;
 3860         void                    *dpcctx;
 3861 {
 3862 
 3863         if (dpc == NULL)
 3864                 return;
 3865 
 3866         dpc->k_deferedfunc = dpcfunc;
 3867         dpc->k_deferredctx = dpcctx;
 3868         dpc->k_num = KDPC_CPU_DEFAULT;
 3869         dpc->k_importance = KDPC_IMPORTANCE_MEDIUM;
 3870         InitializeListHead((&dpc->k_dpclistentry));
 3871 
 3872         return;
 3873 }
 3874 
 3875 uint8_t
 3876 KeInsertQueueDpc(dpc, sysarg1, sysarg2)
 3877         kdpc                    *dpc;
 3878         void                    *sysarg1;
 3879         void                    *sysarg2;
 3880 {
 3881         kdpc_queue              *kq;
 3882         uint8_t                 r;
 3883         uint8_t                 irql;
 3884 
 3885         if (dpc == NULL)
 3886                 return(FALSE);
 3887 
 3888         kq = kq_queues;
 3889 
 3890 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3891         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 3892 
 3893         /*
 3894          * By default, the DPC is queued to run on the same CPU
 3895          * that scheduled it.
 3896          */
 3897 
 3898         if (dpc->k_num == KDPC_CPU_DEFAULT)
 3899                 kq += curthread->td_oncpu;
 3900         else
 3901                 kq += dpc->k_num;
 3902         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3903 #else
 3904         KeAcquireSpinLock(&kq->kq_lock, &irql);
 3905 #endif
 3906 
 3907         r = ntoskrnl_insert_dpc(&kq->kq_disp, dpc);
 3908         if (r == TRUE) {
 3909                 dpc->k_sysarg1 = sysarg1;
 3910                 dpc->k_sysarg2 = sysarg2;
 3911         }
 3912         KeReleaseSpinLock(&kq->kq_lock, irql);
 3913 
 3914         if (r == FALSE)
 3915                 return(r);
 3916 
 3917         KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 3918 
 3919         return(r);
 3920 }
 3921 
 3922 uint8_t
 3923 KeRemoveQueueDpc(dpc)
 3924         kdpc                    *dpc;
 3925 {
 3926         kdpc_queue              *kq;
 3927         uint8_t                 irql;
 3928 
 3929         if (dpc == NULL)
 3930                 return(FALSE);
 3931 
 3932 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3933         KeRaiseIrql(DISPATCH_LEVEL, &irql);
 3934 
 3935         kq = kq_queues + dpc->k_num;
 3936 
 3937         KeAcquireSpinLockAtDpcLevel(&kq->kq_lock);
 3938 #else
 3939         kq = kq_queues;
 3940         KeAcquireSpinLock(&kq->kq_lock, &irql);
 3941 #endif
 3942 
 3943         if (dpc->k_dpclistentry.nle_flink == &dpc->k_dpclistentry) {
 3944                 KeReleaseSpinLockFromDpcLevel(&kq->kq_lock);
 3945                 KeLowerIrql(irql);
 3946                 return(FALSE);
 3947         }
 3948 
 3949         RemoveEntryList((&dpc->k_dpclistentry));
 3950         InitializeListHead((&dpc->k_dpclistentry));
 3951 
 3952         KeReleaseSpinLock(&kq->kq_lock, irql);
 3953 
 3954         return(TRUE);
 3955 }
 3956 
 3957 void
 3958 KeSetImportanceDpc(dpc, imp)
 3959         kdpc                    *dpc;
 3960         uint32_t                imp;
 3961 {
 3962         if (imp != KDPC_IMPORTANCE_LOW &&
 3963             imp != KDPC_IMPORTANCE_MEDIUM &&
 3964             imp != KDPC_IMPORTANCE_HIGH)
 3965                 return;
 3966 
 3967         dpc->k_importance = (uint8_t)imp;
 3968         return;
 3969 }
 3970 
 3971 void
 3972 KeSetTargetProcessorDpc(dpc, cpu)
 3973         kdpc                    *dpc;
 3974         uint8_t                 cpu;
 3975 {
 3976         if (cpu > mp_ncpus)
 3977                 return;
 3978 
 3979         dpc->k_num = cpu;
 3980         return;
 3981 }
 3982 
 3983 void
 3984 KeFlushQueuedDpcs(void)
 3985 {
 3986         kdpc_queue              *kq;
 3987         int                     i;
 3988 
 3989         /*
 3990          * Poke each DPC queue and wait
 3991          * for them to drain.
 3992          */
 3993 
 3994 #ifdef NTOSKRNL_MULTIPLE_DPCS
 3995         for (i = 0; i < mp_ncpus; i++) {
 3996 #else
 3997         for (i = 0; i < 1; i++) {
 3998 #endif
 3999                 kq = kq_queues + i;
 4000                 KeSetEvent(&kq->kq_proc, IO_NO_INCREMENT, FALSE);
 4001                 KeWaitForSingleObject(&kq->kq_done, 0, 0, TRUE, NULL);
 4002         }
 4003 
 4004         return;
 4005 }
 4006 
 4007 uint32_t
 4008 KeGetCurrentProcessorNumber(void)
 4009 {
 4010         return((uint32_t)curthread->td_oncpu);
 4011 }
 4012 
 4013 uint8_t
 4014 KeSetTimerEx(timer, duetime, period, dpc)
 4015         ktimer                  *timer;
 4016         int64_t                 duetime;
 4017         uint32_t                period;
 4018         kdpc                    *dpc;
 4019 {
 4020         struct timeval          tv;
 4021         uint64_t                curtime;
 4022         uint8_t                 pending;
 4023 
 4024         if (timer == NULL)
 4025                 return(FALSE);
 4026 
 4027         mtx_lock(&ntoskrnl_dispatchlock);
 4028 
 4029         if (timer->k_header.dh_inserted == TRUE) {
 4030                 ntoskrnl_remove_timer(timer);
 4031 #ifdef NTOSKRNL_DEBUG_TIMERS
 4032                 ntoskrnl_timer_cancels++;
 4033 #endif
 4034                 timer->k_header.dh_inserted = FALSE;
 4035                 pending = TRUE;
 4036         } else
 4037                 pending = FALSE;
 4038 
 4039         timer->k_duetime = duetime;
 4040         timer->k_period = period;
 4041         timer->k_header.dh_sigstate = FALSE;
 4042         timer->k_dpc = dpc;
 4043 
 4044         if (duetime < 0) {
 4045                 tv.tv_sec = - (duetime) / 10000000;
 4046                 tv.tv_usec = (- (duetime) / 10) -
 4047                     (tv.tv_sec * 1000000);
 4048         } else {
 4049                 ntoskrnl_time(&curtime);
 4050                 if (duetime < curtime)
 4051                         tv.tv_sec = tv.tv_usec = 0;
 4052                 else {
 4053                         tv.tv_sec = ((duetime) - curtime) / 10000000;
 4054                         tv.tv_usec = ((duetime) - curtime) / 10 -
 4055                             (tv.tv_sec * 1000000);
 4056                 }
 4057         }
 4058 
 4059         timer->k_header.dh_inserted = TRUE;
 4060         ntoskrnl_insert_timer(timer, tvtohz(&tv));
 4061 #ifdef NTOSKRNL_DEBUG_TIMERS
 4062         ntoskrnl_timer_sets++;
 4063 #endif
 4064 
 4065         mtx_unlock(&ntoskrnl_dispatchlock);
 4066 
 4067         return(pending);
 4068 }
 4069 
 4070 uint8_t
 4071 KeSetTimer(timer, duetime, dpc)
 4072         ktimer                  *timer;
 4073         int64_t                 duetime;
 4074         kdpc                    *dpc;
 4075 {
 4076         return (KeSetTimerEx(timer, duetime, 0, dpc));
 4077 }
 4078 
 4079 /*
 4080  * The Windows DDK documentation seems to say that cancelling
 4081  * a timer that has a DPC will result in the DPC also being
 4082  * cancelled, but this isn't really the case.
 4083  */
 4084 
 4085 uint8_t
 4086 KeCancelTimer(timer)
 4087         ktimer                  *timer;
 4088 {
 4089         uint8_t                 pending;
 4090 
 4091         if (timer == NULL)
 4092                 return(FALSE);
 4093 
 4094         mtx_lock(&ntoskrnl_dispatchlock);
 4095 
 4096         pending = timer->k_header.dh_inserted;
 4097 
 4098         if (timer->k_header.dh_inserted == TRUE) {
 4099                 timer->k_header.dh_inserted = FALSE;
 4100                 ntoskrnl_remove_timer(timer);
 4101 #ifdef NTOSKRNL_DEBUG_TIMERS
 4102                 ntoskrnl_timer_cancels++;
 4103 #endif
 4104         }
 4105 
 4106         mtx_unlock(&ntoskrnl_dispatchlock);
 4107 
 4108         return(pending);
 4109 }
 4110 
 4111 uint8_t
 4112 KeReadStateTimer(timer)
 4113         ktimer                  *timer;
 4114 {
 4115         return(timer->k_header.dh_sigstate);
 4116 }
 4117 
 4118 static void
 4119 dummy()
 4120 {
 4121         printf ("ntoskrnl dummy called...\n");
 4122         return;
 4123 }
 4124 
 4125 
 4126 image_patch_table ntoskrnl_functbl[] = {
 4127         IMPORT_SFUNC(RtlZeroMemory, 2),
 4128         IMPORT_SFUNC(RtlCopyMemory, 3),
 4129         IMPORT_SFUNC(RtlCompareMemory, 3),
 4130         IMPORT_SFUNC(RtlEqualUnicodeString, 3),
 4131         IMPORT_SFUNC(RtlCopyUnicodeString, 2),
 4132         IMPORT_SFUNC(RtlUnicodeStringToAnsiString, 3),
 4133         IMPORT_SFUNC(RtlAnsiStringToUnicodeString, 3),
 4134         IMPORT_SFUNC(RtlInitAnsiString, 2),
 4135         IMPORT_SFUNC_MAP(RtlInitString, RtlInitAnsiString, 2),
 4136         IMPORT_SFUNC(RtlInitUnicodeString, 2),
 4137         IMPORT_SFUNC(RtlFreeAnsiString, 1),
 4138         IMPORT_SFUNC(RtlFreeUnicodeString, 1),
 4139         IMPORT_SFUNC(RtlUnicodeStringToInteger, 3),
 4140         IMPORT_CFUNC(sprintf, 0),
 4141         IMPORT_CFUNC(vsprintf, 0),
 4142         IMPORT_CFUNC_MAP(_snprintf, snprintf, 0),
 4143         IMPORT_CFUNC_MAP(_vsnprintf, vsnprintf, 0),
 4144         IMPORT_CFUNC(DbgPrint, 0),
 4145         IMPORT_SFUNC(DbgBreakPoint, 0),
 4146         IMPORT_CFUNC(strncmp, 0),
 4147         IMPORT_CFUNC(strcmp, 0),
 4148         IMPORT_CFUNC(strncpy, 0),
 4149         IMPORT_CFUNC(strcpy, 0),
 4150         IMPORT_CFUNC(strlen, 0),
 4151         IMPORT_CFUNC_MAP(toupper, ntoskrnl_toupper, 0),
 4152         IMPORT_CFUNC_MAP(tolower, ntoskrnl_tolower, 0),
 4153         IMPORT_CFUNC_MAP(strstr, ntoskrnl_strstr, 0),
 4154         IMPORT_CFUNC_MAP(strchr, index, 0),
 4155         IMPORT_CFUNC(memcpy, 0),
 4156         IMPORT_CFUNC_MAP(memmove, ntoskrnl_memmove, 0),
 4157         IMPORT_CFUNC_MAP(memset, ntoskrnl_memset, 0),
 4158         IMPORT_SFUNC(IoAllocateDriverObjectExtension, 4),
 4159         IMPORT_SFUNC(IoGetDriverObjectExtension, 2),
 4160         IMPORT_FFUNC(IofCallDriver, 2),
 4161         IMPORT_FFUNC(IofCompleteRequest, 2),
 4162         IMPORT_SFUNC(IoAcquireCancelSpinLock, 1),
 4163         IMPORT_SFUNC(IoReleaseCancelSpinLock, 1),
 4164         IMPORT_SFUNC(IoCancelIrp, 1),
 4165         IMPORT_SFUNC(IoConnectInterrupt, 11),
 4166         IMPORT_SFUNC(IoDisconnectInterrupt, 1),
 4167         IMPORT_SFUNC(IoCreateDevice, 7),
 4168         IMPORT_SFUNC(IoDeleteDevice, 1),
 4169         IMPORT_SFUNC(IoGetAttachedDevice, 1),
 4170         IMPORT_SFUNC(IoAttachDeviceToDeviceStack, 2),
 4171         IMPORT_SFUNC(IoDetachDevice, 1),
 4172         IMPORT_SFUNC(IoBuildSynchronousFsdRequest, 7),
 4173         IMPORT_SFUNC(IoBuildAsynchronousFsdRequest, 6),
 4174         IMPORT_SFUNC(IoBuildDeviceIoControlRequest, 9),
 4175         IMPORT_SFUNC(IoAllocateIrp, 2),
 4176         IMPORT_SFUNC(IoReuseIrp, 2),
 4177         IMPORT_SFUNC(IoMakeAssociatedIrp, 2),
 4178         IMPORT_SFUNC(IoFreeIrp, 1),
 4179         IMPORT_SFUNC(IoInitializeIrp, 3),
 4180         IMPORT_SFUNC(KeAcquireInterruptSpinLock, 1),
 4181         IMPORT_SFUNC(KeReleaseInterruptSpinLock, 2),
 4182         IMPORT_SFUNC(KeSynchronizeExecution, 3),
 4183         IMPORT_SFUNC(KeWaitForSingleObject, 5),
 4184         IMPORT_SFUNC(KeWaitForMultipleObjects, 8),
 4185         IMPORT_SFUNC(_allmul, 4),
 4186         IMPORT_SFUNC(_alldiv, 4),
 4187         IMPORT_SFUNC(_allrem, 4),
 4188         IMPORT_RFUNC(_allshr, 0),
 4189         IMPORT_RFUNC(_allshl, 0),
 4190         IMPORT_SFUNC(_aullmul, 4),
 4191         IMPORT_SFUNC(_aulldiv, 4),
 4192         IMPORT_SFUNC(_aullrem, 4),
 4193         IMPORT_RFUNC(_aullshr, 0),
 4194         IMPORT_RFUNC(_aullshl, 0),
 4195         IMPORT_CFUNC(atoi, 0),
 4196         IMPORT_CFUNC(atol, 0),
 4197         IMPORT_CFUNC(rand, 0),
 4198         IMPORT_CFUNC(srand, 0),
 4199         IMPORT_SFUNC(WRITE_REGISTER_USHORT, 2),
 4200         IMPORT_SFUNC(READ_REGISTER_USHORT, 1),
 4201         IMPORT_SFUNC(WRITE_REGISTER_ULONG, 2),
 4202         IMPORT_SFUNC(READ_REGISTER_ULONG, 1),
 4203         IMPORT_SFUNC(READ_REGISTER_UCHAR, 1),
 4204         IMPORT_SFUNC(WRITE_REGISTER_UCHAR, 2),
 4205         IMPORT_SFUNC(ExInitializePagedLookasideList, 7),
 4206         IMPORT_SFUNC(ExDeletePagedLookasideList, 1),
 4207         IMPORT_SFUNC(ExInitializeNPagedLookasideList, 7),
 4208         IMPORT_SFUNC(ExDeleteNPagedLookasideList, 1),
 4209         IMPORT_FFUNC(InterlockedPopEntrySList, 1),
 4210         IMPORT_FFUNC(InterlockedPushEntrySList, 2),
 4211         IMPORT_SFUNC(ExQueryDepthSList, 1),
 4212         IMPORT_FFUNC_MAP(ExpInterlockedPopEntrySList,
 4213                 InterlockedPopEntrySList, 1),
 4214         IMPORT_FFUNC_MAP(ExpInterlockedPushEntrySList,
 4215                 InterlockedPushEntrySList, 2),
 4216         IMPORT_FFUNC(ExInterlockedPopEntrySList, 2),
 4217         IMPORT_FFUNC(ExInterlockedPushEntrySList, 3),
 4218         IMPORT_SFUNC(ExAllocatePoolWithTag, 3),
 4219         IMPORT_SFUNC(ExFreePool, 1),
 4220 #ifdef __i386__
 4221         IMPORT_FFUNC(KefAcquireSpinLockAtDpcLevel, 1),
 4222         IMPORT_FFUNC(KefReleaseSpinLockFromDpcLevel,1),
 4223         IMPORT_FFUNC(KeAcquireSpinLockRaiseToDpc, 1),
 4224 #else
 4225         /*
 4226          * For AMD64, we can get away with just mapping
 4227          * KeAcquireSpinLockRaiseToDpc() directly to KfAcquireSpinLock()
 4228          * because the calling conventions end up being the same.
 4229          * On i386, we have to be careful because KfAcquireSpinLock()
 4230          * is _fastcall but KeAcquireSpinLockRaiseToDpc() isn't.
 4231          */
 4232         IMPORT_SFUNC(KeAcquireSpinLockAtDpcLevel, 1),
 4233         IMPORT_SFUNC(KeReleaseSpinLockFromDpcLevel, 1),
 4234         IMPORT_SFUNC_MAP(KeAcquireSpinLockRaiseToDpc, KfAcquireSpinLock, 1),
 4235 #endif
 4236         IMPORT_SFUNC_MAP(KeReleaseSpinLock, KfReleaseSpinLock, 1),
 4237         IMPORT_FFUNC(InterlockedIncrement, 1),
 4238         IMPORT_FFUNC(InterlockedDecrement, 1),
 4239         IMPORT_FFUNC(InterlockedExchange, 2),
 4240         IMPORT_FFUNC(ExInterlockedAddLargeStatistic, 2),
 4241         IMPORT_SFUNC(IoAllocateMdl, 5),
 4242         IMPORT_SFUNC(IoFreeMdl, 1),
 4243         IMPORT_SFUNC(MmSizeOfMdl, 1),
 4244         IMPORT_SFUNC(MmMapLockedPages, 2),
 4245         IMPORT_SFUNC(MmMapLockedPagesSpecifyCache, 6),
 4246         IMPORT_SFUNC(MmUnmapLockedPages, 2),
 4247         IMPORT_SFUNC(MmBuildMdlForNonPagedPool, 1),
 4248         IMPORT_SFUNC(MmIsAddressValid, 1),
 4249         IMPORT_SFUNC(MmMapIoSpace, 3 + 1),
 4250         IMPORT_SFUNC(MmUnmapIoSpace, 2),
 4251         IMPORT_SFUNC(KeInitializeSpinLock, 1),
 4252         IMPORT_SFUNC(IoIsWdmVersionAvailable, 2),
 4253         IMPORT_SFUNC(IoGetDeviceProperty, 5),
 4254         IMPORT_SFUNC(IoAllocateWorkItem, 1),
 4255         IMPORT_SFUNC(IoFreeWorkItem, 1),
 4256         IMPORT_SFUNC(IoQueueWorkItem, 4),
 4257         IMPORT_SFUNC(ExQueueWorkItem, 2),
 4258         IMPORT_SFUNC(ntoskrnl_workitem, 2),
 4259         IMPORT_SFUNC(KeInitializeMutex, 2),
 4260         IMPORT_SFUNC(KeReleaseMutex, 2),
 4261         IMPORT_SFUNC(KeReadStateMutex, 1),
 4262         IMPORT_SFUNC(KeInitializeEvent, 3),
 4263         IMPORT_SFUNC(KeSetEvent, 3),
 4264         IMPORT_SFUNC(KeResetEvent, 1),
 4265         IMPORT_SFUNC(KeClearEvent, 1),
 4266         IMPORT_SFUNC(KeReadStateEvent, 1),
 4267         IMPORT_SFUNC(KeInitializeTimer, 1),
 4268         IMPORT_SFUNC(KeInitializeTimerEx, 2),
 4269         IMPORT_SFUNC(KeSetTimer, 3),
 4270         IMPORT_SFUNC(KeSetTimerEx, 4),
 4271         IMPORT_SFUNC(KeCancelTimer, 1),
 4272         IMPORT_SFUNC(KeReadStateTimer, 1),
 4273         IMPORT_SFUNC(KeInitializeDpc, 3),
 4274         IMPORT_SFUNC(KeInsertQueueDpc, 3),
 4275         IMPORT_SFUNC(KeRemoveQueueDpc, 1),
 4276         IMPORT_SFUNC(KeSetImportanceDpc, 2),
 4277         IMPORT_SFUNC(KeSetTargetProcessorDpc, 2),
 4278         IMPORT_SFUNC(KeFlushQueuedDpcs, 0),
 4279         IMPORT_SFUNC(KeGetCurrentProcessorNumber, 1),
 4280         IMPORT_SFUNC(ObReferenceObjectByHandle, 6),
 4281         IMPORT_FFUNC(ObfDereferenceObject, 1),
 4282         IMPORT_SFUNC(ZwClose, 1),
 4283         IMPORT_SFUNC(PsCreateSystemThread, 7),
 4284         IMPORT_SFUNC(PsTerminateSystemThread, 1),
 4285         IMPORT_SFUNC(IoWMIRegistrationControl, 2),
 4286         IMPORT_SFUNC(WmiQueryTraceInformation, 5),
 4287         IMPORT_CFUNC(WmiTraceMessage, 0),
 4288 
 4289         /*
 4290          * This last entry is a catch-all for any function we haven't
 4291          * implemented yet. The PE import list patching routine will
 4292          * use it for any function that doesn't have an explicit match
 4293          * in this table.
 4294          */
 4295 
 4296         { NULL, (FUNC)dummy, NULL, 0, WINDRV_WRAP_STDCALL },
 4297 
 4298         /* End of list. */
 4299 
 4300         { NULL, NULL, NULL }
 4301 };

Cache object: 80b35bec1b39e990361d43f14a0566d7


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.