The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/machdep.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1992 Terrence R. Lambert.
    3  * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * William Jolitz.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the University of
   20  *      California, Berkeley and its contributors.
   21  * 4. Neither the name of the University nor the names of its contributors
   22  *    may be used to endorse or promote products derived from this software
   23  *    without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   35  * SUCH DAMAGE.
   36  *
   37  *      from: @(#)machdep.c     7.4 (Berkeley) 6/3/91
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __FBSDID("$FreeBSD: releng/11.1/sys/i386/i386/machdep.c 338607 2018-09-12 05:08:49Z gordon $");
   42 
   43 #include "opt_apic.h"
   44 #include "opt_atpic.h"
   45 #include "opt_compat.h"
   46 #include "opt_cpu.h"
   47 #include "opt_ddb.h"
   48 #include "opt_inet.h"
   49 #include "opt_isa.h"
   50 #include "opt_kstack_pages.h"
   51 #include "opt_maxmem.h"
   52 #include "opt_mp_watchdog.h"
   53 #include "opt_perfmon.h"
   54 #include "opt_platform.h"
   55 #include "opt_xbox.h"
   56 
   57 #include <sys/param.h>
   58 #include <sys/proc.h>
   59 #include <sys/systm.h>
   60 #include <sys/bio.h>
   61 #include <sys/buf.h>
   62 #include <sys/bus.h>
   63 #include <sys/callout.h>
   64 #include <sys/cons.h>
   65 #include <sys/cpu.h>
   66 #include <sys/eventhandler.h>
   67 #include <sys/exec.h>
   68 #include <sys/imgact.h>
   69 #include <sys/kdb.h>
   70 #include <sys/kernel.h>
   71 #include <sys/ktr.h>
   72 #include <sys/linker.h>
   73 #include <sys/lock.h>
   74 #include <sys/malloc.h>
   75 #include <sys/memrange.h>
   76 #include <sys/msgbuf.h>
   77 #include <sys/mutex.h>
   78 #include <sys/pcpu.h>
   79 #include <sys/ptrace.h>
   80 #include <sys/reboot.h>
   81 #include <sys/rwlock.h>
   82 #include <sys/sched.h>
   83 #include <sys/signalvar.h>
   84 #ifdef SMP
   85 #include <sys/smp.h>
   86 #endif
   87 #include <sys/syscallsubr.h>
   88 #include <sys/sysctl.h>
   89 #include <sys/sysent.h>
   90 #include <sys/sysproto.h>
   91 #include <sys/ucontext.h>
   92 #include <sys/vmmeter.h>
   93 
   94 #include <vm/vm.h>
   95 #include <vm/vm_extern.h>
   96 #include <vm/vm_kern.h>
   97 #include <vm/vm_page.h>
   98 #include <vm/vm_map.h>
   99 #include <vm/vm_object.h>
  100 #include <vm/vm_pager.h>
  101 #include <vm/vm_param.h>
  102 
  103 #ifdef DDB
  104 #ifndef KDB
  105 #error KDB must be enabled in order for DDB to work!
  106 #endif
  107 #include <ddb/ddb.h>
  108 #include <ddb/db_sym.h>
  109 #endif
  110 
  111 #ifdef PC98
  112 #include <pc98/pc98/pc98_machdep.h>
  113 #else
  114 #include <isa/rtc.h>
  115 #endif
  116 
  117 #include <net/netisr.h>
  118 
  119 #include <machine/bootinfo.h>
  120 #include <machine/clock.h>
  121 #include <machine/cpu.h>
  122 #include <machine/cputypes.h>
  123 #include <machine/intr_machdep.h>
  124 #include <x86/mca.h>
  125 #include <machine/md_var.h>
  126 #include <machine/metadata.h>
  127 #include <machine/mp_watchdog.h>
  128 #include <machine/pc/bios.h>
  129 #include <machine/pcb.h>
  130 #include <machine/pcb_ext.h>
  131 #include <machine/proc.h>
  132 #include <machine/reg.h>
  133 #include <machine/sigframe.h>
  134 #include <machine/specialreg.h>
  135 #include <machine/vm86.h>
  136 #include <x86/init.h>
  137 #ifdef PERFMON
  138 #include <machine/perfmon.h>
  139 #endif
  140 #ifdef SMP
  141 #include <machine/smp.h>
  142 #endif
  143 #ifdef FDT
  144 #include <x86/fdt.h>
  145 #endif
  146 
  147 #ifdef DEV_APIC
  148 #include <x86/apicvar.h>
  149 #endif
  150 
  151 #ifdef DEV_ISA
  152 #include <x86/isa/icu.h>
  153 #endif
  154 
  155 #ifdef XBOX
  156 #include <machine/xbox.h>
  157 
  158 int arch_i386_is_xbox = 0;
  159 uint32_t arch_i386_xbox_memsize = 0;
  160 #endif
  161 
  162 /* Sanity check for __curthread() */
  163 CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);
  164 
  165 extern register_t init386(int first);
  166 extern void dblfault_handler(void);
  167 
  168 static void cpu_startup(void *);
  169 static void fpstate_drop(struct thread *td);
  170 static void get_fpcontext(struct thread *td, mcontext_t *mcp,
  171     char *xfpusave, size_t xfpusave_len);
  172 static int  set_fpcontext(struct thread *td, mcontext_t *mcp,
  173     char *xfpustate, size_t xfpustate_len);
  174 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
  175 
  176 /* Intel ICH registers */
  177 #define ICH_PMBASE      0x400
  178 #define ICH_SMI_EN      ICH_PMBASE + 0x30
  179 
  180 int     _udatasel, _ucodesel;
  181 u_int   basemem;
  182 
  183 #ifdef PC98
  184 int     need_pre_dma_flush;     /* If 1, use wbinvd befor DMA transfer. */
  185 int     need_post_dma_flush;    /* If 1, use invd after DMA transfer. */
  186 
  187 static int      ispc98 = 1;
  188 SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");
  189 #endif
  190 
  191 int cold = 1;
  192 
  193 #ifdef COMPAT_43
  194 static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
  195 #endif
  196 #ifdef COMPAT_FREEBSD4
  197 static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
  198 #endif
  199 
  200 long Maxmem = 0;
  201 long realmem = 0;
  202 
  203 #ifdef PAE
  204 FEATURE(pae, "Physical Address Extensions");
  205 #endif
  206 
  207 /*
  208  * The number of PHYSMAP entries must be one less than the number of
  209  * PHYSSEG entries because the PHYSMAP entry that spans the largest
  210  * physical address that is accessible by ISA DMA is split into two
  211  * PHYSSEG entries.
  212  */
  213 #define PHYSMAP_SIZE    (2 * (VM_PHYSSEG_MAX - 1))
  214 
  215 vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
  216 vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];
  217 
  218 /* must be 2 less so 0 0 can signal end of chunks */
  219 #define PHYS_AVAIL_ARRAY_END (nitems(phys_avail) - 2)
  220 #define DUMP_AVAIL_ARRAY_END (nitems(dump_avail) - 2)
  221 
  222 struct kva_md_info kmi;
  223 
  224 static struct trapframe proc0_tf;
  225 struct pcpu __pcpu[MAXCPU];
  226 
  227 struct mtx icu_lock;
  228 
  229 struct mem_range_softc mem_range_softc;
  230 
  231  /* Default init_ops implementation. */
  232  struct init_ops init_ops = {
  233         .early_clock_source_init =      i8254_init,
  234         .early_delay =                  i8254_delay,
  235 #ifdef DEV_APIC
  236         .msi_init =                     msi_init,
  237 #endif
  238  };
  239 
  240 static void
  241 cpu_startup(dummy)
  242         void *dummy;
  243 {
  244         uintmax_t memsize;
  245         char *sysenv;
  246 
  247 #ifndef PC98
  248         /*
  249          * On MacBooks, we need to disallow the legacy USB circuit to
  250          * generate an SMI# because this can cause several problems,
  251          * namely: incorrect CPU frequency detection and failure to
  252          * start the APs.
  253          * We do this by disabling a bit in the SMI_EN (SMI Control and
  254          * Enable register) of the Intel ICH LPC Interface Bridge.
  255          */
  256         sysenv = kern_getenv("smbios.system.product");
  257         if (sysenv != NULL) {
  258                 if (strncmp(sysenv, "MacBook1,1", 10) == 0 ||
  259                     strncmp(sysenv, "MacBook3,1", 10) == 0 ||
  260                     strncmp(sysenv, "MacBook4,1", 10) == 0 ||
  261                     strncmp(sysenv, "MacBookPro1,1", 13) == 0 ||
  262                     strncmp(sysenv, "MacBookPro1,2", 13) == 0 ||
  263                     strncmp(sysenv, "MacBookPro3,1", 13) == 0 ||
  264                     strncmp(sysenv, "MacBookPro4,1", 13) == 0 ||
  265                     strncmp(sysenv, "Macmini1,1", 10) == 0) {
  266                         if (bootverbose)
  267                                 printf("Disabling LEGACY_USB_EN bit on "
  268                                     "Intel ICH.\n");
  269                         outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
  270                 }
  271                 freeenv(sysenv);
  272         }
  273 #endif /* !PC98 */
  274 
  275         /*
  276          * Good {morning,afternoon,evening,night}.
  277          */
  278         startrtclock();
  279         printcpuinfo();
  280         panicifcpuunsupported();
  281 #ifdef PERFMON
  282         perfmon_init();
  283 #endif
  284 
  285         /*
  286          * Display physical memory if SMBIOS reports reasonable amount.
  287          */
  288         memsize = 0;
  289         sysenv = kern_getenv("smbios.memory.enabled");
  290         if (sysenv != NULL) {
  291                 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10;
  292                 freeenv(sysenv);
  293         }
  294         if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count))
  295                 memsize = ptoa((uintmax_t)Maxmem);
  296         printf("real memory  = %ju (%ju MB)\n", memsize, memsize >> 20);
  297         realmem = atop(memsize);
  298 
  299         /*
  300          * Display any holes after the first chunk of extended memory.
  301          */
  302         if (bootverbose) {
  303                 int indx;
  304 
  305                 printf("Physical memory chunk(s):\n");
  306                 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
  307                         vm_paddr_t size;
  308 
  309                         size = phys_avail[indx + 1] - phys_avail[indx];
  310                         printf(
  311                             "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
  312                             (uintmax_t)phys_avail[indx],
  313                             (uintmax_t)phys_avail[indx + 1] - 1,
  314                             (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
  315                 }
  316         }
  317 
  318         vm_ksubmap_init(&kmi);
  319 
  320         printf("avail memory = %ju (%ju MB)\n",
  321             ptoa((uintmax_t)vm_cnt.v_free_count),
  322             ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576);
  323 
  324         /*
  325          * Set up buffers, so they can be used to read disk labels.
  326          */
  327         bufinit();
  328         vm_pager_bufferinit();
  329         cpu_setregs();
  330 }
  331 
  332 /*
  333  * Send an interrupt to process.
  334  *
  335  * Stack is set up to allow sigcode stored
  336  * at top to call routine, followed by call
  337  * to sigreturn routine below.  After sigreturn
  338  * resets the signal mask, the stack, and the
  339  * frame pointer, it returns to the user
  340  * specified pc, psl.
  341  */
  342 #ifdef COMPAT_43
  343 static void
  344 osendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
  345 {
  346         struct osigframe sf, *fp;
  347         struct proc *p;
  348         struct thread *td;
  349         struct sigacts *psp;
  350         struct trapframe *regs;
  351         int sig;
  352         int oonstack;
  353 
  354         td = curthread;
  355         p = td->td_proc;
  356         PROC_LOCK_ASSERT(p, MA_OWNED);
  357         sig = ksi->ksi_signo;
  358         psp = p->p_sigacts;
  359         mtx_assert(&psp->ps_mtx, MA_OWNED);
  360         regs = td->td_frame;
  361         oonstack = sigonstack(regs->tf_esp);
  362 
  363         /* Allocate space for the signal handler context. */
  364         if ((td->td_pflags & TDP_ALTSTACK) && !oonstack &&
  365             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  366                 fp = (struct osigframe *)((uintptr_t)td->td_sigstk.ss_sp +
  367                     td->td_sigstk.ss_size - sizeof(struct osigframe));
  368 #if defined(COMPAT_43)
  369                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  370 #endif
  371         } else
  372                 fp = (struct osigframe *)regs->tf_esp - 1;
  373 
  374         /* Build the argument list for the signal handler. */
  375         sf.sf_signum = sig;
  376         sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
  377         bzero(&sf.sf_siginfo, sizeof(sf.sf_siginfo));
  378         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  379                 /* Signal handler installed with SA_SIGINFO. */
  380                 sf.sf_arg2 = (register_t)&fp->sf_siginfo;
  381                 sf.sf_siginfo.si_signo = sig;
  382                 sf.sf_siginfo.si_code = ksi->ksi_code;
  383                 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
  384                 sf.sf_addr = 0;
  385         } else {
  386                 /* Old FreeBSD-style arguments. */
  387                 sf.sf_arg2 = ksi->ksi_code;
  388                 sf.sf_addr = (register_t)ksi->ksi_addr;
  389                 sf.sf_ahu.sf_handler = catcher;
  390         }
  391         mtx_unlock(&psp->ps_mtx);
  392         PROC_UNLOCK(p);
  393 
  394         /* Save most if not all of trap frame. */
  395         sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
  396         sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
  397         sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
  398         sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
  399         sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
  400         sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
  401         sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
  402         sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
  403         sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
  404         sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
  405         sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
  406         sf.sf_siginfo.si_sc.sc_gs = rgs();
  407         sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;
  408 
  409         /* Build the signal context to be used by osigreturn(). */
  410         sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
  411         SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
  412         sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
  413         sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
  414         sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
  415         sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
  416         sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
  417         sf.sf_siginfo.si_sc.sc_err = regs->tf_err;
  418 
  419         /*
  420          * If we're a vm86 process, we want to save the segment registers.
  421          * We also change eflags to be our emulated eflags, not the actual
  422          * eflags.
  423          */
  424         if (regs->tf_eflags & PSL_VM) {
  425                 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
  426                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
  427                 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
  428 
  429                 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
  430                 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
  431                 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
  432                 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;
  433 
  434                 if (vm86->vm86_has_vme == 0)
  435                         sf.sf_siginfo.si_sc.sc_ps =
  436                             (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
  437                             (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
  438 
  439                 /* See sendsig() for comments. */
  440                 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
  441         }
  442 
  443         /*
  444          * Copy the sigframe out to the user's stack.
  445          */
  446         if (copyout(&sf, fp, sizeof(*fp)) != 0) {
  447 #ifdef DEBUG
  448                 printf("process %ld has trashed its stack\n", (long)p->p_pid);
  449 #endif
  450                 PROC_LOCK(p);
  451                 sigexit(td, SIGILL);
  452         }
  453 
  454         regs->tf_esp = (int)fp;
  455         if (p->p_sysent->sv_sigcode_base != 0) {
  456                 regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
  457                     szosigcode;
  458         } else {
  459                 /* a.out sysentvec does not use shared page */
  460                 regs->tf_eip = p->p_sysent->sv_psstrings - szosigcode;
  461         }
  462         regs->tf_eflags &= ~(PSL_T | PSL_D);
  463         regs->tf_cs = _ucodesel;
  464         regs->tf_ds = _udatasel;
  465         regs->tf_es = _udatasel;
  466         regs->tf_fs = _udatasel;
  467         load_gs(_udatasel);
  468         regs->tf_ss = _udatasel;
  469         PROC_LOCK(p);
  470         mtx_lock(&psp->ps_mtx);
  471 }
  472 #endif /* COMPAT_43 */
  473 
  474 #ifdef COMPAT_FREEBSD4
  475 static void
  476 freebsd4_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
  477 {
  478         struct sigframe4 sf, *sfp;
  479         struct proc *p;
  480         struct thread *td;
  481         struct sigacts *psp;
  482         struct trapframe *regs;
  483         int sig;
  484         int oonstack;
  485 
  486         td = curthread;
  487         p = td->td_proc;
  488         PROC_LOCK_ASSERT(p, MA_OWNED);
  489         sig = ksi->ksi_signo;
  490         psp = p->p_sigacts;
  491         mtx_assert(&psp->ps_mtx, MA_OWNED);
  492         regs = td->td_frame;
  493         oonstack = sigonstack(regs->tf_esp);
  494 
  495         /* Save user context. */
  496         bzero(&sf, sizeof(sf));
  497         sf.sf_uc.uc_sigmask = *mask;
  498         sf.sf_uc.uc_stack = td->td_sigstk;
  499         sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  500             ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  501         sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
  502         sf.sf_uc.uc_mcontext.mc_gs = rgs();
  503         bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
  504         bzero(sf.sf_uc.uc_mcontext.mc_fpregs,
  505             sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
  506         bzero(sf.sf_uc.uc_mcontext.__spare__,
  507             sizeof(sf.sf_uc.uc_mcontext.__spare__));
  508         bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
  509 
  510         /* Allocate space for the signal handler context. */
  511         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
  512             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  513                 sfp = (struct sigframe4 *)((uintptr_t)td->td_sigstk.ss_sp +
  514                     td->td_sigstk.ss_size - sizeof(struct sigframe4));
  515 #if defined(COMPAT_43)
  516                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  517 #endif
  518         } else
  519                 sfp = (struct sigframe4 *)regs->tf_esp - 1;
  520 
  521         /* Build the argument list for the signal handler. */
  522         sf.sf_signum = sig;
  523         sf.sf_ucontext = (register_t)&sfp->sf_uc;
  524         bzero(&sf.sf_si, sizeof(sf.sf_si));
  525         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  526                 /* Signal handler installed with SA_SIGINFO. */
  527                 sf.sf_siginfo = (register_t)&sfp->sf_si;
  528                 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
  529 
  530                 /* Fill in POSIX parts */
  531                 sf.sf_si.si_signo = sig;
  532                 sf.sf_si.si_code = ksi->ksi_code;
  533                 sf.sf_si.si_addr = ksi->ksi_addr;
  534         } else {
  535                 /* Old FreeBSD-style arguments. */
  536                 sf.sf_siginfo = ksi->ksi_code;
  537                 sf.sf_addr = (register_t)ksi->ksi_addr;
  538                 sf.sf_ahu.sf_handler = catcher;
  539         }
  540         mtx_unlock(&psp->ps_mtx);
  541         PROC_UNLOCK(p);
  542 
  543         /*
  544          * If we're a vm86 process, we want to save the segment registers.
  545          * We also change eflags to be our emulated eflags, not the actual
  546          * eflags.
  547          */
  548         if (regs->tf_eflags & PSL_VM) {
  549                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
  550                 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
  551 
  552                 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
  553                 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
  554                 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
  555                 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
  556 
  557                 if (vm86->vm86_has_vme == 0)
  558                         sf.sf_uc.uc_mcontext.mc_eflags =
  559                             (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
  560                             (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
  561 
  562                 /*
  563                  * Clear PSL_NT to inhibit T_TSSFLT faults on return from
  564                  * syscalls made by the signal handler.  This just avoids
  565                  * wasting time for our lazy fixup of such faults.  PSL_NT
  566                  * does nothing in vm86 mode, but vm86 programs can set it
  567                  * almost legitimately in probes for old cpu types.
  568                  */
  569                 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
  570         }
  571 
  572         /*
  573          * Copy the sigframe out to the user's stack.
  574          */
  575         if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
  576 #ifdef DEBUG
  577                 printf("process %ld has trashed its stack\n", (long)p->p_pid);
  578 #endif
  579                 PROC_LOCK(p);
  580                 sigexit(td, SIGILL);
  581         }
  582 
  583         regs->tf_esp = (int)sfp;
  584         regs->tf_eip = p->p_sysent->sv_sigcode_base + szsigcode -
  585             szfreebsd4_sigcode;
  586         regs->tf_eflags &= ~(PSL_T | PSL_D);
  587         regs->tf_cs = _ucodesel;
  588         regs->tf_ds = _udatasel;
  589         regs->tf_es = _udatasel;
  590         regs->tf_fs = _udatasel;
  591         regs->tf_ss = _udatasel;
  592         PROC_LOCK(p);
  593         mtx_lock(&psp->ps_mtx);
  594 }
  595 #endif  /* COMPAT_FREEBSD4 */
  596 
  597 void
  598 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
  599 {
  600         struct sigframe sf, *sfp;
  601         struct proc *p;
  602         struct thread *td;
  603         struct sigacts *psp;
  604         char *sp;
  605         struct trapframe *regs;
  606         struct segment_descriptor *sdp;
  607         char *xfpusave;
  608         size_t xfpusave_len;
  609         int sig;
  610         int oonstack;
  611 
  612         td = curthread;
  613         p = td->td_proc;
  614         PROC_LOCK_ASSERT(p, MA_OWNED);
  615         sig = ksi->ksi_signo;
  616         psp = p->p_sigacts;
  617         mtx_assert(&psp->ps_mtx, MA_OWNED);
  618 #ifdef COMPAT_FREEBSD4
  619         if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
  620                 freebsd4_sendsig(catcher, ksi, mask);
  621                 return;
  622         }
  623 #endif
  624 #ifdef COMPAT_43
  625         if (SIGISMEMBER(psp->ps_osigset, sig)) {
  626                 osendsig(catcher, ksi, mask);
  627                 return;
  628         }
  629 #endif
  630         regs = td->td_frame;
  631         oonstack = sigonstack(regs->tf_esp);
  632 
  633         if (cpu_max_ext_state_size > sizeof(union savefpu) && use_xsave) {
  634                 xfpusave_len = cpu_max_ext_state_size - sizeof(union savefpu);
  635                 xfpusave = __builtin_alloca(xfpusave_len);
  636         } else {
  637                 xfpusave_len = 0;
  638                 xfpusave = NULL;
  639         }
  640 
  641         /* Save user context. */
  642         bzero(&sf, sizeof(sf));
  643         sf.sf_uc.uc_sigmask = *mask;
  644         sf.sf_uc.uc_stack = td->td_sigstk;
  645         sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
  646             ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
  647         sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
  648         sf.sf_uc.uc_mcontext.mc_gs = rgs();
  649         bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
  650         sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
  651         get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len);
  652         fpstate_drop(td);
  653         /*
  654          * Unconditionally fill the fsbase and gsbase into the mcontext.
  655          */
  656         sdp = &td->td_pcb->pcb_fsd;
  657         sf.sf_uc.uc_mcontext.mc_fsbase = sdp->sd_hibase << 24 |
  658             sdp->sd_lobase;
  659         sdp = &td->td_pcb->pcb_gsd;
  660         sf.sf_uc.uc_mcontext.mc_gsbase = sdp->sd_hibase << 24 |
  661             sdp->sd_lobase;
  662         bzero(sf.sf_uc.uc_mcontext.mc_spare2,
  663             sizeof(sf.sf_uc.uc_mcontext.mc_spare2));
  664         bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__));
  665 
  666         /* Allocate space for the signal handler context. */
  667         if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
  668             SIGISMEMBER(psp->ps_sigonstack, sig)) {
  669                 sp = (char *)td->td_sigstk.ss_sp + td->td_sigstk.ss_size;
  670 #if defined(COMPAT_43)
  671                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  672 #endif
  673         } else
  674                 sp = (char *)regs->tf_esp - 128;
  675         if (xfpusave != NULL) {
  676                 sp -= xfpusave_len;
  677                 sp = (char *)((unsigned int)sp & ~0x3F);
  678                 sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp;
  679         }
  680         sp -= sizeof(struct sigframe);
  681 
  682         /* Align to 16 bytes. */
  683         sfp = (struct sigframe *)((unsigned int)sp & ~0xF);
  684 
  685         /* Build the argument list for the signal handler. */
  686         sf.sf_signum = sig;
  687         sf.sf_ucontext = (register_t)&sfp->sf_uc;
  688         bzero(&sf.sf_si, sizeof(sf.sf_si));
  689         if (SIGISMEMBER(psp->ps_siginfo, sig)) {
  690                 /* Signal handler installed with SA_SIGINFO. */
  691                 sf.sf_siginfo = (register_t)&sfp->sf_si;
  692                 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;
  693 
  694                 /* Fill in POSIX parts */
  695                 sf.sf_si = ksi->ksi_info;
  696                 sf.sf_si.si_signo = sig; /* maybe a translated signal */
  697         } else {
  698                 /* Old FreeBSD-style arguments. */
  699                 sf.sf_siginfo = ksi->ksi_code;
  700                 sf.sf_addr = (register_t)ksi->ksi_addr;
  701                 sf.sf_ahu.sf_handler = catcher;
  702         }
  703         mtx_unlock(&psp->ps_mtx);
  704         PROC_UNLOCK(p);
  705 
  706         /*
  707          * If we're a vm86 process, we want to save the segment registers.
  708          * We also change eflags to be our emulated eflags, not the actual
  709          * eflags.
  710          */
  711         if (regs->tf_eflags & PSL_VM) {
  712                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
  713                 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;
  714 
  715                 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
  716                 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
  717                 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
  718                 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;
  719 
  720                 if (vm86->vm86_has_vme == 0)
  721                         sf.sf_uc.uc_mcontext.mc_eflags =
  722                             (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
  723                             (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));
  724 
  725                 /*
  726                  * Clear PSL_NT to inhibit T_TSSFLT faults on return from
  727                  * syscalls made by the signal handler.  This just avoids
  728                  * wasting time for our lazy fixup of such faults.  PSL_NT
  729                  * does nothing in vm86 mode, but vm86 programs can set it
  730                  * almost legitimately in probes for old cpu types.
  731                  */
  732                 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
  733         }
  734 
  735         /*
  736          * Copy the sigframe out to the user's stack.
  737          */
  738         if (copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
  739             (xfpusave != NULL && copyout(xfpusave,
  740             (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len)
  741             != 0)) {
  742 #ifdef DEBUG
  743                 printf("process %ld has trashed its stack\n", (long)p->p_pid);
  744 #endif
  745                 PROC_LOCK(p);
  746                 sigexit(td, SIGILL);
  747         }
  748 
  749         regs->tf_esp = (int)sfp;
  750         regs->tf_eip = p->p_sysent->sv_sigcode_base;
  751         if (regs->tf_eip == 0)
  752                 regs->tf_eip = p->p_sysent->sv_psstrings - szsigcode;
  753         regs->tf_eflags &= ~(PSL_T | PSL_D);
  754         regs->tf_cs = _ucodesel;
  755         regs->tf_ds = _udatasel;
  756         regs->tf_es = _udatasel;
  757         regs->tf_fs = _udatasel;
  758         regs->tf_ss = _udatasel;
  759         PROC_LOCK(p);
  760         mtx_lock(&psp->ps_mtx);
  761 }
  762 
  763 /*
  764  * System call to cleanup state after a signal
  765  * has been taken.  Reset signal mask and
  766  * stack state from context left by sendsig (above).
  767  * Return to previous pc and psl as specified by
  768  * context left by sendsig. Check carefully to
  769  * make sure that the user has not modified the
  770  * state to gain improper privileges.
  771  *
  772  * MPSAFE
  773  */
  774 #ifdef COMPAT_43
  775 int
  776 osigreturn(td, uap)
  777         struct thread *td;
  778         struct osigreturn_args /* {
  779                 struct osigcontext *sigcntxp;
  780         } */ *uap;
  781 {
  782         struct osigcontext sc;
  783         struct trapframe *regs;
  784         struct osigcontext *scp;
  785         int eflags, error;
  786         ksiginfo_t ksi;
  787 
  788         regs = td->td_frame;
  789         error = copyin(uap->sigcntxp, &sc, sizeof(sc));
  790         if (error != 0)
  791                 return (error);
  792         scp = &sc;
  793         eflags = scp->sc_ps;
  794         if (eflags & PSL_VM) {
  795                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
  796                 struct vm86_kernel *vm86;
  797 
  798                 /*
  799                  * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
  800                  * set up the vm86 area, and we can't enter vm86 mode.
  801                  */
  802                 if (td->td_pcb->pcb_ext == 0)
  803                         return (EINVAL);
  804                 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
  805                 if (vm86->vm86_inited == 0)
  806                         return (EINVAL);
  807 
  808                 /* Go back to user mode if both flags are set. */
  809                 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
  810                         ksiginfo_init_trap(&ksi);
  811                         ksi.ksi_signo = SIGBUS;
  812                         ksi.ksi_code = BUS_OBJERR;
  813                         ksi.ksi_addr = (void *)regs->tf_eip;
  814                         trapsignal(td, &ksi);
  815                 }
  816 
  817                 if (vm86->vm86_has_vme) {
  818                         eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
  819                             (eflags & VME_USERCHANGE) | PSL_VM;
  820                 } else {
  821                         vm86->vm86_eflags = eflags;     /* save VIF, VIP */
  822                         eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
  823                             (eflags & VM_USERCHANGE) | PSL_VM;
  824                 }
  825                 tf->tf_vm86_ds = scp->sc_ds;
  826                 tf->tf_vm86_es = scp->sc_es;
  827                 tf->tf_vm86_fs = scp->sc_fs;
  828                 tf->tf_vm86_gs = scp->sc_gs;
  829                 tf->tf_ds = _udatasel;
  830                 tf->tf_es = _udatasel;
  831                 tf->tf_fs = _udatasel;
  832         } else {
  833                 /*
  834                  * Don't allow users to change privileged or reserved flags.
  835                  */
  836                 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
  837                         return (EINVAL);
  838                 }
  839 
  840                 /*
  841                  * Don't allow users to load a valid privileged %cs.  Let the
  842                  * hardware check for invalid selectors, excess privilege in
  843                  * other selectors, invalid %eip's and invalid %esp's.
  844                  */
  845                 if (!CS_SECURE(scp->sc_cs)) {
  846                         ksiginfo_init_trap(&ksi);
  847                         ksi.ksi_signo = SIGBUS;
  848                         ksi.ksi_code = BUS_OBJERR;
  849                         ksi.ksi_trapno = T_PROTFLT;
  850                         ksi.ksi_addr = (void *)regs->tf_eip;
  851                         trapsignal(td, &ksi);
  852                         return (EINVAL);
  853                 }
  854                 regs->tf_ds = scp->sc_ds;
  855                 regs->tf_es = scp->sc_es;
  856                 regs->tf_fs = scp->sc_fs;
  857         }
  858 
  859         /* Restore remaining registers. */
  860         regs->tf_eax = scp->sc_eax;
  861         regs->tf_ebx = scp->sc_ebx;
  862         regs->tf_ecx = scp->sc_ecx;
  863         regs->tf_edx = scp->sc_edx;
  864         regs->tf_esi = scp->sc_esi;
  865         regs->tf_edi = scp->sc_edi;
  866         regs->tf_cs = scp->sc_cs;
  867         regs->tf_ss = scp->sc_ss;
  868         regs->tf_isp = scp->sc_isp;
  869         regs->tf_ebp = scp->sc_fp;
  870         regs->tf_esp = scp->sc_sp;
  871         regs->tf_eip = scp->sc_pc;
  872         regs->tf_eflags = eflags;
  873 
  874 #if defined(COMPAT_43)
  875         if (scp->sc_onstack & 1)
  876                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  877         else
  878                 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
  879 #endif
  880         kern_sigprocmask(td, SIG_SETMASK, (sigset_t *)&scp->sc_mask, NULL,
  881             SIGPROCMASK_OLD);
  882         return (EJUSTRETURN);
  883 }
  884 #endif /* COMPAT_43 */
  885 
  886 #ifdef COMPAT_FREEBSD4
  887 /*
  888  * MPSAFE
  889  */
  890 int
  891 freebsd4_sigreturn(td, uap)
  892         struct thread *td;
  893         struct freebsd4_sigreturn_args /* {
  894                 const ucontext4 *sigcntxp;
  895         } */ *uap;
  896 {
  897         struct ucontext4 uc;
  898         struct trapframe *regs;
  899         struct ucontext4 *ucp;
  900         int cs, eflags, error;
  901         ksiginfo_t ksi;
  902 
  903         error = copyin(uap->sigcntxp, &uc, sizeof(uc));
  904         if (error != 0)
  905                 return (error);
  906         ucp = &uc;
  907         regs = td->td_frame;
  908         eflags = ucp->uc_mcontext.mc_eflags;
  909         if (eflags & PSL_VM) {
  910                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
  911                 struct vm86_kernel *vm86;
  912 
  913                 /*
  914                  * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
  915                  * set up the vm86 area, and we can't enter vm86 mode.
  916                  */
  917                 if (td->td_pcb->pcb_ext == 0)
  918                         return (EINVAL);
  919                 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
  920                 if (vm86->vm86_inited == 0)
  921                         return (EINVAL);
  922 
  923                 /* Go back to user mode if both flags are set. */
  924                 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
  925                         ksiginfo_init_trap(&ksi);
  926                         ksi.ksi_signo = SIGBUS;
  927                         ksi.ksi_code = BUS_OBJERR;
  928                         ksi.ksi_addr = (void *)regs->tf_eip;
  929                         trapsignal(td, &ksi);
  930                 }
  931                 if (vm86->vm86_has_vme) {
  932                         eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
  933                             (eflags & VME_USERCHANGE) | PSL_VM;
  934                 } else {
  935                         vm86->vm86_eflags = eflags;     /* save VIF, VIP */
  936                         eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
  937                             (eflags & VM_USERCHANGE) | PSL_VM;
  938                 }
  939                 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
  940                 tf->tf_eflags = eflags;
  941                 tf->tf_vm86_ds = tf->tf_ds;
  942                 tf->tf_vm86_es = tf->tf_es;
  943                 tf->tf_vm86_fs = tf->tf_fs;
  944                 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
  945                 tf->tf_ds = _udatasel;
  946                 tf->tf_es = _udatasel;
  947                 tf->tf_fs = _udatasel;
  948         } else {
  949                 /*
  950                  * Don't allow users to change privileged or reserved flags.
  951                  */
  952                 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
  953                         uprintf("pid %d (%s): freebsd4_sigreturn eflags = 0x%x\n",
  954                             td->td_proc->p_pid, td->td_name, eflags);
  955                         return (EINVAL);
  956                 }
  957 
  958                 /*
  959                  * Don't allow users to load a valid privileged %cs.  Let the
  960                  * hardware check for invalid selectors, excess privilege in
  961                  * other selectors, invalid %eip's and invalid %esp's.
  962                  */
  963                 cs = ucp->uc_mcontext.mc_cs;
  964                 if (!CS_SECURE(cs)) {
  965                         uprintf("pid %d (%s): freebsd4_sigreturn cs = 0x%x\n",
  966                             td->td_proc->p_pid, td->td_name, cs);
  967                         ksiginfo_init_trap(&ksi);
  968                         ksi.ksi_signo = SIGBUS;
  969                         ksi.ksi_code = BUS_OBJERR;
  970                         ksi.ksi_trapno = T_PROTFLT;
  971                         ksi.ksi_addr = (void *)regs->tf_eip;
  972                         trapsignal(td, &ksi);
  973                         return (EINVAL);
  974                 }
  975 
  976                 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
  977         }
  978 
  979 #if defined(COMPAT_43)
  980         if (ucp->uc_mcontext.mc_onstack & 1)
  981                 td->td_sigstk.ss_flags |= SS_ONSTACK;
  982         else
  983                 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
  984 #endif
  985         kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
  986         return (EJUSTRETURN);
  987 }
  988 #endif  /* COMPAT_FREEBSD4 */
  989 
  990 /*
  991  * MPSAFE
  992  */
  993 int
  994 sys_sigreturn(td, uap)
  995         struct thread *td;
  996         struct sigreturn_args /* {
  997                 const struct __ucontext *sigcntxp;
  998         } */ *uap;
  999 {
 1000         ucontext_t uc;
 1001         struct proc *p;
 1002         struct trapframe *regs;
 1003         ucontext_t *ucp;
 1004         char *xfpustate;
 1005         size_t xfpustate_len;
 1006         int cs, eflags, error, ret;
 1007         ksiginfo_t ksi;
 1008 
 1009         p = td->td_proc;
 1010 
 1011         error = copyin(uap->sigcntxp, &uc, sizeof(uc));
 1012         if (error != 0)
 1013                 return (error);
 1014         ucp = &uc;
 1015         if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) {
 1016                 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid,
 1017                     td->td_name, ucp->uc_mcontext.mc_flags);
 1018                 return (EINVAL);
 1019         }
 1020         regs = td->td_frame;
 1021         eflags = ucp->uc_mcontext.mc_eflags;
 1022         if (eflags & PSL_VM) {
 1023                 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
 1024                 struct vm86_kernel *vm86;
 1025 
 1026                 /*
 1027                  * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
 1028                  * set up the vm86 area, and we can't enter vm86 mode.
 1029                  */
 1030                 if (td->td_pcb->pcb_ext == 0)
 1031                         return (EINVAL);
 1032                 vm86 = &td->td_pcb->pcb_ext->ext_vm86;
 1033                 if (vm86->vm86_inited == 0)
 1034                         return (EINVAL);
 1035 
 1036                 /* Go back to user mode if both flags are set. */
 1037                 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) {
 1038                         ksiginfo_init_trap(&ksi);
 1039                         ksi.ksi_signo = SIGBUS;
 1040                         ksi.ksi_code = BUS_OBJERR;
 1041                         ksi.ksi_addr = (void *)regs->tf_eip;
 1042                         trapsignal(td, &ksi);
 1043                 }
 1044 
 1045                 if (vm86->vm86_has_vme) {
 1046                         eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
 1047                             (eflags & VME_USERCHANGE) | PSL_VM;
 1048                 } else {
 1049                         vm86->vm86_eflags = eflags;     /* save VIF, VIP */
 1050                         eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
 1051                             (eflags & VM_USERCHANGE) | PSL_VM;
 1052                 }
 1053                 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
 1054                 tf->tf_eflags = eflags;
 1055                 tf->tf_vm86_ds = tf->tf_ds;
 1056                 tf->tf_vm86_es = tf->tf_es;
 1057                 tf->tf_vm86_fs = tf->tf_fs;
 1058                 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
 1059                 tf->tf_ds = _udatasel;
 1060                 tf->tf_es = _udatasel;
 1061                 tf->tf_fs = _udatasel;
 1062         } else {
 1063                 /*
 1064                  * Don't allow users to change privileged or reserved flags.
 1065                  */
 1066                 if (!EFL_SECURE(eflags, regs->tf_eflags)) {
 1067                         uprintf("pid %d (%s): sigreturn eflags = 0x%x\n",
 1068                             td->td_proc->p_pid, td->td_name, eflags);
 1069                         return (EINVAL);
 1070                 }
 1071 
 1072                 /*
 1073                  * Don't allow users to load a valid privileged %cs.  Let the
 1074                  * hardware check for invalid selectors, excess privilege in
 1075                  * other selectors, invalid %eip's and invalid %esp's.
 1076                  */
 1077                 cs = ucp->uc_mcontext.mc_cs;
 1078                 if (!CS_SECURE(cs)) {
 1079                         uprintf("pid %d (%s): sigreturn cs = 0x%x\n",
 1080                             td->td_proc->p_pid, td->td_name, cs);
 1081                         ksiginfo_init_trap(&ksi);
 1082                         ksi.ksi_signo = SIGBUS;
 1083                         ksi.ksi_code = BUS_OBJERR;
 1084                         ksi.ksi_trapno = T_PROTFLT;
 1085                         ksi.ksi_addr = (void *)regs->tf_eip;
 1086                         trapsignal(td, &ksi);
 1087                         return (EINVAL);
 1088                 }
 1089 
 1090                 if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) {
 1091                         xfpustate_len = uc.uc_mcontext.mc_xfpustate_len;
 1092                         if (xfpustate_len > cpu_max_ext_state_size -
 1093                             sizeof(union savefpu)) {
 1094                                 uprintf(
 1095                             "pid %d (%s): sigreturn xfpusave_len = 0x%zx\n",
 1096                                     p->p_pid, td->td_name, xfpustate_len);
 1097                                 return (EINVAL);
 1098                         }
 1099                         xfpustate = __builtin_alloca(xfpustate_len);
 1100                         error = copyin((const void *)uc.uc_mcontext.mc_xfpustate,
 1101                             xfpustate, xfpustate_len);
 1102                         if (error != 0) {
 1103                                 uprintf(
 1104         "pid %d (%s): sigreturn copying xfpustate failed\n",
 1105                                     p->p_pid, td->td_name);
 1106                                 return (error);
 1107                         }
 1108                 } else {
 1109                         xfpustate = NULL;
 1110                         xfpustate_len = 0;
 1111                 }
 1112                 ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate,
 1113                     xfpustate_len);
 1114                 if (ret != 0)
 1115                         return (ret);
 1116                 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
 1117         }
 1118 
 1119 #if defined(COMPAT_43)
 1120         if (ucp->uc_mcontext.mc_onstack & 1)
 1121                 td->td_sigstk.ss_flags |= SS_ONSTACK;
 1122         else
 1123                 td->td_sigstk.ss_flags &= ~SS_ONSTACK;
 1124 #endif
 1125 
 1126         kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0);
 1127         return (EJUSTRETURN);
 1128 }
 1129 
 1130 /*
 1131  * Reset registers to default values on exec.
 1132  */
 1133 void
 1134 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
 1135 {
 1136         struct trapframe *regs = td->td_frame;
 1137         struct pcb *pcb = td->td_pcb;
 1138 
 1139         /* Reset pc->pcb_gs and %gs before possibly invalidating it. */
 1140         pcb->pcb_gs = _udatasel;
 1141         load_gs(_udatasel);
 1142 
 1143         mtx_lock_spin(&dt_lock);
 1144         if (td->td_proc->p_md.md_ldt)
 1145                 user_ldt_free(td);
 1146         else
 1147                 mtx_unlock_spin(&dt_lock);
 1148   
 1149         bzero((char *)regs, sizeof(struct trapframe));
 1150         regs->tf_eip = imgp->entry_addr;
 1151         regs->tf_esp = stack;
 1152         regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
 1153         regs->tf_ss = _udatasel;
 1154         regs->tf_ds = _udatasel;
 1155         regs->tf_es = _udatasel;
 1156         regs->tf_fs = _udatasel;
 1157         regs->tf_cs = _ucodesel;
 1158 
 1159         /* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
 1160         regs->tf_ebx = imgp->ps_strings;
 1161 
 1162         /*
 1163          * Reset the hardware debug registers if they were in use.
 1164          * They won't have any meaning for the newly exec'd process.  
 1165          */
 1166         if (pcb->pcb_flags & PCB_DBREGS) {
 1167                 pcb->pcb_dr0 = 0;
 1168                 pcb->pcb_dr1 = 0;
 1169                 pcb->pcb_dr2 = 0;
 1170                 pcb->pcb_dr3 = 0;
 1171                 pcb->pcb_dr6 = 0;
 1172                 pcb->pcb_dr7 = 0;
 1173                 if (pcb == curpcb) {
 1174                         /*
 1175                          * Clear the debug registers on the running
 1176                          * CPU, otherwise they will end up affecting
 1177                          * the next process we switch to.
 1178                          */
 1179                         reset_dbregs();
 1180                 }
 1181                 pcb->pcb_flags &= ~PCB_DBREGS;
 1182         }
 1183 
 1184         pcb->pcb_initial_npxcw = __INITIAL_NPXCW__;
 1185 
 1186         /*
 1187          * Drop the FP state if we hold it, so that the process gets a
 1188          * clean FP state if it uses the FPU again.
 1189          */
 1190         fpstate_drop(td);
 1191 
 1192         /*
 1193          * XXX - Linux emulator
 1194          * Make sure sure edx is 0x0 on entry. Linux binaries depend
 1195          * on it.
 1196          */
 1197         td->td_retval[1] = 0;
 1198 }
 1199 
 1200 void
 1201 cpu_setregs(void)
 1202 {
 1203         unsigned int cr0;
 1204 
 1205         cr0 = rcr0();
 1206 
 1207         /*
 1208          * CR0_MP, CR0_NE and CR0_TS are set for NPX (FPU) support:
 1209          *
 1210          * Prepare to trap all ESC (i.e., NPX) instructions and all WAIT
 1211          * instructions.  We must set the CR0_MP bit and use the CR0_TS
 1212          * bit to control the trap, because setting the CR0_EM bit does
 1213          * not cause WAIT instructions to trap.  It's important to trap
 1214          * WAIT instructions - otherwise the "wait" variants of no-wait
 1215          * control instructions would degenerate to the "no-wait" variants
 1216          * after FP context switches but work correctly otherwise.  It's
 1217          * particularly important to trap WAITs when there is no NPX -
 1218          * otherwise the "wait" variants would always degenerate.
 1219          *
 1220          * Try setting CR0_NE to get correct error reporting on 486DX's.
 1221          * Setting it should fail or do nothing on lesser processors.
 1222          */
 1223         cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
 1224         load_cr0(cr0);
 1225         load_gs(_udatasel);
 1226 }
 1227 
 1228 u_long bootdev;         /* not a struct cdev *- encoding is different */
 1229 SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
 1230         CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in struct cdev *format)");
 1231 
 1232 static char bootmethod[16] = "BIOS";
 1233 SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0,
 1234     "System firmware boot method");
 1235 
 1236 /*
 1237  * Initialize 386 and configure to run kernel
 1238  */
 1239 
 1240 /*
 1241  * Initialize segments & interrupt table
 1242  */
 1243 
 1244 int _default_ldt;
 1245 
 1246 union descriptor gdt[NGDT * MAXCPU];    /* global descriptor table */
 1247 union descriptor ldt[NLDT];             /* local descriptor table */
 1248 static struct gate_descriptor idt0[NIDT];
 1249 struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */
 1250 struct region_descriptor r_gdt, r_idt;  /* table descriptors */
 1251 struct mtx dt_lock;                     /* lock for GDT and LDT */
 1252 
 1253 static struct i386tss dblfault_tss;
 1254 static char dblfault_stack[PAGE_SIZE];
 1255 
 1256 extern  vm_offset_t     proc0kstack;
 1257 
 1258 
 1259 /*
 1260  * software prototypes -- in more palatable form.
 1261  *
 1262  * GCODE_SEL through GUDATA_SEL must be in this order for syscall/sysret
 1263  * GUFS_SEL and GUGS_SEL must be in this order (swtch.s knows it)
 1264  */
 1265 struct soft_segment_descriptor gdt_segs[] = {
 1266 /* GNULL_SEL    0 Null Descriptor */
 1267 {       .ssd_base = 0x0,
 1268         .ssd_limit = 0x0,
 1269         .ssd_type = 0,
 1270         .ssd_dpl = SEL_KPL,
 1271         .ssd_p = 0,
 1272         .ssd_xx = 0, .ssd_xx1 = 0,
 1273         .ssd_def32 = 0,
 1274         .ssd_gran = 0           },
 1275 /* GPRIV_SEL    1 SMP Per-Processor Private Data Descriptor */
 1276 {       .ssd_base = 0x0,
 1277         .ssd_limit = 0xfffff,
 1278         .ssd_type = SDT_MEMRWA,
 1279         .ssd_dpl = SEL_KPL,
 1280         .ssd_p = 1,
 1281         .ssd_xx = 0, .ssd_xx1 = 0,
 1282         .ssd_def32 = 1,
 1283         .ssd_gran = 1           },
 1284 /* GUFS_SEL     2 %fs Descriptor for user */
 1285 {       .ssd_base = 0x0,
 1286         .ssd_limit = 0xfffff,
 1287         .ssd_type = SDT_MEMRWA,
 1288         .ssd_dpl = SEL_UPL,
 1289         .ssd_p = 1,
 1290         .ssd_xx = 0, .ssd_xx1 = 0,
 1291         .ssd_def32 = 1,
 1292         .ssd_gran = 1           },
 1293 /* GUGS_SEL     3 %gs Descriptor for user */
 1294 {       .ssd_base = 0x0,
 1295         .ssd_limit = 0xfffff,
 1296         .ssd_type = SDT_MEMRWA,
 1297         .ssd_dpl = SEL_UPL,
 1298         .ssd_p = 1,
 1299         .ssd_xx = 0, .ssd_xx1 = 0,
 1300         .ssd_def32 = 1,
 1301         .ssd_gran = 1           },
 1302 /* GCODE_SEL    4 Code Descriptor for kernel */
 1303 {       .ssd_base = 0x0,
 1304         .ssd_limit = 0xfffff,
 1305         .ssd_type = SDT_MEMERA,
 1306         .ssd_dpl = SEL_KPL,
 1307         .ssd_p = 1,
 1308         .ssd_xx = 0, .ssd_xx1 = 0,
 1309         .ssd_def32 = 1,
 1310         .ssd_gran = 1           },
 1311 /* GDATA_SEL    5 Data Descriptor for kernel */
 1312 {       .ssd_base = 0x0,
 1313         .ssd_limit = 0xfffff,
 1314         .ssd_type = SDT_MEMRWA,
 1315         .ssd_dpl = SEL_KPL,
 1316         .ssd_p = 1,
 1317         .ssd_xx = 0, .ssd_xx1 = 0,
 1318         .ssd_def32 = 1,
 1319         .ssd_gran = 1           },
 1320 /* GUCODE_SEL   6 Code Descriptor for user */
 1321 {       .ssd_base = 0x0,
 1322         .ssd_limit = 0xfffff,
 1323         .ssd_type = SDT_MEMERA,
 1324         .ssd_dpl = SEL_UPL,
 1325         .ssd_p = 1,
 1326         .ssd_xx = 0, .ssd_xx1 = 0,
 1327         .ssd_def32 = 1,
 1328         .ssd_gran = 1           },
 1329 /* GUDATA_SEL   7 Data Descriptor for user */
 1330 {       .ssd_base = 0x0,
 1331         .ssd_limit = 0xfffff,
 1332         .ssd_type = SDT_MEMRWA,
 1333         .ssd_dpl = SEL_UPL,
 1334         .ssd_p = 1,
 1335         .ssd_xx = 0, .ssd_xx1 = 0,
 1336         .ssd_def32 = 1,
 1337         .ssd_gran = 1           },
 1338 /* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
 1339 {       .ssd_base = 0x400,
 1340         .ssd_limit = 0xfffff,
 1341         .ssd_type = SDT_MEMRWA,
 1342         .ssd_dpl = SEL_KPL,
 1343         .ssd_p = 1,
 1344         .ssd_xx = 0, .ssd_xx1 = 0,
 1345         .ssd_def32 = 1,
 1346         .ssd_gran = 1           },
 1347 /* GPROC0_SEL   9 Proc 0 Tss Descriptor */
 1348 {
 1349         .ssd_base = 0x0,
 1350         .ssd_limit = sizeof(struct i386tss)-1,
 1351         .ssd_type = SDT_SYS386TSS,
 1352         .ssd_dpl = 0,
 1353         .ssd_p = 1,
 1354         .ssd_xx = 0, .ssd_xx1 = 0,
 1355         .ssd_def32 = 0,
 1356         .ssd_gran = 0           },
 1357 /* GLDT_SEL     10 LDT Descriptor */
 1358 {       .ssd_base = (int) ldt,
 1359         .ssd_limit = sizeof(ldt)-1,
 1360         .ssd_type = SDT_SYSLDT,
 1361         .ssd_dpl = SEL_UPL,
 1362         .ssd_p = 1,
 1363         .ssd_xx = 0, .ssd_xx1 = 0,
 1364         .ssd_def32 = 0,
 1365         .ssd_gran = 0           },
 1366 /* GUSERLDT_SEL 11 User LDT Descriptor per process */
 1367 {       .ssd_base = (int) ldt,
 1368         .ssd_limit = (512 * sizeof(union descriptor)-1),
 1369         .ssd_type = SDT_SYSLDT,
 1370         .ssd_dpl = 0,
 1371         .ssd_p = 1,
 1372         .ssd_xx = 0, .ssd_xx1 = 0,
 1373         .ssd_def32 = 0,
 1374         .ssd_gran = 0           },
 1375 /* GPANIC_SEL   12 Panic Tss Descriptor */
 1376 {       .ssd_base = (int) &dblfault_tss,
 1377         .ssd_limit = sizeof(struct i386tss)-1,
 1378         .ssd_type = SDT_SYS386TSS,
 1379         .ssd_dpl = 0,
 1380         .ssd_p = 1,
 1381         .ssd_xx = 0, .ssd_xx1 = 0,
 1382         .ssd_def32 = 0,
 1383         .ssd_gran = 0           },
 1384 /* GBIOSCODE32_SEL 13 BIOS 32-bit interface (32bit Code) */
 1385 {       .ssd_base = 0,
 1386         .ssd_limit = 0xfffff,
 1387         .ssd_type = SDT_MEMERA,
 1388         .ssd_dpl = 0,
 1389         .ssd_p = 1,
 1390         .ssd_xx = 0, .ssd_xx1 = 0,
 1391         .ssd_def32 = 0,
 1392         .ssd_gran = 1           },
 1393 /* GBIOSCODE16_SEL 14 BIOS 32-bit interface (16bit Code) */
 1394 {       .ssd_base = 0,
 1395         .ssd_limit = 0xfffff,
 1396         .ssd_type = SDT_MEMERA,
 1397         .ssd_dpl = 0,
 1398         .ssd_p = 1,
 1399         .ssd_xx = 0, .ssd_xx1 = 0,
 1400         .ssd_def32 = 0,
 1401         .ssd_gran = 1           },
 1402 /* GBIOSDATA_SEL 15 BIOS 32-bit interface (Data) */
 1403 {       .ssd_base = 0,
 1404         .ssd_limit = 0xfffff,
 1405         .ssd_type = SDT_MEMRWA,
 1406         .ssd_dpl = 0,
 1407         .ssd_p = 1,
 1408         .ssd_xx = 0, .ssd_xx1 = 0,
 1409         .ssd_def32 = 1,
 1410         .ssd_gran = 1           },
 1411 /* GBIOSUTIL_SEL 16 BIOS 16-bit interface (Utility) */
 1412 {       .ssd_base = 0,
 1413         .ssd_limit = 0xfffff,
 1414         .ssd_type = SDT_MEMRWA,
 1415         .ssd_dpl = 0,
 1416         .ssd_p = 1,
 1417         .ssd_xx = 0, .ssd_xx1 = 0,
 1418         .ssd_def32 = 0,
 1419         .ssd_gran = 1           },
 1420 /* GBIOSARGS_SEL 17 BIOS 16-bit interface (Arguments) */
 1421 {       .ssd_base = 0,
 1422         .ssd_limit = 0xfffff,
 1423         .ssd_type = SDT_MEMRWA,
 1424         .ssd_dpl = 0,
 1425         .ssd_p = 1,
 1426         .ssd_xx = 0, .ssd_xx1 = 0,
 1427         .ssd_def32 = 0,
 1428         .ssd_gran = 1           },
 1429 /* GNDIS_SEL    18 NDIS Descriptor */
 1430 {       .ssd_base = 0x0,
 1431         .ssd_limit = 0x0,
 1432         .ssd_type = 0,
 1433         .ssd_dpl = 0,
 1434         .ssd_p = 0,
 1435         .ssd_xx = 0, .ssd_xx1 = 0,
 1436         .ssd_def32 = 0,
 1437         .ssd_gran = 0           },
 1438 };
 1439 
 1440 static struct soft_segment_descriptor ldt_segs[] = {
 1441         /* Null Descriptor - overwritten by call gate */
 1442 {       .ssd_base = 0x0,
 1443         .ssd_limit = 0x0,
 1444         .ssd_type = 0,
 1445         .ssd_dpl = 0,
 1446         .ssd_p = 0,
 1447         .ssd_xx = 0, .ssd_xx1 = 0,
 1448         .ssd_def32 = 0,
 1449         .ssd_gran = 0           },
 1450         /* Null Descriptor - overwritten by call gate */
 1451 {       .ssd_base = 0x0,
 1452         .ssd_limit = 0x0,
 1453         .ssd_type = 0,
 1454         .ssd_dpl = 0,
 1455         .ssd_p = 0,
 1456         .ssd_xx = 0, .ssd_xx1 = 0,
 1457         .ssd_def32 = 0,
 1458         .ssd_gran = 0           },
 1459         /* Null Descriptor - overwritten by call gate */
 1460 {       .ssd_base = 0x0,
 1461         .ssd_limit = 0x0,
 1462         .ssd_type = 0,
 1463         .ssd_dpl = 0,
 1464         .ssd_p = 0,
 1465         .ssd_xx = 0, .ssd_xx1 = 0,
 1466         .ssd_def32 = 0,
 1467         .ssd_gran = 0           },
 1468         /* Code Descriptor for user */
 1469 {       .ssd_base = 0x0,
 1470         .ssd_limit = 0xfffff,
 1471         .ssd_type = SDT_MEMERA,
 1472         .ssd_dpl = SEL_UPL,
 1473         .ssd_p = 1,
 1474         .ssd_xx = 0, .ssd_xx1 = 0,
 1475         .ssd_def32 = 1,
 1476         .ssd_gran = 1           },
 1477         /* Null Descriptor - overwritten by call gate */
 1478 {       .ssd_base = 0x0,
 1479         .ssd_limit = 0x0,
 1480         .ssd_type = 0,
 1481         .ssd_dpl = 0,
 1482         .ssd_p = 0,
 1483         .ssd_xx = 0, .ssd_xx1 = 0,
 1484         .ssd_def32 = 0,
 1485         .ssd_gran = 0           },
 1486         /* Data Descriptor for user */
 1487 {       .ssd_base = 0x0,
 1488         .ssd_limit = 0xfffff,
 1489         .ssd_type = SDT_MEMRWA,
 1490         .ssd_dpl = SEL_UPL,
 1491         .ssd_p = 1,
 1492         .ssd_xx = 0, .ssd_xx1 = 0,
 1493         .ssd_def32 = 1,
 1494         .ssd_gran = 1           },
 1495 };
 1496 
 1497 void
 1498 setidt(idx, func, typ, dpl, selec)
 1499         int idx;
 1500         inthand_t *func;
 1501         int typ;
 1502         int dpl;
 1503         int selec;
 1504 {
 1505         struct gate_descriptor *ip;
 1506 
 1507         ip = idt + idx;
 1508         ip->gd_looffset = (int)func;
 1509         ip->gd_selector = selec;
 1510         ip->gd_stkcpy = 0;
 1511         ip->gd_xx = 0;
 1512         ip->gd_type = typ;
 1513         ip->gd_dpl = dpl;
 1514         ip->gd_p = 1;
 1515         ip->gd_hioffset = ((int)func)>>16 ;
 1516 }
 1517 
 1518 extern inthand_t
 1519         IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
 1520         IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
 1521         IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
 1522         IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
 1523         IDTVEC(xmm),
 1524 #ifdef KDTRACE_HOOKS
 1525         IDTVEC(dtrace_ret),
 1526 #endif
 1527 #ifdef XENHVM
 1528         IDTVEC(xen_intr_upcall),
 1529 #endif
 1530         IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
 1531 
 1532 #ifdef DDB
 1533 /*
 1534  * Display the index and function name of any IDT entries that don't use
 1535  * the default 'rsvd' entry point.
 1536  */
 1537 DB_SHOW_COMMAND(idt, db_show_idt)
 1538 {
 1539         struct gate_descriptor *ip;
 1540         int idx;
 1541         uintptr_t func;
 1542 
 1543         ip = idt;
 1544         for (idx = 0; idx < NIDT && !db_pager_quit; idx++) {
 1545                 func = (ip->gd_hioffset << 16 | ip->gd_looffset);
 1546                 if (func != (uintptr_t)&IDTVEC(rsvd)) {
 1547                         db_printf("%3d\t", idx);
 1548                         db_printsym(func, DB_STGY_PROC);
 1549                         db_printf("\n");
 1550                 }
 1551                 ip++;
 1552         }
 1553 }
 1554 
 1555 /* Show privileged registers. */
 1556 DB_SHOW_COMMAND(sysregs, db_show_sysregs)
 1557 {
 1558         uint64_t idtr, gdtr;
 1559 
 1560         idtr = ridt();
 1561         db_printf("idtr\t0x%08x/%04x\n",
 1562             (u_int)(idtr >> 16), (u_int)idtr & 0xffff);
 1563         gdtr = rgdt();
 1564         db_printf("gdtr\t0x%08x/%04x\n",
 1565             (u_int)(gdtr >> 16), (u_int)gdtr & 0xffff);
 1566         db_printf("ldtr\t0x%04x\n", rldt());
 1567         db_printf("tr\t0x%04x\n", rtr());
 1568         db_printf("cr0\t0x%08x\n", rcr0());
 1569         db_printf("cr2\t0x%08x\n", rcr2());
 1570         db_printf("cr3\t0x%08x\n", rcr3());
 1571         db_printf("cr4\t0x%08x\n", rcr4());
 1572         if (rcr4() & CR4_XSAVE)
 1573                 db_printf("xcr0\t0x%016llx\n", rxcr(0));
 1574         if (amd_feature & (AMDID_NX | AMDID_LM))
 1575                 db_printf("EFER\t0x%016llx\n", rdmsr(MSR_EFER));
 1576         if (cpu_feature2 & (CPUID2_VMX | CPUID2_SMX))
 1577                 db_printf("FEATURES_CTL\t0x%016llx\n",
 1578                     rdmsr(MSR_IA32_FEATURE_CONTROL));
 1579         if ((cpu_vendor_id == CPU_VENDOR_INTEL ||
 1580             cpu_vendor_id == CPU_VENDOR_AMD) && CPUID_TO_FAMILY(cpu_id) >= 6)
 1581                 db_printf("DEBUG_CTL\t0x%016llx\n", rdmsr(MSR_DEBUGCTLMSR));
 1582         if (cpu_feature & CPUID_PAT)
 1583                 db_printf("PAT\t0x%016llx\n", rdmsr(MSR_PAT));
 1584 }
 1585 
 1586 DB_SHOW_COMMAND(dbregs, db_show_dbregs)
 1587 {
 1588 
 1589         db_printf("dr0\t0x%08x\n", rdr0());
 1590         db_printf("dr1\t0x%08x\n", rdr1());
 1591         db_printf("dr2\t0x%08x\n", rdr2());
 1592         db_printf("dr3\t0x%08x\n", rdr3());
 1593         db_printf("dr6\t0x%08x\n", rdr6());
 1594         db_printf("dr7\t0x%08x\n", rdr7());     
 1595 }
 1596 #endif
 1597 
 1598 void
 1599 sdtossd(sd, ssd)
 1600         struct segment_descriptor *sd;
 1601         struct soft_segment_descriptor *ssd;
 1602 {
 1603         ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
 1604         ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
 1605         ssd->ssd_type  = sd->sd_type;
 1606         ssd->ssd_dpl   = sd->sd_dpl;
 1607         ssd->ssd_p     = sd->sd_p;
 1608         ssd->ssd_def32 = sd->sd_def32;
 1609         ssd->ssd_gran  = sd->sd_gran;
 1610 }
 1611 
 1612 #if !defined(PC98)
 1613 static int
 1614 add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
 1615     int *physmap_idxp)
 1616 {
 1617         int i, insert_idx, physmap_idx;
 1618 
 1619         physmap_idx = *physmap_idxp;
 1620         
 1621         if (length == 0)
 1622                 return (1);
 1623 
 1624 #ifndef PAE
 1625         if (base > 0xffffffff) {
 1626                 printf("%uK of memory above 4GB ignored\n",
 1627                     (u_int)(length / 1024));
 1628                 return (1);
 1629         }
 1630 #endif
 1631 
 1632         /*
 1633          * Find insertion point while checking for overlap.  Start off by
 1634          * assuming the new entry will be added to the end.
 1635          */
 1636         insert_idx = physmap_idx + 2;
 1637         for (i = 0; i <= physmap_idx; i += 2) {
 1638                 if (base < physmap[i + 1]) {
 1639                         if (base + length <= physmap[i]) {
 1640                                 insert_idx = i;
 1641                                 break;
 1642                         }
 1643                         if (boothowto & RB_VERBOSE)
 1644                                 printf(
 1645                     "Overlapping memory regions, ignoring second region\n");
 1646                         return (1);
 1647                 }
 1648         }
 1649 
 1650         /* See if we can prepend to the next entry. */
 1651         if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) {
 1652                 physmap[insert_idx] = base;
 1653                 return (1);
 1654         }
 1655 
 1656         /* See if we can append to the previous entry. */
 1657         if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
 1658                 physmap[insert_idx - 1] += length;
 1659                 return (1);
 1660         }
 1661 
 1662         physmap_idx += 2;
 1663         *physmap_idxp = physmap_idx;
 1664         if (physmap_idx == PHYSMAP_SIZE) {
 1665                 printf(
 1666                 "Too many segments in the physical address map, giving up\n");
 1667                 return (0);
 1668         }
 1669 
 1670         /*
 1671          * Move the last 'N' entries down to make room for the new
 1672          * entry if needed.
 1673          */
 1674         for (i = physmap_idx; i > insert_idx; i -= 2) {
 1675                 physmap[i] = physmap[i - 2];
 1676                 physmap[i + 1] = physmap[i - 1];
 1677         }
 1678 
 1679         /* Insert the new entry. */
 1680         physmap[insert_idx] = base;
 1681         physmap[insert_idx + 1] = base + length;
 1682         return (1);
 1683 }
 1684 
 1685 static int
 1686 add_smap_entry(struct bios_smap *smap, vm_paddr_t *physmap, int *physmap_idxp)
 1687 {
 1688         if (boothowto & RB_VERBOSE)
 1689                 printf("SMAP type=%02x base=%016llx len=%016llx\n",
 1690                     smap->type, smap->base, smap->length);
 1691 
 1692         if (smap->type != SMAP_TYPE_MEMORY)
 1693                 return (1);
 1694 
 1695         return (add_physmap_entry(smap->base, smap->length, physmap,
 1696             physmap_idxp));
 1697 }
 1698 
 1699 static void
 1700 add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap,
 1701     int *physmap_idxp)
 1702 {
 1703         struct bios_smap *smap, *smapend;
 1704         u_int32_t smapsize;
 1705         /*
 1706          * Memory map from INT 15:E820.
 1707          *
 1708          * subr_module.c says:
 1709          * "Consumer may safely assume that size value precedes data."
 1710          * ie: an int32_t immediately precedes SMAP.
 1711          */
 1712         smapsize = *((u_int32_t *)smapbase - 1);
 1713         smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);
 1714 
 1715         for (smap = smapbase; smap < smapend; smap++)
 1716                 if (!add_smap_entry(smap, physmap, physmap_idxp))
 1717                         break;
 1718 }
 1719 #endif /* !PC98 */
 1720 
 1721 static void
 1722 basemem_setup(void)
 1723 {
 1724         vm_paddr_t pa;
 1725         pt_entry_t *pte;
 1726         int i;
 1727 
 1728         if (basemem > 640) {
 1729                 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
 1730                         basemem);
 1731                 basemem = 640;
 1732         }
 1733 
 1734         /*
 1735          * XXX if biosbasemem is now < 640, there is a `hole'
 1736          * between the end of base memory and the start of
 1737          * ISA memory.  The hole may be empty or it may
 1738          * contain BIOS code or data.  Map it read/write so
 1739          * that the BIOS can write to it.  (Memory from 0 to
 1740          * the physical end of the kernel is mapped read-only
 1741          * to begin with and then parts of it are remapped.
 1742          * The parts that aren't remapped form holes that
 1743          * remain read-only and are unused by the kernel.
 1744          * The base memory area is below the physical end of
 1745          * the kernel and right now forms a read-only hole.
 1746          * The part of it from PAGE_SIZE to
 1747          * (trunc_page(biosbasemem * 1024) - 1) will be
 1748          * remapped and used by the kernel later.)
 1749          *
 1750          * This code is similar to the code used in
 1751          * pmap_mapdev, but since no memory needs to be
 1752          * allocated we simply change the mapping.
 1753          */
 1754         for (pa = trunc_page(basemem * 1024);
 1755              pa < ISA_HOLE_START; pa += PAGE_SIZE)
 1756                 pmap_kenter(KERNBASE + pa, pa);
 1757 
 1758         /*
 1759          * Map pages between basemem and ISA_HOLE_START, if any, r/w into
 1760          * the vm86 page table so that vm86 can scribble on them using
 1761          * the vm86 map too.  XXX: why 2 ways for this and only 1 way for
 1762          * page 0, at least as initialized here?
 1763          */
 1764         pte = (pt_entry_t *)vm86paddr;
 1765         for (i = basemem / 4; i < 160; i++)
 1766                 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
 1767 }
 1768 
 1769 /*
 1770  * Populate the (physmap) array with base/bound pairs describing the
 1771  * available physical memory in the system, then test this memory and
 1772  * build the phys_avail array describing the actually-available memory.
 1773  *
 1774  * If we cannot accurately determine the physical memory map, then use
 1775  * value from the 0xE801 call, and failing that, the RTC.
 1776  *
 1777  * Total memory size may be set by the kernel environment variable
 1778  * hw.physmem or the compile-time define MAXMEM.
 1779  *
 1780  * XXX first should be vm_paddr_t.
 1781  */
 1782 #ifdef PC98
 1783 static void
 1784 getmemsize(int first)
 1785 {
 1786         int off, physmap_idx, pa_indx, da_indx;
 1787         u_long physmem_tunable, memtest;
 1788         vm_paddr_t physmap[PHYSMAP_SIZE];
 1789         pt_entry_t *pte;
 1790         quad_t dcons_addr, dcons_size;
 1791         int i;
 1792         int pg_n;
 1793         u_int extmem;
 1794         u_int under16;
 1795         vm_paddr_t pa;
 1796 
 1797         bzero(physmap, sizeof(physmap));
 1798 
 1799         /* XXX - some of EPSON machines can't use PG_N */
 1800         pg_n = PG_N;
 1801         if (pc98_machine_type & M_EPSON_PC98) {
 1802                 switch (epson_machine_id) {
 1803 #ifdef WB_CACHE
 1804                 default:
 1805 #endif
 1806                 case EPSON_PC486_HX:
 1807                 case EPSON_PC486_HG:
 1808                 case EPSON_PC486_HA:
 1809                         pg_n = 0;
 1810                         break;
 1811                 }
 1812         }
 1813 
 1814         under16 = pc98_getmemsize(&basemem, &extmem);
 1815         basemem_setup();
 1816 
 1817         physmap[0] = 0;
 1818         physmap[1] = basemem * 1024;
 1819         physmap_idx = 2;
 1820         physmap[physmap_idx] = 0x100000;
 1821         physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
 1822 
 1823         /*
 1824          * Now, physmap contains a map of physical memory.
 1825          */
 1826 
 1827 #ifdef SMP
 1828         /* make hole for AP bootstrap code */
 1829         physmap[1] = mp_bootaddress(physmap[1]);
 1830 #endif
 1831 
 1832         /*
 1833          * Maxmem isn't the "maximum memory", it's one larger than the
 1834          * highest page of the physical address space.  It should be
 1835          * called something like "Maxphyspage".  We may adjust this 
 1836          * based on ``hw.physmem'' and the results of the memory test.
 1837          */
 1838         Maxmem = atop(physmap[physmap_idx + 1]);
 1839 
 1840 #ifdef MAXMEM
 1841         Maxmem = MAXMEM / 4;
 1842 #endif
 1843 
 1844         if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
 1845                 Maxmem = atop(physmem_tunable);
 1846 
 1847         /*
 1848          * By default keep the memtest enabled.  Use a general name so that
 1849          * one could eventually do more with the code than just disable it.
 1850          */
 1851         memtest = 1;
 1852         TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
 1853 
 1854         if (atop(physmap[physmap_idx + 1]) != Maxmem &&
 1855             (boothowto & RB_VERBOSE))
 1856                 printf("Physical memory use set to %ldK\n", Maxmem * 4);
 1857 
 1858         /*
 1859          * If Maxmem has been increased beyond what the system has detected,
 1860          * extend the last memory segment to the new limit.
 1861          */ 
 1862         if (atop(physmap[physmap_idx + 1]) < Maxmem)
 1863                 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
 1864 
 1865         /*
 1866          * We need to divide chunk if Maxmem is larger than 16MB and
 1867          * under 16MB area is not full of memory.
 1868          * (1) system area (15-16MB region) is cut off
 1869          * (2) extended memory is only over 16MB area (ex. Melco "HYPERMEMORY")
 1870          */
 1871         if ((under16 != 16 * 1024) && (extmem > 15 * 1024)) {
 1872                 /* 15M - 16M region is cut off, so need to divide chunk */
 1873                 physmap[physmap_idx + 1] = under16 * 1024;
 1874                 physmap_idx += 2;
 1875                 physmap[physmap_idx] = 0x1000000;
 1876                 physmap[physmap_idx + 1] = physmap[2] + extmem * 1024;
 1877         }
 1878 
 1879         /* call pmap initialization to make new kernel address space */
 1880         pmap_bootstrap(first);
 1881 
 1882         /*
 1883          * Size up each available chunk of physical memory.
 1884          */
 1885         physmap[0] = PAGE_SIZE;         /* mask off page 0 */
 1886         pa_indx = 0;
 1887         da_indx = 1;
 1888         phys_avail[pa_indx++] = physmap[0];
 1889         phys_avail[pa_indx] = physmap[0];
 1890         dump_avail[da_indx] = physmap[0];
 1891         pte = CMAP3;
 1892 
 1893         /*
 1894          * Get dcons buffer address
 1895          */
 1896         if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
 1897             getenv_quad("dcons.size", &dcons_size) == 0)
 1898                 dcons_addr = 0;
 1899 
 1900         /*
 1901          * physmap is in bytes, so when converting to page boundaries,
 1902          * round up the start address and round down the end address.
 1903          */
 1904         for (i = 0; i <= physmap_idx; i += 2) {
 1905                 vm_paddr_t end;
 1906 
 1907                 end = ptoa((vm_paddr_t)Maxmem);
 1908                 if (physmap[i + 1] < end)
 1909                         end = trunc_page(physmap[i + 1]);
 1910                 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
 1911                         int tmp, page_bad, full;
 1912                         int *ptr = (int *)CADDR3;
 1913 
 1914                         full = FALSE;
 1915                         /*
 1916                          * block out kernel memory as not available.
 1917                          */
 1918                         if (pa >= KERNLOAD && pa < first)
 1919                                 goto do_dump_avail;
 1920 
 1921                         /*
 1922                          * block out dcons buffer
 1923                          */
 1924                         if (dcons_addr > 0
 1925                             && pa >= trunc_page(dcons_addr)
 1926                             && pa < dcons_addr + dcons_size)
 1927                                 goto do_dump_avail;
 1928 
 1929                         page_bad = FALSE;
 1930                         if (memtest == 0)
 1931                                 goto skip_memtest;
 1932 
 1933                         /*
 1934                          * map page into kernel: valid, read/write,non-cacheable
 1935                          */
 1936                         *pte = pa | PG_V | PG_RW | pg_n;
 1937                         invltlb();
 1938 
 1939                         tmp = *(int *)ptr;
 1940                         /*
 1941                          * Test for alternating 1's and 0's
 1942                          */
 1943                         *(volatile int *)ptr = 0xaaaaaaaa;
 1944                         if (*(volatile int *)ptr != 0xaaaaaaaa)
 1945                                 page_bad = TRUE;
 1946                         /*
 1947                          * Test for alternating 0's and 1's
 1948                          */
 1949                         *(volatile int *)ptr = 0x55555555;
 1950                         if (*(volatile int *)ptr != 0x55555555)
 1951                                 page_bad = TRUE;
 1952                         /*
 1953                          * Test for all 1's
 1954                          */
 1955                         *(volatile int *)ptr = 0xffffffff;
 1956                         if (*(volatile int *)ptr != 0xffffffff)
 1957                                 page_bad = TRUE;
 1958                         /*
 1959                          * Test for all 0's
 1960                          */
 1961                         *(volatile int *)ptr = 0x0;
 1962                         if (*(volatile int *)ptr != 0x0)
 1963                                 page_bad = TRUE;
 1964                         /*
 1965                          * Restore original value.
 1966                          */
 1967                         *(int *)ptr = tmp;
 1968 
 1969 skip_memtest:
 1970                         /*
 1971                          * Adjust array of valid/good pages.
 1972                          */
 1973                         if (page_bad == TRUE)
 1974                                 continue;
 1975                         /*
 1976                          * If this good page is a continuation of the
 1977                          * previous set of good pages, then just increase
 1978                          * the end pointer. Otherwise start a new chunk.
 1979                          * Note that "end" points one higher than end,
 1980                          * making the range >= start and < end.
 1981                          * If we're also doing a speculative memory
 1982                          * test and we at or past the end, bump up Maxmem
 1983                          * so that we keep going. The first bad page
 1984                          * will terminate the loop.
 1985                          */
 1986                         if (phys_avail[pa_indx] == pa) {
 1987                                 phys_avail[pa_indx] += PAGE_SIZE;
 1988                         } else {
 1989                                 pa_indx++;
 1990                                 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
 1991                                         printf(
 1992                 "Too many holes in the physical address space, giving up\n");
 1993                                         pa_indx--;
 1994                                         full = TRUE;
 1995                                         goto do_dump_avail;
 1996                                 }
 1997                                 phys_avail[pa_indx++] = pa;     /* start */
 1998                                 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
 1999                         }
 2000                         physmem++;
 2001 do_dump_avail:
 2002                         if (dump_avail[da_indx] == pa) {
 2003                                 dump_avail[da_indx] += PAGE_SIZE;
 2004                         } else {
 2005                                 da_indx++;
 2006                                 if (da_indx == DUMP_AVAIL_ARRAY_END) {
 2007                                         da_indx--;
 2008                                         goto do_next;
 2009                                 }
 2010                                 dump_avail[da_indx++] = pa;     /* start */
 2011                                 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
 2012                         }
 2013 do_next:
 2014                         if (full)
 2015                                 break;
 2016                 }
 2017         }
 2018         *pte = 0;
 2019         invltlb();
 2020         
 2021         /*
 2022          * XXX
 2023          * The last chunk must contain at least one page plus the message
 2024          * buffer to avoid complicating other code (message buffer address
 2025          * calculation, etc.).
 2026          */
 2027         while (phys_avail[pa_indx - 1] + PAGE_SIZE +
 2028             round_page(msgbufsize) >= phys_avail[pa_indx]) {
 2029                 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
 2030                 phys_avail[pa_indx--] = 0;
 2031                 phys_avail[pa_indx--] = 0;
 2032         }
 2033 
 2034         Maxmem = atop(phys_avail[pa_indx]);
 2035 
 2036         /* Trim off space for the message buffer. */
 2037         phys_avail[pa_indx] -= round_page(msgbufsize);
 2038 
 2039         /* Map the message buffer. */
 2040         for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
 2041                 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
 2042                     off);
 2043 }
 2044 #else /* PC98 */
 2045 static void
 2046 getmemsize(int first)
 2047 {
 2048         int has_smap, off, physmap_idx, pa_indx, da_indx;
 2049         u_long memtest;
 2050         vm_paddr_t physmap[PHYSMAP_SIZE];
 2051         pt_entry_t *pte;
 2052         quad_t dcons_addr, dcons_size, physmem_tunable;
 2053         int hasbrokenint12, i, res;
 2054         u_int extmem;
 2055         struct vm86frame vmf;
 2056         struct vm86context vmc;
 2057         vm_paddr_t pa;
 2058         struct bios_smap *smap, *smapbase;
 2059         caddr_t kmdp;
 2060 
 2061         has_smap = 0;
 2062 #ifdef XBOX
 2063         if (arch_i386_is_xbox) {
 2064                 /*
 2065                  * We queried the memory size before, so chop off 4MB for
 2066                  * the framebuffer and inform the OS of this.
 2067                  */
 2068                 physmap[0] = 0;
 2069                 physmap[1] = (arch_i386_xbox_memsize * 1024 * 1024) - XBOX_FB_SIZE;
 2070                 physmap_idx = 0;
 2071                 goto physmap_done;
 2072         }
 2073 #endif
 2074         bzero(&vmf, sizeof(vmf));
 2075         bzero(physmap, sizeof(physmap));
 2076         basemem = 0;
 2077 
 2078         /*
 2079          * Check if the loader supplied an SMAP memory map.  If so,
 2080          * use that and do not make any VM86 calls.
 2081          */
 2082         physmap_idx = 0;
 2083         kmdp = preload_search_by_type("elf kernel");
 2084         if (kmdp == NULL)
 2085                 kmdp = preload_search_by_type("elf32 kernel");
 2086         smapbase = (struct bios_smap *)preload_search_info(kmdp,
 2087             MODINFO_METADATA | MODINFOMD_SMAP);
 2088         if (smapbase != NULL) {
 2089                 add_smap_entries(smapbase, physmap, &physmap_idx);
 2090                 has_smap = 1;
 2091                 goto have_smap;
 2092         }
 2093 
 2094         /*
 2095          * Some newer BIOSes have a broken INT 12H implementation
 2096          * which causes a kernel panic immediately.  In this case, we
 2097          * need use the SMAP to determine the base memory size.
 2098          */
 2099         hasbrokenint12 = 0;
 2100         TUNABLE_INT_FETCH("hw.hasbrokenint12", &hasbrokenint12);
 2101         if (hasbrokenint12 == 0) {
 2102                 /* Use INT12 to determine base memory size. */
 2103                 vm86_intcall(0x12, &vmf);
 2104                 basemem = vmf.vmf_ax;
 2105                 basemem_setup();
 2106         }
 2107 
 2108         /*
 2109          * Fetch the memory map with INT 15:E820.  Map page 1 R/W into
 2110          * the kernel page table so we can use it as a buffer.  The
 2111          * kernel will unmap this page later.
 2112          */
 2113         pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1 << PAGE_SHIFT);
 2114         vmc.npages = 0;
 2115         smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
 2116         res = vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
 2117         KASSERT(res != 0, ("vm86_getptr() failed: address not found"));
 2118 
 2119         vmf.vmf_ebx = 0;
 2120         do {
 2121                 vmf.vmf_eax = 0xE820;
 2122                 vmf.vmf_edx = SMAP_SIG;
 2123                 vmf.vmf_ecx = sizeof(struct bios_smap);
 2124                 i = vm86_datacall(0x15, &vmf, &vmc);
 2125                 if (i || vmf.vmf_eax != SMAP_SIG)
 2126                         break;
 2127                 has_smap = 1;
 2128                 if (!add_smap_entry(smap, physmap, &physmap_idx))
 2129                         break;
 2130         } while (vmf.vmf_ebx != 0);
 2131 
 2132 have_smap:
 2133         /*
 2134          * If we didn't fetch the "base memory" size from INT12,
 2135          * figure it out from the SMAP (or just guess).
 2136          */
 2137         if (basemem == 0) {
 2138                 for (i = 0; i <= physmap_idx; i += 2) {
 2139                         if (physmap[i] == 0x00000000) {
 2140                                 basemem = physmap[i + 1] / 1024;
 2141                                 break;
 2142                         }
 2143                 }
 2144 
 2145                 /* XXX: If we couldn't find basemem from SMAP, just guess. */
 2146                 if (basemem == 0)
 2147                         basemem = 640;
 2148                 basemem_setup();
 2149         }
 2150 
 2151         if (physmap[1] != 0)
 2152                 goto physmap_done;
 2153 
 2154         /*
 2155          * If we failed to find an SMAP, figure out the extended
 2156          * memory size.  We will then build a simple memory map with
 2157          * two segments, one for "base memory" and the second for
 2158          * "extended memory".  Note that "extended memory" starts at a
 2159          * physical address of 1MB and that both basemem and extmem
 2160          * are in units of 1KB.
 2161          *
 2162          * First, try to fetch the extended memory size via INT 15:E801.
 2163          */
 2164         vmf.vmf_ax = 0xE801;
 2165         if (vm86_intcall(0x15, &vmf) == 0) {
 2166                 extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
 2167         } else {
 2168                 /*
 2169                  * If INT15:E801 fails, this is our last ditch effort
 2170                  * to determine the extended memory size.  Currently
 2171                  * we prefer the RTC value over INT15:88.
 2172                  */
 2173 #if 0
 2174                 vmf.vmf_ah = 0x88;
 2175                 vm86_intcall(0x15, &vmf);
 2176                 extmem = vmf.vmf_ax;
 2177 #else
 2178                 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
 2179 #endif
 2180         }
 2181 
 2182         /*
 2183          * Special hack for chipsets that still remap the 384k hole when
 2184          * there's 16MB of memory - this really confuses people that
 2185          * are trying to use bus mastering ISA controllers with the
 2186          * "16MB limit"; they only have 16MB, but the remapping puts
 2187          * them beyond the limit.
 2188          *
 2189          * If extended memory is between 15-16MB (16-17MB phys address range),
 2190          *      chop it to 15MB.
 2191          */
 2192         if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
 2193                 extmem = 15 * 1024;
 2194 
 2195         physmap[0] = 0;
 2196         physmap[1] = basemem * 1024;
 2197         physmap_idx = 2;
 2198         physmap[physmap_idx] = 0x100000;
 2199         physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
 2200 
 2201 physmap_done:
 2202         /*
 2203          * Now, physmap contains a map of physical memory.
 2204          */
 2205 
 2206 #ifdef SMP
 2207         /* make hole for AP bootstrap code */
 2208         physmap[1] = mp_bootaddress(physmap[1]);
 2209 #endif
 2210 
 2211         /*
 2212          * Maxmem isn't the "maximum memory", it's one larger than the
 2213          * highest page of the physical address space.  It should be
 2214          * called something like "Maxphyspage".  We may adjust this 
 2215          * based on ``hw.physmem'' and the results of the memory test.
 2216          *
 2217          * This is especially confusing when it is much larger than the
 2218          * memory size and is displayed as "realmem".
 2219          */
 2220         Maxmem = atop(physmap[physmap_idx + 1]);
 2221 
 2222 #ifdef MAXMEM
 2223         Maxmem = MAXMEM / 4;
 2224 #endif
 2225 
 2226         if (TUNABLE_QUAD_FETCH("hw.physmem", &physmem_tunable))
 2227                 Maxmem = atop(physmem_tunable);
 2228 
 2229         /*
 2230          * If we have an SMAP, don't allow MAXMEM or hw.physmem to extend
 2231          * the amount of memory in the system.
 2232          */
 2233         if (has_smap && Maxmem > atop(physmap[physmap_idx + 1]))
 2234                 Maxmem = atop(physmap[physmap_idx + 1]);
 2235 
 2236         /*
 2237          * By default enable the memory test on real hardware, and disable
 2238          * it if we appear to be running in a VM.  This avoids touching all
 2239          * pages unnecessarily, which doesn't matter on real hardware but is
 2240          * bad for shared VM hosts.  Use a general name so that
 2241          * one could eventually do more with the code than just disable it.
 2242          */
 2243         memtest = (vm_guest > VM_GUEST_NO) ? 0 : 1;
 2244         TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest);
 2245 
 2246         if (atop(physmap[physmap_idx + 1]) != Maxmem &&
 2247             (boothowto & RB_VERBOSE))
 2248                 printf("Physical memory use set to %ldK\n", Maxmem * 4);
 2249 
 2250         /*
 2251          * If Maxmem has been increased beyond what the system has detected,
 2252          * extend the last memory segment to the new limit.
 2253          */ 
 2254         if (atop(physmap[physmap_idx + 1]) < Maxmem)
 2255                 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);
 2256 
 2257         /* call pmap initialization to make new kernel address space */
 2258         pmap_bootstrap(first);
 2259 
 2260         /*
 2261          * Size up each available chunk of physical memory.
 2262          */
 2263         physmap[0] = PAGE_SIZE;         /* mask off page 0 */
 2264         pa_indx = 0;
 2265         da_indx = 1;
 2266         phys_avail[pa_indx++] = physmap[0];
 2267         phys_avail[pa_indx] = physmap[0];
 2268         dump_avail[da_indx] = physmap[0];
 2269         pte = CMAP3;
 2270 
 2271         /*
 2272          * Get dcons buffer address
 2273          */
 2274         if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
 2275             getenv_quad("dcons.size", &dcons_size) == 0)
 2276                 dcons_addr = 0;
 2277 
 2278         /*
 2279          * physmap is in bytes, so when converting to page boundaries,
 2280          * round up the start address and round down the end address.
 2281          */
 2282         for (i = 0; i <= physmap_idx; i += 2) {
 2283                 vm_paddr_t end;
 2284 
 2285                 end = ptoa((vm_paddr_t)Maxmem);
 2286                 if (physmap[i + 1] < end)
 2287                         end = trunc_page(physmap[i + 1]);
 2288                 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
 2289                         int tmp, page_bad, full;
 2290                         int *ptr = (int *)CADDR3;
 2291 
 2292                         full = FALSE;
 2293                         /*
 2294                          * block out kernel memory as not available.
 2295                          */
 2296                         if (pa >= KERNLOAD && pa < first)
 2297                                 goto do_dump_avail;
 2298 
 2299                         /*
 2300                          * block out dcons buffer
 2301                          */
 2302                         if (dcons_addr > 0
 2303                             && pa >= trunc_page(dcons_addr)
 2304                             && pa < dcons_addr + dcons_size)
 2305                                 goto do_dump_avail;
 2306 
 2307                         page_bad = FALSE;
 2308                         if (memtest == 0)
 2309                                 goto skip_memtest;
 2310 
 2311                         /*
 2312                          * map page into kernel: valid, read/write,non-cacheable
 2313                          */
 2314                         *pte = pa | PG_V | PG_RW | PG_N;
 2315                         invltlb();
 2316 
 2317                         tmp = *(int *)ptr;
 2318                         /*
 2319                          * Test for alternating 1's and 0's
 2320                          */
 2321                         *(volatile int *)ptr = 0xaaaaaaaa;
 2322                         if (*(volatile int *)ptr != 0xaaaaaaaa)
 2323                                 page_bad = TRUE;
 2324                         /*
 2325                          * Test for alternating 0's and 1's
 2326                          */
 2327                         *(volatile int *)ptr = 0x55555555;
 2328                         if (*(volatile int *)ptr != 0x55555555)
 2329                                 page_bad = TRUE;
 2330                         /*
 2331                          * Test for all 1's
 2332                          */
 2333                         *(volatile int *)ptr = 0xffffffff;
 2334                         if (*(volatile int *)ptr != 0xffffffff)
 2335                                 page_bad = TRUE;
 2336                         /*
 2337                          * Test for all 0's
 2338                          */
 2339                         *(volatile int *)ptr = 0x0;
 2340                         if (*(volatile int *)ptr != 0x0)
 2341                                 page_bad = TRUE;
 2342                         /*
 2343                          * Restore original value.
 2344                          */
 2345                         *(int *)ptr = tmp;
 2346 
 2347 skip_memtest:
 2348                         /*
 2349                          * Adjust array of valid/good pages.
 2350                          */
 2351                         if (page_bad == TRUE)
 2352                                 continue;
 2353                         /*
 2354                          * If this good page is a continuation of the
 2355                          * previous set of good pages, then just increase
 2356                          * the end pointer. Otherwise start a new chunk.
 2357                          * Note that "end" points one higher than end,
 2358                          * making the range >= start and < end.
 2359                          * If we're also doing a speculative memory
 2360                          * test and we at or past the end, bump up Maxmem
 2361                          * so that we keep going. The first bad page
 2362                          * will terminate the loop.
 2363                          */
 2364                         if (phys_avail[pa_indx] == pa) {
 2365                                 phys_avail[pa_indx] += PAGE_SIZE;
 2366                         } else {
 2367                                 pa_indx++;
 2368                                 if (pa_indx == PHYS_AVAIL_ARRAY_END) {
 2369                                         printf(
 2370                 "Too many holes in the physical address space, giving up\n");
 2371                                         pa_indx--;
 2372                                         full = TRUE;
 2373                                         goto do_dump_avail;
 2374                                 }
 2375                                 phys_avail[pa_indx++] = pa;     /* start */
 2376                                 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
 2377                         }
 2378                         physmem++;
 2379 do_dump_avail:
 2380                         if (dump_avail[da_indx] == pa) {
 2381                                 dump_avail[da_indx] += PAGE_SIZE;
 2382                         } else {
 2383                                 da_indx++;
 2384                                 if (da_indx == DUMP_AVAIL_ARRAY_END) {
 2385                                         da_indx--;
 2386                                         goto do_next;
 2387                                 }
 2388                                 dump_avail[da_indx++] = pa;     /* start */
 2389                                 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
 2390                         }
 2391 do_next:
 2392                         if (full)
 2393                                 break;
 2394                 }
 2395         }
 2396         *pte = 0;
 2397         invltlb();
 2398         
 2399         /*
 2400          * XXX
 2401          * The last chunk must contain at least one page plus the message
 2402          * buffer to avoid complicating other code (message buffer address
 2403          * calculation, etc.).
 2404          */
 2405         while (phys_avail[pa_indx - 1] + PAGE_SIZE +
 2406             round_page(msgbufsize) >= phys_avail[pa_indx]) {
 2407                 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
 2408                 phys_avail[pa_indx--] = 0;
 2409                 phys_avail[pa_indx--] = 0;
 2410         }
 2411 
 2412         Maxmem = atop(phys_avail[pa_indx]);
 2413 
 2414         /* Trim off space for the message buffer. */
 2415         phys_avail[pa_indx] -= round_page(msgbufsize);
 2416 
 2417         /* Map the message buffer. */
 2418         for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
 2419                 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
 2420                     off);
 2421 }
 2422 #endif /* PC98 */
 2423 
 2424 static void
 2425 i386_kdb_init(void)
 2426 {
 2427 #ifdef DDB
 2428         db_fetch_ksymtab(bootinfo.bi_symtab, bootinfo.bi_esymtab);
 2429 #endif
 2430         kdb_init();
 2431 #ifdef KDB
 2432         if (boothowto & RB_KDB)
 2433                 kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
 2434 #endif
 2435 }
 2436 
 2437 register_t
 2438 init386(int first)
 2439 {
 2440         struct gate_descriptor *gdp;
 2441         int gsel_tss, metadata_missing, x, pa;
 2442         struct pcpu *pc;
 2443         struct xstate_hdr *xhdr;
 2444         int late_console;
 2445 
 2446         thread0.td_kstack = proc0kstack;
 2447         thread0.td_kstack_pages = TD0_KSTACK_PAGES;
 2448 
 2449         /*
 2450          * This may be done better later if it gets more high level
 2451          * components in it. If so just link td->td_proc here.
 2452          */
 2453         proc_linkup0(&proc0, &thread0);
 2454 
 2455 #ifdef PC98
 2456         /*
 2457          * Initialize DMAC
 2458          */
 2459         pc98_init_dmac();
 2460 #endif
 2461 
 2462         metadata_missing = 0;
 2463         if (bootinfo.bi_modulep) {
 2464                 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
 2465                 preload_bootstrap_relocate(KERNBASE);
 2466         } else {
 2467                 metadata_missing = 1;
 2468         }
 2469 
 2470         if (bootinfo.bi_envp != 0)
 2471                 init_static_kenv((char *)bootinfo.bi_envp + KERNBASE, 0);
 2472         else
 2473                 init_static_kenv(NULL, 0);
 2474 
 2475         /* Init basic tunables, hz etc */
 2476         init_param1();
 2477 
 2478         /*
 2479          * Make gdt memory segments.  All segments cover the full 4GB
 2480          * of address space and permissions are enforced at page level.
 2481          */
 2482         gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
 2483         gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
 2484         gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - 1);
 2485         gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - 1);
 2486         gdt_segs[GUFS_SEL].ssd_limit = atop(0 - 1);
 2487         gdt_segs[GUGS_SEL].ssd_limit = atop(0 - 1);
 2488 
 2489         pc = &__pcpu[0];
 2490         gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
 2491         gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
 2492         gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
 2493 
 2494         for (x = 0; x < NGDT; x++)
 2495                 ssdtosd(&gdt_segs[x], &gdt[x].sd);
 2496 
 2497         r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
 2498         r_gdt.rd_base =  (int) gdt;
 2499         mtx_init(&dt_lock, "descriptor tables", NULL, MTX_SPIN);
 2500         lgdt(&r_gdt);
 2501 
 2502         pcpu_init(pc, 0, sizeof(struct pcpu));
 2503         for (pa = first; pa < first + DPCPU_SIZE; pa += PAGE_SIZE)
 2504                 pmap_kenter(pa + KERNBASE, pa);
 2505         dpcpu_init((void *)(first + KERNBASE), 0);
 2506         first += DPCPU_SIZE;
 2507         PCPU_SET(prvspace, pc);
 2508         PCPU_SET(curthread, &thread0);
 2509         /* Non-late cninit() and printf() can be moved up to here. */
 2510 
 2511         /*
 2512          * Initialize mutexes.
 2513          *
 2514          * icu_lock: in order to allow an interrupt to occur in a critical
 2515          *           section, to set pcpu->ipending (etc...) properly, we
 2516          *           must be able to get the icu lock, so it can't be
 2517          *           under witness.
 2518          */
 2519         mutex_init();
 2520         mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
 2521 
 2522         /* make ldt memory segments */
 2523         ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
 2524         ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
 2525         for (x = 0; x < nitems(ldt_segs); x++)
 2526                 ssdtosd(&ldt_segs[x], &ldt[x].sd);
 2527 
 2528         _default_ldt = GSEL(GLDT_SEL, SEL_KPL);
 2529         lldt(_default_ldt);
 2530         PCPU_SET(currentldt, _default_ldt);
 2531 
 2532         /* exceptions */
 2533         for (x = 0; x < NIDT; x++)
 2534                 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
 2535                     GSEL(GCODE_SEL, SEL_KPL));
 2536         setidt(IDT_DE, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL,
 2537             GSEL(GCODE_SEL, SEL_KPL));
 2538         setidt(IDT_DB, &IDTVEC(dbg),  SDT_SYS386IGT, SEL_KPL,
 2539             GSEL(GCODE_SEL, SEL_KPL));
 2540         setidt(IDT_NMI, &IDTVEC(nmi),  SDT_SYS386IGT, SEL_KPL,
 2541             GSEL(GCODE_SEL, SEL_KPL));
 2542         setidt(IDT_BP, &IDTVEC(bpt),  SDT_SYS386IGT, SEL_UPL,
 2543             GSEL(GCODE_SEL, SEL_KPL));
 2544         setidt(IDT_OF, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL,
 2545             GSEL(GCODE_SEL, SEL_KPL));
 2546         setidt(IDT_BR, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL,
 2547             GSEL(GCODE_SEL, SEL_KPL));
 2548         setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
 2549             GSEL(GCODE_SEL, SEL_KPL));
 2550         setidt(IDT_NM, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL
 2551             , GSEL(GCODE_SEL, SEL_KPL));
 2552         setidt(IDT_DF, 0,  SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
 2553         setidt(IDT_FPUGP, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL,
 2554             GSEL(GCODE_SEL, SEL_KPL));
 2555         setidt(IDT_TS, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL,
 2556             GSEL(GCODE_SEL, SEL_KPL));
 2557         setidt(IDT_NP, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL,
 2558             GSEL(GCODE_SEL, SEL_KPL));
 2559         setidt(IDT_SS, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL,
 2560             GSEL(GCODE_SEL, SEL_KPL));
 2561         setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
 2562             GSEL(GCODE_SEL, SEL_KPL));
 2563         setidt(IDT_PF, &IDTVEC(page),  SDT_SYS386IGT, SEL_KPL,
 2564             GSEL(GCODE_SEL, SEL_KPL));
 2565         setidt(IDT_MF, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL,
 2566             GSEL(GCODE_SEL, SEL_KPL));
 2567         setidt(IDT_AC, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
 2568             GSEL(GCODE_SEL, SEL_KPL));
 2569         setidt(IDT_MC, &IDTVEC(mchk),  SDT_SYS386TGT, SEL_KPL,
 2570             GSEL(GCODE_SEL, SEL_KPL));
 2571         setidt(IDT_XF, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
 2572             GSEL(GCODE_SEL, SEL_KPL));
 2573         setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
 2574             GSEL(GCODE_SEL, SEL_KPL));
 2575 #ifdef KDTRACE_HOOKS
 2576         setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
 2577             GSEL(GCODE_SEL, SEL_KPL));
 2578 #endif
 2579 #ifdef XENHVM
 2580         setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYS386IGT, SEL_KPL,
 2581             GSEL(GCODE_SEL, SEL_KPL));
 2582 #endif
 2583 
 2584         r_idt.rd_limit = sizeof(idt0) - 1;
 2585         r_idt.rd_base = (int) idt;
 2586         lidt(&r_idt);
 2587 
 2588 #ifdef XBOX
 2589         /*
 2590          * The following code queries the PCI ID of 0:0:0. For the XBOX,
 2591          * This should be 0x10de / 0x02a5.
 2592          *
 2593          * This is exactly what Linux does.
 2594          */
 2595         outl(0xcf8, 0x80000000);
 2596         if (inl(0xcfc) == 0x02a510de) {
 2597                 arch_i386_is_xbox = 1;
 2598                 pic16l_setled(XBOX_LED_GREEN);
 2599 
 2600                 /*
 2601                  * We are an XBOX, but we may have either 64MB or 128MB of
 2602                  * memory. The PCI host bridge should be programmed for this,
 2603                  * so we just query it. 
 2604                  */
 2605                 outl(0xcf8, 0x80000084);
 2606                 arch_i386_xbox_memsize = (inl(0xcfc) == 0x7FFFFFF) ? 128 : 64;
 2607         }
 2608 #endif /* XBOX */
 2609 
 2610         /*
 2611          * Initialize the clock before the console so that console
 2612          * initialization can use DELAY().
 2613          */
 2614         clock_init();
 2615 
 2616         finishidentcpu();       /* Final stage of CPU initialization */
 2617         setidt(IDT_UD, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL,
 2618             GSEL(GCODE_SEL, SEL_KPL));
 2619         setidt(IDT_GP, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL,
 2620             GSEL(GCODE_SEL, SEL_KPL));
 2621         initializecpu();        /* Initialize CPU registers */
 2622         initializecpucache();
 2623 
 2624         /* pointer to selector slot for %fs/%gs */
 2625         PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
 2626 
 2627         dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
 2628             dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
 2629         dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
 2630             dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
 2631 #if defined(PAE) || defined(PAE_TABLES)
 2632         dblfault_tss.tss_cr3 = (int)IdlePDPT;
 2633 #else
 2634         dblfault_tss.tss_cr3 = (int)IdlePTD;
 2635 #endif
 2636         dblfault_tss.tss_eip = (int)dblfault_handler;
 2637         dblfault_tss.tss_eflags = PSL_KERNEL;
 2638         dblfault_tss.tss_ds = dblfault_tss.tss_es =
 2639             dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
 2640         dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
 2641         dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
 2642         dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
 2643 
 2644         /* Initialize the tss (except for the final esp0) early for vm86. */
 2645         PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
 2646             thread0.td_kstack_pages * PAGE_SIZE - 16);
 2647         PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
 2648         gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
 2649         PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
 2650         PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
 2651         PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
 2652         ltr(gsel_tss);
 2653 
 2654         /* Initialize the PIC early for vm86 calls. */
 2655 #ifdef DEV_ISA
 2656 #ifdef DEV_ATPIC
 2657 #ifndef PC98
 2658         elcr_probe();
 2659 #endif
 2660         atpic_startup();
 2661 #else
 2662         /* Reset and mask the atpics and leave them shut down. */
 2663         atpic_reset();
 2664 
 2665         /*
 2666          * Point the ICU spurious interrupt vectors at the APIC spurious
 2667          * interrupt handler.
 2668          */
 2669         setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
 2670             GSEL(GCODE_SEL, SEL_KPL));
 2671         setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYS386IGT, SEL_KPL,
 2672             GSEL(GCODE_SEL, SEL_KPL));
 2673 #endif
 2674 #endif
 2675 
 2676         /*
 2677          * The console and kdb should be initialized even earlier than here,
 2678          * but some console drivers don't work until after getmemsize().
 2679          * Default to late console initialization to support these drivers.
 2680          * This loses mainly printf()s in getmemsize() and early debugging.
 2681          */
 2682         late_console = 1;
 2683         TUNABLE_INT_FETCH("debug.late_console", &late_console);
 2684         if (!late_console) {
 2685                 cninit();
 2686                 i386_kdb_init();
 2687         }
 2688 
 2689         vm86_initialize();
 2690         getmemsize(first);
 2691         init_param2(physmem);
 2692 
 2693         /* now running on new page tables, configured,and u/iom is accessible */
 2694 
 2695         if (late_console)
 2696                 cninit();
 2697 
 2698         if (metadata_missing)
 2699                 printf("WARNING: loader(8) metadata is missing!\n");
 2700 
 2701         if (late_console)
 2702                 i386_kdb_init();
 2703 
 2704         msgbufinit(msgbufp, msgbufsize);
 2705         npxinit(true);
 2706         /*
 2707          * Set up thread0 pcb after npxinit calculated pcb + fpu save
 2708          * area size.  Zero out the extended state header in fpu save
 2709          * area.
 2710          */
 2711         thread0.td_pcb = get_pcb_td(&thread0);
 2712         thread0.td_pcb->pcb_save = get_pcb_user_save_td(&thread0);
 2713         bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size);
 2714         if (use_xsave) {
 2715                 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) +
 2716                     1);
 2717                 xhdr->xstate_bv = xsave_mask;
 2718         }
 2719         PCPU_SET(curpcb, thread0.td_pcb);
 2720         /* Move esp0 in the tss to its final place. */
 2721         /* Note: -16 is so we can grow the trapframe if we came from vm86 */
 2722         PCPU_SET(common_tss.tss_esp0, (vm_offset_t)thread0.td_pcb - 16);
 2723         gdt[GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;     /* clear busy bit */
 2724         ltr(gsel_tss);
 2725 
 2726         /* make a call gate to reenter kernel with */
 2727         gdp = &ldt[LSYS5CALLS_SEL].gd;
 2728 
 2729         x = (int) &IDTVEC(lcall_syscall);
 2730         gdp->gd_looffset = x;
 2731         gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
 2732         gdp->gd_stkcpy = 1;
 2733         gdp->gd_type = SDT_SYS386CGT;
 2734         gdp->gd_dpl = SEL_UPL;
 2735         gdp->gd_p = 1;
 2736         gdp->gd_hioffset = x >> 16;
 2737 
 2738         /* XXX does this work? */
 2739         /* XXX yes! */
 2740         ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
 2741         ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];
 2742 
 2743         /* transfer to user mode */
 2744 
 2745         _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
 2746         _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
 2747 
 2748         /* setup proc 0's pcb */
 2749         thread0.td_pcb->pcb_flags = 0;
 2750 #if defined(PAE) || defined(PAE_TABLES)
 2751         thread0.td_pcb->pcb_cr3 = (int)IdlePDPT;
 2752 #else
 2753         thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
 2754 #endif
 2755         thread0.td_pcb->pcb_ext = 0;
 2756         thread0.td_frame = &proc0_tf;
 2757 
 2758         cpu_probe_amdc1e();
 2759 
 2760 #ifdef FDT
 2761         x86_init_fdt();
 2762 #endif
 2763 
 2764         /* Location of kernel stack for locore */
 2765         return ((register_t)thread0.td_pcb);
 2766 }
 2767 
 2768 void
 2769 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
 2770 {
 2771 
 2772         pcpu->pc_acpi_id = 0xffffffff;
 2773 }
 2774 
 2775 #ifndef PC98
 2776 static int
 2777 smap_sysctl_handler(SYSCTL_HANDLER_ARGS)
 2778 {
 2779         struct bios_smap *smapbase;
 2780         struct bios_smap_xattr smap;
 2781         caddr_t kmdp;
 2782         uint32_t *smapattr;
 2783         int count, error, i;
 2784 
 2785         /* Retrieve the system memory map from the loader. */
 2786         kmdp = preload_search_by_type("elf kernel");
 2787         if (kmdp == NULL)
 2788                 kmdp = preload_search_by_type("elf32 kernel");
 2789         smapbase = (struct bios_smap *)preload_search_info(kmdp,
 2790             MODINFO_METADATA | MODINFOMD_SMAP);
 2791         if (smapbase == NULL)
 2792                 return (0);
 2793         smapattr = (uint32_t *)preload_search_info(kmdp,
 2794             MODINFO_METADATA | MODINFOMD_SMAP_XATTR);
 2795         count = *((u_int32_t *)smapbase - 1) / sizeof(*smapbase);
 2796         error = 0;
 2797         for (i = 0; i < count; i++) {
 2798                 smap.base = smapbase[i].base;
 2799                 smap.length = smapbase[i].length;
 2800                 smap.type = smapbase[i].type;
 2801                 if (smapattr != NULL)
 2802                         smap.xattr = smapattr[i];
 2803                 else
 2804                         smap.xattr = 0;
 2805                 error = SYSCTL_OUT(req, &smap, sizeof(smap));
 2806         }
 2807         return (error);
 2808 }
 2809 SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0,
 2810     smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data");
 2811 #endif /* !PC98 */
 2812 
 2813 void
 2814 spinlock_enter(void)
 2815 {
 2816         struct thread *td;
 2817         register_t flags;
 2818 
 2819         td = curthread;
 2820         if (td->td_md.md_spinlock_count == 0) {
 2821                 flags = intr_disable();
 2822                 td->td_md.md_spinlock_count = 1;
 2823                 td->td_md.md_saved_flags = flags;
 2824         } else
 2825                 td->td_md.md_spinlock_count++;
 2826         critical_enter();
 2827 }
 2828 
 2829 void
 2830 spinlock_exit(void)
 2831 {
 2832         struct thread *td;
 2833         register_t flags;
 2834 
 2835         td = curthread;
 2836         critical_exit();
 2837         flags = td->td_md.md_saved_flags;
 2838         td->td_md.md_spinlock_count--;
 2839         if (td->td_md.md_spinlock_count == 0)
 2840                 intr_restore(flags);
 2841 }
 2842 
 2843 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 2844 static void f00f_hack(void *unused);
 2845 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);
 2846 
 2847 static void
 2848 f00f_hack(void *unused)
 2849 {
 2850         struct gate_descriptor *new_idt;
 2851         vm_offset_t tmp;
 2852 
 2853         if (!has_f00f_bug)
 2854                 return;
 2855 
 2856         GIANT_REQUIRED;
 2857 
 2858         printf("Intel Pentium detected, installing workaround for F00F bug\n");
 2859 
 2860         tmp = kmem_malloc(kernel_arena, PAGE_SIZE * 2, M_WAITOK | M_ZERO);
 2861         if (tmp == 0)
 2862                 panic("kmem_malloc returned 0");
 2863 
 2864         /* Put the problematic entry (#6) at the end of the lower page. */
 2865         new_idt = (struct gate_descriptor*)
 2866             (tmp + PAGE_SIZE - 7 * sizeof(struct gate_descriptor));
 2867         bcopy(idt, new_idt, sizeof(idt0));
 2868         r_idt.rd_base = (u_int)new_idt;
 2869         lidt(&r_idt);
 2870         idt = new_idt;
 2871         pmap_protect(kernel_pmap, tmp, tmp + PAGE_SIZE, VM_PROT_READ);
 2872 }
 2873 #endif /* defined(I586_CPU) && !NO_F00F_HACK */
 2874 
 2875 /*
 2876  * Construct a PCB from a trapframe. This is called from kdb_trap() where
 2877  * we want to start a backtrace from the function that caused us to enter
 2878  * the debugger. We have the context in the trapframe, but base the trace
 2879  * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 2880  * enough for a backtrace.
 2881  */
 2882 void
 2883 makectx(struct trapframe *tf, struct pcb *pcb)
 2884 {
 2885 
 2886         pcb->pcb_edi = tf->tf_edi;
 2887         pcb->pcb_esi = tf->tf_esi;
 2888         pcb->pcb_ebp = tf->tf_ebp;
 2889         pcb->pcb_ebx = tf->tf_ebx;
 2890         pcb->pcb_eip = tf->tf_eip;
 2891         pcb->pcb_esp = (ISPL(tf->tf_cs)) ? tf->tf_esp : (int)(tf + 1) - 8;
 2892         pcb->pcb_gs = rgs();
 2893 }
 2894 
 2895 int
 2896 ptrace_set_pc(struct thread *td, u_long addr)
 2897 {
 2898 
 2899         td->td_frame->tf_eip = addr;
 2900         return (0);
 2901 }
 2902 
 2903 int
 2904 ptrace_single_step(struct thread *td)
 2905 {
 2906         td->td_frame->tf_eflags |= PSL_T;
 2907         return (0);
 2908 }
 2909 
 2910 int
 2911 ptrace_clear_single_step(struct thread *td)
 2912 {
 2913         td->td_frame->tf_eflags &= ~PSL_T;
 2914         return (0);
 2915 }
 2916 
 2917 int
 2918 fill_regs(struct thread *td, struct reg *regs)
 2919 {
 2920         struct pcb *pcb;
 2921         struct trapframe *tp;
 2922 
 2923         tp = td->td_frame;
 2924         pcb = td->td_pcb;
 2925         regs->r_gs = pcb->pcb_gs;
 2926         return (fill_frame_regs(tp, regs));
 2927 }
 2928 
 2929 int
 2930 fill_frame_regs(struct trapframe *tp, struct reg *regs)
 2931 {
 2932         regs->r_fs = tp->tf_fs;
 2933         regs->r_es = tp->tf_es;
 2934         regs->r_ds = tp->tf_ds;
 2935         regs->r_edi = tp->tf_edi;
 2936         regs->r_esi = tp->tf_esi;
 2937         regs->r_ebp = tp->tf_ebp;
 2938         regs->r_ebx = tp->tf_ebx;
 2939         regs->r_edx = tp->tf_edx;
 2940         regs->r_ecx = tp->tf_ecx;
 2941         regs->r_eax = tp->tf_eax;
 2942         regs->r_eip = tp->tf_eip;
 2943         regs->r_cs = tp->tf_cs;
 2944         regs->r_eflags = tp->tf_eflags;
 2945         regs->r_esp = tp->tf_esp;
 2946         regs->r_ss = tp->tf_ss;
 2947         return (0);
 2948 }
 2949 
 2950 int
 2951 set_regs(struct thread *td, struct reg *regs)
 2952 {
 2953         struct pcb *pcb;
 2954         struct trapframe *tp;
 2955 
 2956         tp = td->td_frame;
 2957         if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
 2958             !CS_SECURE(regs->r_cs))
 2959                 return (EINVAL);
 2960         pcb = td->td_pcb;
 2961         tp->tf_fs = regs->r_fs;
 2962         tp->tf_es = regs->r_es;
 2963         tp->tf_ds = regs->r_ds;
 2964         tp->tf_edi = regs->r_edi;
 2965         tp->tf_esi = regs->r_esi;
 2966         tp->tf_ebp = regs->r_ebp;
 2967         tp->tf_ebx = regs->r_ebx;
 2968         tp->tf_edx = regs->r_edx;
 2969         tp->tf_ecx = regs->r_ecx;
 2970         tp->tf_eax = regs->r_eax;
 2971         tp->tf_eip = regs->r_eip;
 2972         tp->tf_cs = regs->r_cs;
 2973         tp->tf_eflags = regs->r_eflags;
 2974         tp->tf_esp = regs->r_esp;
 2975         tp->tf_ss = regs->r_ss;
 2976         pcb->pcb_gs = regs->r_gs;
 2977         return (0);
 2978 }
 2979 
 2980 int
 2981 fill_fpregs(struct thread *td, struct fpreg *fpregs)
 2982 {
 2983 
 2984         KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
 2985             P_SHOULDSTOP(td->td_proc),
 2986             ("not suspended thread %p", td));
 2987         npxgetregs(td);
 2988         if (cpu_fxsr)
 2989                 npx_fill_fpregs_xmm(&get_pcb_user_save_td(td)->sv_xmm,
 2990                     (struct save87 *)fpregs);
 2991         else
 2992                 bcopy(&get_pcb_user_save_td(td)->sv_87, fpregs,
 2993                     sizeof(*fpregs));
 2994         return (0);
 2995 }
 2996 
 2997 int
 2998 set_fpregs(struct thread *td, struct fpreg *fpregs)
 2999 {
 3000 
 3001         critical_enter();
 3002         if (cpu_fxsr)
 3003                 npx_set_fpregs_xmm((struct save87 *)fpregs,
 3004                     &get_pcb_user_save_td(td)->sv_xmm);
 3005         else
 3006                 bcopy(fpregs, &get_pcb_user_save_td(td)->sv_87,
 3007                     sizeof(*fpregs));
 3008         npxuserinited(td);
 3009         critical_exit();
 3010         return (0);
 3011 }
 3012 
 3013 /*
 3014  * Get machine context.
 3015  */
 3016 int
 3017 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
 3018 {
 3019         struct trapframe *tp;
 3020         struct segment_descriptor *sdp;
 3021 
 3022         tp = td->td_frame;
 3023 
 3024         PROC_LOCK(curthread->td_proc);
 3025         mcp->mc_onstack = sigonstack(tp->tf_esp);
 3026         PROC_UNLOCK(curthread->td_proc);
 3027         mcp->mc_gs = td->td_pcb->pcb_gs;
 3028         mcp->mc_fs = tp->tf_fs;
 3029         mcp->mc_es = tp->tf_es;
 3030         mcp->mc_ds = tp->tf_ds;
 3031         mcp->mc_edi = tp->tf_edi;
 3032         mcp->mc_esi = tp->tf_esi;
 3033         mcp->mc_ebp = tp->tf_ebp;
 3034         mcp->mc_isp = tp->tf_isp;
 3035         mcp->mc_eflags = tp->tf_eflags;
 3036         if (flags & GET_MC_CLEAR_RET) {
 3037                 mcp->mc_eax = 0;
 3038                 mcp->mc_edx = 0;
 3039                 mcp->mc_eflags &= ~PSL_C;
 3040         } else {
 3041                 mcp->mc_eax = tp->tf_eax;
 3042                 mcp->mc_edx = tp->tf_edx;
 3043         }
 3044         mcp->mc_ebx = tp->tf_ebx;
 3045         mcp->mc_ecx = tp->tf_ecx;
 3046         mcp->mc_eip = tp->tf_eip;
 3047         mcp->mc_cs = tp->tf_cs;
 3048         mcp->mc_esp = tp->tf_esp;
 3049         mcp->mc_ss = tp->tf_ss;
 3050         mcp->mc_len = sizeof(*mcp);
 3051         get_fpcontext(td, mcp, NULL, 0);
 3052         sdp = &td->td_pcb->pcb_fsd;
 3053         mcp->mc_fsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
 3054         sdp = &td->td_pcb->pcb_gsd;
 3055         mcp->mc_gsbase = sdp->sd_hibase << 24 | sdp->sd_lobase;
 3056         mcp->mc_flags = 0;
 3057         mcp->mc_xfpustate = 0;
 3058         mcp->mc_xfpustate_len = 0;
 3059         bzero(mcp->mc_spare2, sizeof(mcp->mc_spare2));
 3060         return (0);
 3061 }
 3062 
 3063 /*
 3064  * Set machine context.
 3065  *
 3066  * However, we don't set any but the user modifiable flags, and we won't
 3067  * touch the cs selector.
 3068  */
 3069 int
 3070 set_mcontext(struct thread *td, mcontext_t *mcp)
 3071 {
 3072         struct trapframe *tp;
 3073         char *xfpustate;
 3074         int eflags, ret;
 3075 
 3076         tp = td->td_frame;
 3077         if (mcp->mc_len != sizeof(*mcp) ||
 3078             (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
 3079                 return (EINVAL);
 3080         eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
 3081             (tp->tf_eflags & ~PSL_USERCHANGE);
 3082         if (mcp->mc_flags & _MC_HASFPXSTATE) {
 3083                 if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
 3084                     sizeof(union savefpu))
 3085                         return (EINVAL);
 3086                 xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
 3087                 ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
 3088                     mcp->mc_xfpustate_len);
 3089                 if (ret != 0)
 3090                         return (ret);
 3091         } else
 3092                 xfpustate = NULL;
 3093         ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
 3094         if (ret != 0)
 3095                 return (ret);
 3096         tp->tf_fs = mcp->mc_fs;
 3097         tp->tf_es = mcp->mc_es;
 3098         tp->tf_ds = mcp->mc_ds;
 3099         tp->tf_edi = mcp->mc_edi;
 3100         tp->tf_esi = mcp->mc_esi;
 3101         tp->tf_ebp = mcp->mc_ebp;
 3102         tp->tf_ebx = mcp->mc_ebx;
 3103         tp->tf_edx = mcp->mc_edx;
 3104         tp->tf_ecx = mcp->mc_ecx;
 3105         tp->tf_eax = mcp->mc_eax;
 3106         tp->tf_eip = mcp->mc_eip;
 3107         tp->tf_eflags = eflags;
 3108         tp->tf_esp = mcp->mc_esp;
 3109         tp->tf_ss = mcp->mc_ss;
 3110         td->td_pcb->pcb_gs = mcp->mc_gs;
 3111         return (0);
 3112 }
 3113 
 3114 static void
 3115 get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
 3116     size_t xfpusave_len)
 3117 {
 3118         size_t max_len, len;
 3119 
 3120         mcp->mc_ownedfp = npxgetregs(td);
 3121         bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
 3122             sizeof(mcp->mc_fpstate));
 3123         mcp->mc_fpformat = npxformat();
 3124         if (!use_xsave || xfpusave_len == 0)
 3125                 return;
 3126         max_len = cpu_max_ext_state_size - sizeof(union savefpu);
 3127         len = xfpusave_len;
 3128         if (len > max_len) {
 3129                 len = max_len;
 3130                 bzero(xfpusave + max_len, len - max_len);
 3131         }
 3132         mcp->mc_flags |= _MC_HASFPXSTATE;
 3133         mcp->mc_xfpustate_len = len;
 3134         bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
 3135 }
 3136 
 3137 static int
 3138 set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
 3139     size_t xfpustate_len)
 3140 {
 3141         union savefpu *fpstate;
 3142         int error;
 3143 
 3144         if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
 3145                 return (0);
 3146         else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
 3147             mcp->mc_fpformat != _MC_FPFMT_XMM)
 3148                 return (EINVAL);
 3149         else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
 3150                 /* We don't care what state is left in the FPU or PCB. */
 3151                 fpstate_drop(td);
 3152                 error = 0;
 3153         } else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
 3154             mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
 3155                 fpstate = (union savefpu *)&mcp->mc_fpstate;
 3156                 if (cpu_fxsr)
 3157                         fpstate->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
 3158                 error = npxsetregs(td, fpstate, xfpustate, xfpustate_len);
 3159         } else
 3160                 return (EINVAL);
 3161         return (error);
 3162 }
 3163 
 3164 static void
 3165 fpstate_drop(struct thread *td)
 3166 {
 3167 
 3168         KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
 3169         critical_enter();
 3170         if (PCPU_GET(fpcurthread) == td)
 3171                 npxdrop();
 3172         /*
 3173          * XXX force a full drop of the npx.  The above only drops it if we
 3174          * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
 3175          *
 3176          * XXX I don't much like npxgetregs()'s semantics of doing a full
 3177          * drop.  Dropping only to the pcb matches fnsave's behaviour.
 3178          * We only need to drop to !PCB_INITDONE in sendsig().  But
 3179          * sendsig() is the only caller of npxgetregs()... perhaps we just
 3180          * have too many layers.
 3181          */
 3182         curthread->td_pcb->pcb_flags &= ~(PCB_NPXINITDONE |
 3183             PCB_NPXUSERINITDONE);
 3184         critical_exit();
 3185 }
 3186 
 3187 int
 3188 fill_dbregs(struct thread *td, struct dbreg *dbregs)
 3189 {
 3190         struct pcb *pcb;
 3191 
 3192         if (td == NULL) {
 3193                 dbregs->dr[0] = rdr0();
 3194                 dbregs->dr[1] = rdr1();
 3195                 dbregs->dr[2] = rdr2();
 3196                 dbregs->dr[3] = rdr3();
 3197                 dbregs->dr[4] = rdr4();
 3198                 dbregs->dr[5] = rdr5();
 3199                 dbregs->dr[6] = rdr6();
 3200                 dbregs->dr[7] = rdr7();
 3201         } else {
 3202                 pcb = td->td_pcb;
 3203                 dbregs->dr[0] = pcb->pcb_dr0;
 3204                 dbregs->dr[1] = pcb->pcb_dr1;
 3205                 dbregs->dr[2] = pcb->pcb_dr2;
 3206                 dbregs->dr[3] = pcb->pcb_dr3;
 3207                 dbregs->dr[4] = 0;
 3208                 dbregs->dr[5] = 0;
 3209                 dbregs->dr[6] = pcb->pcb_dr6;
 3210                 dbregs->dr[7] = pcb->pcb_dr7;
 3211         }
 3212         return (0);
 3213 }
 3214 
 3215 int
 3216 set_dbregs(struct thread *td, struct dbreg *dbregs)
 3217 {
 3218         struct pcb *pcb;
 3219         int i;
 3220 
 3221         if (td == NULL) {
 3222                 load_dr0(dbregs->dr[0]);
 3223                 load_dr1(dbregs->dr[1]);
 3224                 load_dr2(dbregs->dr[2]);
 3225                 load_dr3(dbregs->dr[3]);
 3226                 load_dr4(dbregs->dr[4]);
 3227                 load_dr5(dbregs->dr[5]);
 3228                 load_dr6(dbregs->dr[6]);
 3229                 load_dr7(dbregs->dr[7]);
 3230         } else {
 3231                 /*
 3232                  * Don't let an illegal value for dr7 get set.  Specifically,
 3233                  * check for undefined settings.  Setting these bit patterns
 3234                  * result in undefined behaviour and can lead to an unexpected
 3235                  * TRCTRAP.
 3236                  */
 3237                 for (i = 0; i < 4; i++) {
 3238                         if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
 3239                                 return (EINVAL);
 3240                         if (DBREG_DR7_LEN(dbregs->dr[7], i) == 0x02)
 3241                                 return (EINVAL);
 3242                 }
 3243                 
 3244                 pcb = td->td_pcb;
 3245                 
 3246                 /*
 3247                  * Don't let a process set a breakpoint that is not within the
 3248                  * process's address space.  If a process could do this, it
 3249                  * could halt the system by setting a breakpoint in the kernel
 3250                  * (if ddb was enabled).  Thus, we need to check to make sure
 3251                  * that no breakpoints are being enabled for addresses outside
 3252                  * process's address space.
 3253                  *
 3254                  * XXX - what about when the watched area of the user's
 3255                  * address space is written into from within the kernel
 3256                  * ... wouldn't that still cause a breakpoint to be generated
 3257                  * from within kernel mode?
 3258                  */
 3259 
 3260                 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
 3261                         /* dr0 is enabled */
 3262                         if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
 3263                                 return (EINVAL);
 3264                 }
 3265                         
 3266                 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
 3267                         /* dr1 is enabled */
 3268                         if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
 3269                                 return (EINVAL);
 3270                 }
 3271                         
 3272                 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
 3273                         /* dr2 is enabled */
 3274                         if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
 3275                                 return (EINVAL);
 3276                 }
 3277                         
 3278                 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
 3279                         /* dr3 is enabled */
 3280                         if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
 3281                                 return (EINVAL);
 3282                 }
 3283 
 3284                 pcb->pcb_dr0 = dbregs->dr[0];
 3285                 pcb->pcb_dr1 = dbregs->dr[1];
 3286                 pcb->pcb_dr2 = dbregs->dr[2];
 3287                 pcb->pcb_dr3 = dbregs->dr[3];
 3288                 pcb->pcb_dr6 = dbregs->dr[6];
 3289                 pcb->pcb_dr7 = dbregs->dr[7];
 3290 
 3291                 pcb->pcb_flags |= PCB_DBREGS;
 3292         }
 3293 
 3294         return (0);
 3295 }
 3296 
 3297 /*
 3298  * Return > 0 if a hardware breakpoint has been hit, and the
 3299  * breakpoint was in user space.  Return 0, otherwise.
 3300  */
 3301 int
 3302 user_dbreg_trap(void)
 3303 {
 3304         u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */
 3305         u_int32_t bp;       /* breakpoint bits extracted from dr6 */
 3306         int nbp;            /* number of breakpoints that triggered */
 3307         caddr_t addr[4];    /* breakpoint addresses */
 3308         int i;
 3309         
 3310         dr7 = rdr7();
 3311         if ((dr7 & 0x000000ff) == 0) {
 3312                 /*
 3313                  * all GE and LE bits in the dr7 register are zero,
 3314                  * thus the trap couldn't have been caused by the
 3315                  * hardware debug registers
 3316                  */
 3317                 return 0;
 3318         }
 3319 
 3320         nbp = 0;
 3321         dr6 = rdr6();
 3322         bp = dr6 & 0x0000000f;
 3323 
 3324         if (!bp) {
 3325                 /*
 3326                  * None of the breakpoint bits are set meaning this
 3327                  * trap was not caused by any of the debug registers
 3328                  */
 3329                 return 0;
 3330         }
 3331 
 3332         /*
 3333          * at least one of the breakpoints were hit, check to see
 3334          * which ones and if any of them are user space addresses
 3335          */
 3336 
 3337         if (bp & 0x01) {
 3338                 addr[nbp++] = (caddr_t)rdr0();
 3339         }
 3340         if (bp & 0x02) {
 3341                 addr[nbp++] = (caddr_t)rdr1();
 3342         }
 3343         if (bp & 0x04) {
 3344                 addr[nbp++] = (caddr_t)rdr2();
 3345         }
 3346         if (bp & 0x08) {
 3347                 addr[nbp++] = (caddr_t)rdr3();
 3348         }
 3349 
 3350         for (i = 0; i < nbp; i++) {
 3351                 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
 3352                         /*
 3353                          * addr[i] is in user space
 3354                          */
 3355                         return nbp;
 3356                 }
 3357         }
 3358 
 3359         /*
 3360          * None of the breakpoints are in user space.
 3361          */
 3362         return 0;
 3363 }
 3364 
 3365 #ifdef KDB
 3366 
 3367 /*
 3368  * Provide inb() and outb() as functions.  They are normally only available as
 3369  * inline functions, thus cannot be called from the debugger.
 3370  */
 3371 
 3372 /* silence compiler warnings */
 3373 u_char inb_(u_short);
 3374 void outb_(u_short, u_char);
 3375 
 3376 u_char
 3377 inb_(u_short port)
 3378 {
 3379         return inb(port);
 3380 }
 3381 
 3382 void
 3383 outb_(u_short port, u_char data)
 3384 {
 3385         outb(port, data);
 3386 }
 3387 
 3388 #endif /* KDB */

Cache object: 35c1ef99b489e6b9a5e3052ef1e465ec


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.