FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/fpu.c

/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)npx.c 7.2 (Berkeley) 5/12/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/signalvar.h>
#include <vm/uma.h>

#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>

/*
 * Floating point support.
 */

#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define fldcw(cw)               __asm __volatile("fldcw %0" : : "m" (cw))
#define fnclex()                __asm __volatile("fnclex")
#define fninit()                __asm __volatile("fninit")
#define fnstcw(addr)            __asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define fnstsw(addr)            __asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define fxrstor(addr)           __asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define fxsave(addr)            __asm __volatile("fxsave %0" : "=m" (*(addr)))
#define ldmxcsr(csr)            __asm __volatile("ldmxcsr %0" : : "m" (csr))
#define stmxcsr(addr)           __asm __volatile("stmxcsr %0" : "=m" (*(addr)))

static __inline void
xrstor(char *addr, uint64_t mask)
{
        uint32_t low, hi;

        low = mask;
        hi = mask >> 32;
        __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
xsave(char *addr, uint64_t mask)
{
        uint32_t low, hi;

        low = mask;
        hi = mask >> 32;
        __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
            "memory");
}
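
/*
 * Both xsave and xrstor take the requested-feature bitmap in edx:eax;
 * the hardware ANDs it with XCR0 to select the state components to
 * save or restore.  For example, a hypothetical xsave_mask of 0x7
 * (x87 | SSE | AVX) loads %eax = 0x7 and %edx = 0x0 in the wrappers
 * above.
 */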

#else   /* !(__GNUCLIKE_ASM && !lint) */

void    fldcw(u_short cw);
void    fnclex(void);
void    fninit(void);
void    fnstcw(caddr_t addr);
void    fnstsw(caddr_t addr);
void    fxsave(caddr_t addr);
void    fxrstor(caddr_t addr);
void    ldmxcsr(u_int csr);
void    stmxcsr(u_int *csr);
void    xrstor(char *addr, uint64_t mask);
void    xsave(char *addr, uint64_t mask);

#endif  /* __GNUCLIKE_ASM && !lint */

#define start_emulating()       load_cr0(rcr0() | CR0_TS)
#define stop_emulating()        clts()

CTASSERT(sizeof(struct savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the pcb address.  The FPU save
 * area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 * area.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));

static  void    fpu_clean_state(void);

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");

int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");

int use_xsave;                  /* non-static for cpu_switch.S */
uint64_t xsave_mask;            /* the same */
static  uma_zone_t fpu_save_area_zone;
static  struct savefpu *fpu_initialstate;

struct xsave_area_elm_descr {
        u_int   offset;
        u_int   size;
} *xsave_area_desc;

void
fpusave(void *addr)
{

        if (use_xsave)
                xsave((char *)addr, xsave_mask);
        else
                fxsave((char *)addr);
}

void
fpurestore(void *addr)
{

        if (use_xsave)
                xrstor((char *)addr, xsave_mask);
        else
                fxrstor((char *)addr);
}

void
fpususpend(void *addr)
{
        u_long cr0;

        cr0 = rcr0();
        stop_emulating();
        fpusave(addr);
        load_cr0(cr0);
}

void
fpuresume(void *addr)
{
        u_long cr0;

        cr0 = rcr0();
        stop_emulating();
        fninit();
        if (use_xsave)
                load_xcr(XCR0, xsave_mask);
        fpurestore(addr);
        load_cr0(cr0);
}

/*
 * Enable XSAVE if supported and allowed by the user.
 * Calculate the xsave_mask.
 */
static void
fpuinit_bsp1(void)
{
        u_int cp[4];
        uint64_t xsave_mask_user;

        TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
        if ((cpu_feature2 & CPUID2_XSAVE) != 0) {
                use_xsave = 1;
                TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
        }
        if (!use_xsave)
                return;

        cpuid_count(0xd, 0x0, cp);
        xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
        if ((cp[0] & xsave_mask) != xsave_mask)
                panic("CPU0 does not support X87 or SSE: %x", cp[0]);
        xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
        xsave_mask_user = xsave_mask;
        TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
        xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
        xsave_mask &= xsave_mask_user;
        if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
                xsave_mask &= ~XFEATURE_AVX512;
        if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
                xsave_mask &= ~XFEATURE_MPX;

        cpuid_count(0xd, 0x1, cp);
        if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
                /*
                 * Patch the XSAVE instruction in the cpu_switch code
                 * to XSAVEOPT.  We assume that the XSAVE encoding
                 * includes a REX prefix, so the ModRM byte is at
                 * offset 3; setting bit 4 of it turns the /4 (XSAVE)
                 * opcode extension into /6 (XSAVEOPT).
                 */
                ctx_switch_xsave[3] |= 0x10;
        }
}
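
/*
 * As a worked example (with hypothetical values): a CPU reporting
 * CPUID.(EAX=0DH,ECX=0):EDX:EAX = 0x7 supports x87, SSE and AVX state.
 * If the loader tunable hw.xsave_mask is set to 0x1, the user mask is
 * first widened to 0x3 (x87 and SSE can never be opted out), so the
 * final xsave_mask becomes 0x7 & 0x3 = 0x3, dropping AVX.  The checks
 * above likewise clear the AVX-512 and MPX feature sets unless all of
 * their component bits survived the intersection.
 */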

/*
 * Calculate the fpu save area size.
 */
static void
fpuinit_bsp2(void)
{
        u_int cp[4];

        if (use_xsave) {
                cpuid_count(0xd, 0x0, cp);
                cpu_max_ext_state_size = cp[1];

                /*
                 * Reload the cpu_feature2, since we enabled OSXSAVE.
                 */
                do_cpuid(1, cp);
                cpu_feature2 = cp[2];
        } else
                cpu_max_ext_state_size = sizeof(struct savefpu);
}

/*
 * Initialize the floating point unit.
 */
void
fpuinit(void)
{
        register_t saveintr;
        u_int mxcsr;
        u_short control;

        if (IS_BSP())
                fpuinit_bsp1();

        if (use_xsave) {
                load_cr4(rcr4() | CR4_XSAVE);
                load_xcr(XCR0, xsave_mask);
        }

        /*
         * XCR0 shall be set up before the CPU can report the save area
         * size.
         */
        if (IS_BSP())
                fpuinit_bsp2();

        /*
         * It is too early for critical_enter() to work on an AP.
         */
        saveintr = intr_disable();
        stop_emulating();
        fninit();
        control = __INITIAL_FPUCW__;
        fldcw(control);
        mxcsr = __INITIAL_MXCSR__;
        ldmxcsr(mxcsr);
        start_emulating();
        intr_restore(saveintr);
}

/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
fpuinitstate(void *arg __unused)
{
        register_t saveintr;
        int cp[4], i, max_ext_n;

        fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
            M_WAITOK | M_ZERO);
        saveintr = intr_disable();
        stop_emulating();

        fpusave(fpu_initialstate);
        if (fpu_initialstate->sv_env.en_mxcsr_mask)
                cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
        else
                cpu_mxcsr_mask = 0xFFBF;

        /*
         * The fninit instruction does not modify XMM registers or the
         * x87 register file (MM/ST).  The fpusave call above therefore
         * dumped whatever garbage those registers held after reset
         * into the initial state.  Clear the XMM and x87 register
         * images so that the startup program state and the XMM/x87
         * register content seen by signal handlers are predictable.
         */
        bzero(fpu_initialstate->sv_fp, sizeof(fpu_initialstate->sv_fp));
        bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));

        /*
         * Create a table describing the layout of the CPU Extended
         * Save Area.
         */
        if (use_xsave) {
                max_ext_n = flsl(xsave_mask);
                xsave_area_desc = malloc(max_ext_n * sizeof(struct
                    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
                /* x87 state */
                xsave_area_desc[0].offset = 0;
                xsave_area_desc[0].size = 160;
                /* XMM */
                xsave_area_desc[1].offset = 160;
                xsave_area_desc[1].size = 288 - 160;

                for (i = 2; i < max_ext_n; i++) {
                        cpuid_count(0xd, i, cp);
                        xsave_area_desc[i].offset = cp[1];
                        xsave_area_desc[i].size = cp[0];
                }
        }

        fpu_save_area_zone = uma_zcreate("FPU_save_area",
            cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
            XSAVE_AREA_ALIGN - 1, 0);

        start_emulating();
        intr_restore(saveintr);
}
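
/*
 * The standard-format extended save area begins with the 512-byte
 * legacy FXSAVE image, followed by the 64-byte xstate header at offset
 * 512; the per-feature extended areas follow at the offsets reported
 * by CPUID leaf 0DH.  For example, on typical hardware the AVX state
 * (component 2) is reported at offset 576 with size 256, which is what
 * the cpuid_count() loop above records in xsave_area_desc[2].
 */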
/* EFIRT needs this to be initialized before we can enter our EFI environment */
SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_FIRST, fpuinitstate, NULL);

/*
 * Free coprocessor (if we have it).
 */
void
fpuexit(struct thread *td)
{

        critical_enter();
        if (curthread == PCPU_GET(fpcurthread)) {
                stop_emulating();
                fpusave(curpcb->pcb_save);
                start_emulating();
                PCPU_SET(fpcurthread, NULL);
        }
        critical_exit();
}

int
fpuformat(void)
{

        return (_MC_FPFMT_XMM);
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 128 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 6 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
 * 2  QNaN operand (not an exception, irrelevant here)
 * 3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 * 4  Denormal operand (FP_X_DNML)
 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 * 6  Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
        0,
        FPE_FLTINV,     /*  1 - INV */
        FPE_FLTUND,     /*  2 - DNML */
        FPE_FLTINV,     /*  3 - INV | DNML */
        FPE_FLTDIV,     /*  4 - DZ */
        FPE_FLTINV,     /*  5 - INV | DZ */
        FPE_FLTDIV,     /*  6 - DNML | DZ */
        FPE_FLTINV,     /*  7 - INV | DNML | DZ */
        FPE_FLTOVF,     /*  8 - OFL */
        FPE_FLTINV,     /*  9 - INV | OFL */
        FPE_FLTUND,     /*  A - DNML | OFL */
        FPE_FLTINV,     /*  B - INV | DNML | OFL */
        FPE_FLTDIV,     /*  C - DZ | OFL */
        FPE_FLTINV,     /*  D - INV | DZ | OFL */
        FPE_FLTDIV,     /*  E - DNML | DZ | OFL */
        FPE_FLTINV,     /*  F - INV | DNML | DZ | OFL */
        FPE_FLTUND,     /* 10 - UFL */
        FPE_FLTINV,     /* 11 - INV | UFL */
        FPE_FLTUND,     /* 12 - DNML | UFL */
        FPE_FLTINV,     /* 13 - INV | DNML | UFL */
        FPE_FLTDIV,     /* 14 - DZ | UFL */
        FPE_FLTINV,     /* 15 - INV | DZ | UFL */
        FPE_FLTDIV,     /* 16 - DNML | DZ | UFL */
        FPE_FLTINV,     /* 17 - INV | DNML | DZ | UFL */
        FPE_FLTOVF,     /* 18 - OFL | UFL */
        FPE_FLTINV,     /* 19 - INV | OFL | UFL */
        FPE_FLTUND,     /* 1A - DNML | OFL | UFL */
        FPE_FLTINV,     /* 1B - INV | DNML | OFL | UFL */
        FPE_FLTDIV,     /* 1C - DZ | OFL | UFL */
        FPE_FLTINV,     /* 1D - INV | DZ | OFL | UFL */
        FPE_FLTDIV,     /* 1E - DNML | DZ | OFL | UFL */
        FPE_FLTINV,     /* 1F - INV | DNML | DZ | OFL | UFL */
        FPE_FLTRES,     /* 20 - IMP */
        FPE_FLTINV,     /* 21 - INV | IMP */
        FPE_FLTUND,     /* 22 - DNML | IMP */
        FPE_FLTINV,     /* 23 - INV | DNML | IMP */
        FPE_FLTDIV,     /* 24 - DZ | IMP */
        FPE_FLTINV,     /* 25 - INV | DZ | IMP */
        FPE_FLTDIV,     /* 26 - DNML | DZ | IMP */
        FPE_FLTINV,     /* 27 - INV | DNML | DZ | IMP */
        FPE_FLTOVF,     /* 28 - OFL | IMP */
        FPE_FLTINV,     /* 29 - INV | OFL | IMP */
        FPE_FLTUND,     /* 2A - DNML | OFL | IMP */
        FPE_FLTINV,     /* 2B - INV | DNML | OFL | IMP */
        FPE_FLTDIV,     /* 2C - DZ | OFL | IMP */
        FPE_FLTINV,     /* 2D - INV | DZ | OFL | IMP */
        FPE_FLTDIV,     /* 2E - DNML | DZ | OFL | IMP */
        FPE_FLTINV,     /* 2F - INV | DNML | DZ | OFL | IMP */
        FPE_FLTUND,     /* 30 - UFL | IMP */
        FPE_FLTINV,     /* 31 - INV | UFL | IMP */
        FPE_FLTUND,     /* 32 - DNML | UFL | IMP */
        FPE_FLTINV,     /* 33 - INV | DNML | UFL | IMP */
        FPE_FLTDIV,     /* 34 - DZ | UFL | IMP */
        FPE_FLTINV,     /* 35 - INV | DZ | UFL | IMP */
        FPE_FLTDIV,     /* 36 - DNML | DZ | UFL | IMP */
        FPE_FLTINV,     /* 37 - INV | DNML | DZ | UFL | IMP */
        FPE_FLTOVF,     /* 38 - OFL | UFL | IMP */
        FPE_FLTINV,     /* 39 - INV | OFL | UFL | IMP */
        FPE_FLTUND,     /* 3A - DNML | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3B - INV | DNML | OFL | UFL | IMP */
        FPE_FLTDIV,     /* 3C - DZ | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3D - INV | DZ | OFL | UFL | IMP */
        FPE_FLTDIV,     /* 3E - DNML | DZ | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
        FPE_FLTSUB,     /* 40 - STK */
        FPE_FLTSUB,     /* 41 - INV | STK */
        FPE_FLTUND,     /* 42 - DNML | STK */
        FPE_FLTSUB,     /* 43 - INV | DNML | STK */
        FPE_FLTDIV,     /* 44 - DZ | STK */
        FPE_FLTSUB,     /* 45 - INV | DZ | STK */
        FPE_FLTDIV,     /* 46 - DNML | DZ | STK */
        FPE_FLTSUB,     /* 47 - INV | DNML | DZ | STK */
        FPE_FLTOVF,     /* 48 - OFL | STK */
        FPE_FLTSUB,     /* 49 - INV | OFL | STK */
        FPE_FLTUND,     /* 4A - DNML | OFL | STK */
        FPE_FLTSUB,     /* 4B - INV | DNML | OFL | STK */
        FPE_FLTDIV,     /* 4C - DZ | OFL | STK */
        FPE_FLTSUB,     /* 4D - INV | DZ | OFL | STK */
        FPE_FLTDIV,     /* 4E - DNML | DZ | OFL | STK */
        FPE_FLTSUB,     /* 4F - INV | DNML | DZ | OFL | STK */
        FPE_FLTUND,     /* 50 - UFL | STK */
        FPE_FLTSUB,     /* 51 - INV | UFL | STK */
        FPE_FLTUND,     /* 52 - DNML | UFL | STK */
        FPE_FLTSUB,     /* 53 - INV | DNML | UFL | STK */
        FPE_FLTDIV,     /* 54 - DZ | UFL | STK */
        FPE_FLTSUB,     /* 55 - INV | DZ | UFL | STK */
        FPE_FLTDIV,     /* 56 - DNML | DZ | UFL | STK */
        FPE_FLTSUB,     /* 57 - INV | DNML | DZ | UFL | STK */
        FPE_FLTOVF,     /* 58 - OFL | UFL | STK */
        FPE_FLTSUB,     /* 59 - INV | OFL | UFL | STK */
        FPE_FLTUND,     /* 5A - DNML | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5B - INV | DNML | OFL | UFL | STK */
        FPE_FLTDIV,     /* 5C - DZ | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5D - INV | DZ | OFL | UFL | STK */
        FPE_FLTDIV,     /* 5E - DNML | DZ | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5F - INV | DNML | DZ | OFL | UFL | STK */
        FPE_FLTRES,     /* 60 - IMP | STK */
        FPE_FLTSUB,     /* 61 - INV | IMP | STK */
        FPE_FLTUND,     /* 62 - DNML | IMP | STK */
        FPE_FLTSUB,     /* 63 - INV | DNML | IMP | STK */
        FPE_FLTDIV,     /* 64 - DZ | IMP | STK */
        FPE_FLTSUB,     /* 65 - INV | DZ | IMP | STK */
        FPE_FLTDIV,     /* 66 - DNML | DZ | IMP | STK */
        FPE_FLTSUB,     /* 67 - INV | DNML | DZ | IMP | STK */
        FPE_FLTOVF,     /* 68 - OFL | IMP | STK */
        FPE_FLTSUB,     /* 69 - INV | OFL | IMP | STK */
        FPE_FLTUND,     /* 6A - DNML | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6B - INV | DNML | OFL | IMP | STK */
        FPE_FLTDIV,     /* 6C - DZ | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6D - INV | DZ | OFL | IMP | STK */
        FPE_FLTDIV,     /* 6E - DNML | DZ | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6F - INV | DNML | DZ | OFL | IMP | STK */
        FPE_FLTUND,     /* 70 - UFL | IMP | STK */
        FPE_FLTSUB,     /* 71 - INV | UFL | IMP | STK */
        FPE_FLTUND,     /* 72 - DNML | UFL | IMP | STK */
        FPE_FLTSUB,     /* 73 - INV | DNML | UFL | IMP | STK */
        FPE_FLTDIV,     /* 74 - DZ | UFL | IMP | STK */
        FPE_FLTSUB,     /* 75 - INV | DZ | UFL | IMP | STK */
        FPE_FLTDIV,     /* 76 - DNML | DZ | UFL | IMP | STK */
        FPE_FLTSUB,     /* 77 - INV | DNML | DZ | UFL | IMP | STK */
        FPE_FLTOVF,     /* 78 - OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 79 - INV | OFL | UFL | IMP | STK */
        FPE_FLTUND,     /* 7A - DNML | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7B - INV | DNML | OFL | UFL | IMP | STK */
        FPE_FLTDIV,     /* 7C - DZ | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7D - INV | DZ | OFL | UFL | IMP | STK */
        FPE_FLTDIV,     /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX here.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware enough to enable
 * the exceptions can also handle clearing the exception state in the
 * handler.  The only consequence of not clearing the exception is the
 * rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
int
fputrap_x87(void)
{
        struct savefpu *pcb_save;
        u_short control, status;

        critical_enter();

        /*
         * Interrupt handling (for another interrupt) may have pushed the
         * state to memory.  Fetch the relevant parts of the state from
         * wherever they are.
         */
        if (PCPU_GET(fpcurthread) != curthread) {
                pcb_save = curpcb->pcb_save;
                control = pcb_save->sv_env.en_cw;
                status = pcb_save->sv_env.en_sw;
        } else {
                fnstcw(&control);
                fnstsw(&status);
        }

        critical_exit();
        return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}
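
/*
 * A worked example with hypothetical register values: if the process
 * unmasked only invalid-operation (control word 0x037e, IM clear) and
 * the status word reads 0x0021 (IE and PE set), then
 * (~control & 0x3f) | 0x40 == 0x41, status & 0x41 == 0x01, and
 * fpetable[0x01] yields FPE_FLTINV.  The masked precision exception
 * is discarded, and only one FPE_ code survives.
 */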

int
fputrap_sse(void)
{
        u_int mxcsr;

        critical_enter();
        if (PCPU_GET(fpcurthread) != curthread)
                mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
        else
                stmxcsr(&mxcsr);
        critical_exit();
        return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}
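
/*
 * The MXCSR analogue of the example above: exception flags live in
 * bits 0-5 and their mask bits in bits 7-12, so ~mxcsr >> 7 lines the
 * complemented mask bits up with the flags.  With a hypothetical
 * mxcsr of 0x1d84 (divide-by-zero unmasked and its flag set), the
 * expression reduces to 0x04 and fpetable[0x04] yields FPE_FLTDIV.
 */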

static void
restore_fpu_curthread(struct thread *td)
{
        struct pcb *pcb;

        /*
         * Record new context early in case frstor causes a trap.
         */
        PCPU_SET(fpcurthread, td);

        stop_emulating();
        fpu_clean_state();
        pcb = td->td_pcb;

        if ((pcb->pcb_flags & PCB_FPUINITDONE) == 0) {
                /*
                 * This is the first time this thread has used the FPU or
                 * the PCB doesn't contain a clean FPU state.  Explicitly
                 * load an initial state.
                 *
                 * We prefer to restore the state from the actual save
                 * area in the PCB instead of directly loading from
                 * fpu_initialstate, to prime the XSAVEOPT
                 * tracking engine.
                 */
                bcopy(fpu_initialstate, pcb->pcb_save,
                    cpu_max_ext_state_size);
                fpurestore(pcb->pcb_save);
                if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
                        fldcw(pcb->pcb_initial_fpucw);
                if (PCB_USER_FPU(pcb))
                        set_pcb_flags(pcb, PCB_FPUINITDONE |
                            PCB_USERFPUINITDONE);
                else
                        set_pcb_flags(pcb, PCB_FPUINITDONE);
        } else
                fpurestore(pcb->pcb_save);
}

/*
 * Device Not Available (DNA, #NM) exception handler.
 *
 * It would be better to switch FP context here (if curthread !=
 * fpcurthread) and not necessarily for every context switch, but it
 * is too hard to access foreign pcb's.
 */
void
fpudna(void)
{
        struct thread *td;

        td = curthread;
        /*
         * This handler is entered with interrupts enabled, so context
         * switches may occur before critical_enter() is executed.  If
         * a context switch occurs, then when we regain control, our
         * state will have been completely restored.  The CPU may
         * change underneath us, but the only part of our context that
         * lives in the CPU is CR0.TS and that will be "restored" by
         * setting it on the new CPU.
         */
        critical_enter();

        KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
            ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
        if (__predict_false(PCPU_GET(fpcurthread) == td)) {
                /*
                 * Some virtual machines seem to set %cr0.TS at
                 * arbitrary moments.  Silently clear the TS bit
                 * regardless of the eager/lazy FPU context switch
                 * mode.
                 */
                stop_emulating();
        } else {
                if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
                        panic(
                    "fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
                            PCPU_GET(fpcurthread),
                            PCPU_GET(fpcurthread)->td_tid, td, td->td_tid);
                }
                restore_fpu_curthread(td);
        }
        critical_exit();
}

void fpu_activate_sw(struct thread *td); /* Called from the context switch */
void
fpu_activate_sw(struct thread *td)
{

        if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
            !PCB_USER_FPU(td->td_pcb)) {
                PCPU_SET(fpcurthread, NULL);
                start_emulating();
        } else if (PCPU_GET(fpcurthread) != td) {
                restore_fpu_curthread(td);
        }
}

void
fpudrop(void)
{
        struct thread *td;

        td = PCPU_GET(fpcurthread);
        KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
        CRITICAL_ASSERT(td);
        PCPU_SET(fpcurthread, NULL);
        clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
        start_emulating();
}

/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
fpugetregs(struct thread *td)
{
        struct pcb *pcb;
        uint64_t *xstate_bv, bit;
        char *sa;
        int max_ext_n, i, owned;

        pcb = td->td_pcb;
        critical_enter();
        if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
                bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
                    cpu_max_ext_state_size);
                get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
                    pcb->pcb_initial_fpucw;
                fpuuserinited(td);
                critical_exit();
                return (_MC_FPOWNED_PCB);
        }
        if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
                fpusave(get_pcb_user_save_pcb(pcb));
                owned = _MC_FPOWNED_FPU;
        } else {
                owned = _MC_FPOWNED_PCB;
        }
        if (use_xsave) {
                /*
                 * Handle partially saved state: XSAVE writes a
                 * component's area, and sets its bit in xstate_bv,
                 * only if that component was in use.  Fill the
                 * untouched areas from fpu_initialstate and set their
                 * bits so that userland always sees a fully populated
                 * save area.
                 */
                sa = (char *)get_pcb_user_save_pcb(pcb);
                xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
                    offsetof(struct xstate_hdr, xstate_bv));
                max_ext_n = flsl(xsave_mask);
                for (i = 0; i < max_ext_n; i++) {
                        bit = 1ULL << i;
                        if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
                                continue;
                        bcopy((char *)fpu_initialstate +
                            xsave_area_desc[i].offset,
                            sa + xsave_area_desc[i].offset,
                            xsave_area_desc[i].size);
                        *xstate_bv |= bit;
                }
        }
        critical_exit();
        return (owned);
}

void
fpuuserinited(struct thread *td)
{
        struct pcb *pcb;

        CRITICAL_ASSERT(td);
        pcb = td->td_pcb;
        if (PCB_USER_FPU(pcb))
                set_pcb_flags(pcb,
                    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
        else
                set_pcb_flags(pcb, PCB_FPUINITDONE);
}

int
fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
        struct xstate_hdr *hdr, *ehdr;
        size_t len, max_len;
        uint64_t bv;

        /* XXXKIB should we clear all extended state in xstate_bv instead ? */
        if (xfpustate == NULL)
                return (0);
        if (!use_xsave)
                return (EOPNOTSUPP);

        len = xfpustate_size;
        if (len < sizeof(struct xstate_hdr))
                return (EINVAL);
        max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
        if (len > max_len)
                return (EINVAL);

        ehdr = (struct xstate_hdr *)xfpustate;
        bv = ehdr->xstate_bv;

        /*
         * Avoid #gp:  XRSTOR faults if xstate_bv requests components
         * that are not enabled in XCR0.
         */
        if (bv & ~xsave_mask)
                return (EINVAL);

        hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

        hdr->xstate_bv = bv;
        bcopy(xfpustate + sizeof(struct xstate_hdr),
            (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

        return (0);
}

/*
 * Set the state of the FPU.
 */
int
fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
    size_t xfpustate_size)
{
        struct pcb *pcb;
        int error;

        addr->sv_env.en_mxcsr &= cpu_mxcsr_mask;
        pcb = td->td_pcb;
        error = 0;
        critical_enter();
        if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
                error = fpusetxstate(td, xfpustate, xfpustate_size);
                if (error == 0) {
                        bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
                        fpurestore(get_pcb_user_save_td(td));
                        set_pcb_flags(pcb, PCB_FPUINITDONE |
                            PCB_USERFPUINITDONE);
                }
        } else {
                error = fpusetxstate(td, xfpustate, xfpustate_size);
                if (error == 0) {
                        bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
                        fpuuserinited(td);
                }
        }
        critical_exit();
        return (error);
}

/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
        static float dummy_variable = 0.0;
        u_short status;

        /*
         * Clear the ES bit in the x87 status word if it is currently
         * set, in order to avoid causing a fault in the upcoming load.
         */
        fnstsw(&status);
        if (status & 0x80)
                fnclex();

        /*
         * Load the dummy variable into the x87 stack.  This mangles
         * the x87 stack, but we don't care since we're about to call
         * fxrstor() anyway.
         */
        __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}

/*
 * This really sucks.  We want the acpi version only, but it requires
 * the isa_if.h file in order to get the definitions.
 */
#include "opt_isa.h"
#ifdef DEV_ISA
#include <isa/isavar.h>
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id fpupnp_ids[] = {
        { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
        { 0 }
};

static int
fpupnp_probe(device_t dev)
{
        int result;

        result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
        if (result <= 0)
                device_quiet(dev);
        return (result);
}

static int
fpupnp_attach(device_t dev)
{

        return (0);
}

static device_method_t fpupnp_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         fpupnp_probe),
        DEVMETHOD(device_attach,        fpupnp_attach),
        DEVMETHOD(device_detach,        bus_generic_detach),
        DEVMETHOD(device_shutdown,      bus_generic_shutdown),
        DEVMETHOD(device_suspend,       bus_generic_suspend),
        DEVMETHOD(device_resume,        bus_generic_resume),

        { 0, 0 }
};

static driver_t fpupnp_driver = {
        "fpupnp",
        fpupnp_methods,
        1,                      /* no softc */
};

static devclass_t fpupnp_devclass;

DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
#endif  /* DEV_ISA */

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define FPU_KERN_CTX_FPUINITDONE 0x01
#define FPU_KERN_CTX_DUMMY       0x02   /* avoided save for the kern thread */
#define FPU_KERN_CTX_INUSE       0x04

struct fpu_kern_ctx {
        struct savefpu *prev;
        uint32_t flags;
        char hwstate1[];
};

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
        struct fpu_kern_ctx *res;
        size_t sz;

        sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
            cpu_max_ext_state_size;
        res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
            M_NOWAIT : M_WAITOK) | M_ZERO);
        return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

        KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
        /* XXXKIB clear the memory ? */
        free(ctx, M_FPUKERN_CTX);
}

static struct savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
        vm_offset_t p;

        p = (vm_offset_t)&ctx->hwstate1;
        p = roundup2(p, XSAVE_AREA_ALIGN);
        return ((struct savefpu *)p);
}

int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
        struct pcb *pcb;

        pcb = td->td_pcb;
        KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
            ("ctx is required when !FPU_KERN_NOCTX"));
        KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
            ("using inuse ctx"));
        KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
            ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));

        if ((flags & FPU_KERN_NOCTX) != 0) {
                critical_enter();
                stop_emulating();
                if (curthread == PCPU_GET(fpcurthread)) {
                        fpusave(curpcb->pcb_save);
                        PCPU_SET(fpcurthread, NULL);
                } else {
                        KASSERT(PCPU_GET(fpcurthread) == NULL,
                            ("invalid fpcurthread"));
                }

                /*
                 * This breaks the XSAVEOPT tracker, but the
                 * PCB_FPUNOSAVE state is supposed to never need to
                 * save the FPU context at all.
                 */
                fpurestore(fpu_initialstate);
                set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
                    PCB_FPUINITDONE);
                return (0);
        }
        if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
                ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
                return (0);
        }
        critical_enter();
        KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
            get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
        ctx->flags = FPU_KERN_CTX_INUSE;
        if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
                ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
        fpuexit(td);
        ctx->prev = pcb->pcb_save;
        pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
        set_pcb_flags(pcb, PCB_KERNFPU);
        clear_pcb_flags(pcb, PCB_FPUINITDONE);
        critical_exit();
        return (0);
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
        struct pcb *pcb;

        pcb = td->td_pcb;

        if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
                KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
                KASSERT(PCPU_GET(fpcurthread) == NULL,
                    ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
                CRITICAL_ASSERT(td);

                clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);
                start_emulating();
        } else {
                KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
                    ("leaving not inuse ctx"));
                ctx->flags &= ~FPU_KERN_CTX_INUSE;

                if (is_fpu_kern_thread(0) &&
                    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
                        return (0);
                KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
                    ("dummy ctx"));
                critical_enter();
                if (curthread == PCPU_GET(fpcurthread))
                        fpudrop();
                pcb->pcb_save = ctx->prev;
        }

        if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
                if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
                        set_pcb_flags(pcb, PCB_FPUINITDONE);
                        clear_pcb_flags(pcb, PCB_KERNFPU);
                } else
                        clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
        } else {
                if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
                        set_pcb_flags(pcb, PCB_FPUINITDONE);
                else
                        clear_pcb_flags(pcb, PCB_FPUINITDONE);
                KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
        }
        critical_exit();
        return (0);
}
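
/*
 * A minimal usage sketch for the fpu_kern_*() API, for kernel code
 * that wants to execute FPU/SIMD instructions (the SIMD work itself
 * is elided; FPU_KERN_NORMAL requests the ordinary save/restore
 * behavior):
 *
 *      struct fpu_kern_ctx *ctx;
 *
 *      ctx = fpu_kern_alloc_ctx(0);
 *      fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *      ... use XMM/YMM registers ...
 *      fpu_kern_leave(curthread, ctx);
 *      fpu_kern_free_ctx(ctx);
 */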

int
fpu_kern_thread(u_int flags)
{

        KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
            ("Only kthread may use fpu_kern_thread"));
        KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
            ("mangled pcb_save"));
        KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

        set_pcb_flags(curpcb, PCB_KERNFPU);
        return (0);
}

int
is_fpu_kern_thread(u_int flags)
{

        if ((curthread->td_pflags & TDP_KTHREAD) == 0)
                return (0);
        return ((curpcb->pcb_flags & PCB_KERNFPU) != 0);
}

/*
 * FPU save area alloc/free/init utility routines
 */
struct savefpu *
fpu_save_area_alloc(void)
{

        return (uma_zalloc(fpu_save_area_zone, 0));
}

void
fpu_save_area_free(struct savefpu *fsa)
{

        uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(struct savefpu *fsa)
{

        bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
}
