sys/i386/i386/npx.c
1 /*-
2 * Copyright (c) 1990 William Jolitz.
3 * Copyright (c) 1991 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #include "opt_cpu.h"
37 #include "opt_isa.h"
38 #include "opt_npx.h"
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/mutex.h>
49 #include <sys/proc.h>
50 #include <sys/smp.h>
51 #include <sys/sysctl.h>
52 #include <machine/bus.h>
53 #include <sys/rman.h>
54 #ifdef NPX_DEBUG
55 #include <sys/syslog.h>
56 #endif
57 #include <sys/signalvar.h>
58 #include <vm/uma.h>
59
60 #include <machine/asmacros.h>
61 #include <machine/cputypes.h>
62 #include <machine/frame.h>
63 #include <machine/md_var.h>
64 #include <machine/pcb.h>
65 #include <machine/psl.h>
66 #include <machine/resource.h>
67 #include <machine/specialreg.h>
68 #include <machine/segments.h>
69 #include <machine/ucontext.h>
70 #include <x86/ifunc.h>
71
72 #include <machine/intr_machdep.h>
73
74 #ifdef DEV_ISA
75 #include <isa/isavar.h>
76 #endif
77
78 /*
79 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
80 */
81
82 #if defined(__GNUCLIKE_ASM) && !defined(lint)
83
84 #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw))
85 #define fnclex() __asm __volatile("fnclex")
86 #define fninit() __asm __volatile("fninit")
87 #define fnsave(addr) __asm __volatile("fnsave %0" : "=m" (*(addr)))
88 #define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr)))
89 #define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr)))
90 #define fp_divide_by_0() __asm __volatile( \
91 "fldz; fld1; fdiv %st,%st(1); fnop")
92 #define frstor(addr) __asm __volatile("frstor %0" : : "m" (*(addr)))
93 #define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr)))
94 #define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr)))
95 #define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr))
96 #define stmxcsr(addr) __asm __volatile("stmxcsr %0" : : "m" (*(addr)))
97
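/*
 * The xsave-family instructions take the requested-feature bitmap in
 * edx:eax, so the helpers below split the 64-bit mask into the "a" (low)
 * and "d" (hi) operands.
 */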
98 static __inline void
99 xrstor(char *addr, uint64_t mask)
100 {
101 uint32_t low, hi;
102
103 low = mask;
104 hi = mask >> 32;
105 __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
106 }
107
108 static __inline void
109 xsave(char *addr, uint64_t mask)
110 {
111 uint32_t low, hi;
112
113 low = mask;
114 hi = mask >> 32;
115 __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
116 "memory");
117 }
118
119 static __inline void
120 xsaveopt(char *addr, uint64_t mask)
121 {
122 uint32_t low, hi;
123
124 low = mask;
125 hi = mask >> 32;
126 __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
127 "memory");
128 }
129 #else /* !(__GNUCLIKE_ASM && !lint) */
130
131 void fldcw(u_short cw);
132 void fnclex(void);
133 void fninit(void);
134 void fnsave(caddr_t addr);
135 void fnstcw(caddr_t addr);
136 void fnstsw(caddr_t addr);
137 void fp_divide_by_0(void);
138 void frstor(caddr_t addr);
139 void fxsave(caddr_t addr);
140 void fxrstor(caddr_t addr);
141 void ldmxcsr(u_int csr);
142 void stmxcsr(u_int *csr);
143 void xrstor(char *addr, uint64_t mask);
144 void xsave(char *addr, uint64_t mask);
145 void xsaveopt(char *addr, uint64_t mask);
146
147 #endif /* __GNUCLIKE_ASM && !lint */
148
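/*
 * start_emulating()/stop_emulating() implement the CR0.TS protocol: while
 * TS is set, the next FPU/MMX/SSE instruction raises #NM (T_DNA), which
 * lets npxdna() load the correct thread's context on demand; clts clears
 * TS so FPU instructions execute normally again.
 */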
149 #define start_emulating() load_cr0(rcr0() | CR0_TS)
150 #define stop_emulating() clts()
151
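/*
 * The accessors below select between the FXSAVE (sv_xmm) and legacy
 * FNSAVE (sv_87) layouts of union savefpu, depending on cpu_fxsr.
 */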
152 #define GET_FPU_CW(thread) \
153 (cpu_fxsr ? \
154 (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
155 (thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
156 #define GET_FPU_SW(thread) \
157 (cpu_fxsr ? \
158 (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
159 (thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
160 #define SET_FPU_CW(savefpu, value) do { \
161 if (cpu_fxsr) \
162 (savefpu)->sv_xmm.sv_env.en_cw = (value); \
163 else \
164 (savefpu)->sv_87.sv_env.en_cw = (value); \
165 } while (0)
166
167 CTASSERT(sizeof(union savefpu) == 512);
168 CTASSERT(sizeof(struct xstate_hdr) == 64);
169 CTASSERT(sizeof(struct savefpu_ymm) == 832);
170
171 /*
172 * This requirement is to make it easier for asm code to calculate
173 * the offset of the fpu save area from the pcb address.  The FPU save
174 * area must be 64-byte aligned.
175 */
176 CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
177
178 /*
179 * Ensure the copy of XCR0 saved in a core is contained in the padding
180 * area.
181 */
182 CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) &&
183 X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm));
184
185 static void fpu_clean_state(void);
186
187 static void fpurstor(union savefpu *);
188
189 int hw_float;
190
191 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
192 &hw_float, 0, "Floating point instructions executed in hardware");
193
194 int lazy_fpu_switch = 0;
195 SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
196 &lazy_fpu_switch, 0,
197 "Lazily load FPU context after context switch");
198
199 int use_xsave;
200 uint64_t xsave_mask;
201 static uma_zone_t fpu_save_area_zone;
202 static union savefpu *npx_initialstate;
203
204 static struct xsave_area_elm_descr {
205 u_int offset;
206 u_int size;
207 } *xsave_area_desc;
208
209 static volatile u_int npx_traps_while_probing;
210
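/*
 * probetrap is a minimal #MF handler installed by npx_probe() while
 * probing: it counts the trap in npx_traps_while_probing, clears the
 * pending exception with fnclex and returns, so the probe can tell
 * whether exception 16 reporting actually works.
 */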
211 alias_for_inthand_t probetrap;
212 __asm(" \n\
213 .text \n\
214 .p2align 2,0x90 \n\
215 .type " __XSTRING(CNAME(probetrap)) ",@function \n\
216 " __XSTRING(CNAME(probetrap)) ": \n\
217 ss \n\
218 incl " __XSTRING(CNAME(npx_traps_while_probing)) " \n\
219 fnclex \n\
220 iret \n\
221 ");
222
223 /*
224 * Determine if an FPU is present and how to use it.
225 */
226 static int
227 npx_probe(void)
228 {
229 struct gate_descriptor save_idt_npxtrap;
230 u_short control, status;
231
232 /*
233 * Modern CPUs all have an FPU that uses the INT16 interface
234 * and provide a simple way to verify that, so handle the
235 * common case right away.
236 */
237 if (cpu_feature & CPUID_FPU) {
238 hw_float = 1;
239 return (1);
240 }
241
242 save_idt_npxtrap = idt[IDT_MF];
243 setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
244 GSEL(GCODE_SEL, SEL_KPL));
245
246 /*
247 * Don't trap while we're probing.
248 */
249 stop_emulating();
250
251 /*
252 * Finish resetting the coprocessor, if any. If there is an error
253 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
254 * it OK. Bogus halts have never been observed, but we enabled
255 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
256 */
257 fninit();
258
259 /*
260 * Don't use fwait here because it might hang.
261 * Don't use fnop here because it usually hangs if there is no FPU.
262 */
263 DELAY(1000); /* wait for any IRQ13 */
264 #ifdef DIAGNOSTIC
265 if (npx_traps_while_probing != 0)
266 printf("fninit caused %u bogus npx trap(s)\n",
267 npx_traps_while_probing);
268 #endif
269 /*
270 * Check for a status of mostly zero.
271 */
272 status = 0x5a5a;
273 fnstsw(&status);
274 if ((status & 0xb8ff) == 0) {
275 /*
276 * Good, now check for a proper control word.
277 */
278 control = 0x5a5a;
279 fnstcw(&control);
280 if ((control & 0x1f3f) == 0x033f) {
281 /*
282 * We have an npx, now divide by 0 to see if exception
283 * 16 works.
284 */
285 control &= ~(1 << 2); /* enable divide by 0 trap */
286 fldcw(control);
287 npx_traps_while_probing = 0;
288 fp_divide_by_0();
289 if (npx_traps_while_probing != 0) {
290 /*
291 * Good, exception 16 works.
292 */
293 hw_float = 1;
294 goto cleanup;
295 }
296 printf(
297 "FPU does not use exception 16 for error reporting\n");
298 goto cleanup;
299 }
300 }
301
302 /*
303 * Probe failed. Floating point simply won't work.
304 * Notify user and disable FPU/MMX/SSE instruction execution.
305 */
306 printf("WARNING: no FPU!\n");
307 __asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
308 "n" (CR0_EM | CR0_MP) : "ax");
309
310 cleanup:
311 idt[IDT_MF] = save_idt_npxtrap;
312 return (hw_float);
313 }
314
315 static void
316 fpusave_xsaveopt(union savefpu *addr)
317 {
318
319 xsaveopt((char *)addr, xsave_mask);
320 }
321
322 static void
323 fpusave_xsave(union savefpu *addr)
324 {
325
326 xsave((char *)addr, xsave_mask);
327 }
328
329 static void
330 fpusave_fxsave(union savefpu *addr)
331 {
332
333 fxsave((char *)addr);
334 }
335
336 static void
337 fpusave_fnsave(union savefpu *addr)
338 {
339
340 fnsave((char *)addr);
341 }
342
343 static void
344 init_xsave(void)
345 {
346
347 if (use_xsave)
348 return;
349 if (!cpu_fxsr || (cpu_feature2 & CPUID2_XSAVE) == 0)
350 return;
351 use_xsave = 1;
352 TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
353 }
354
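/*
 * fpusave() is resolved once through the ifunc below, picking the best
 * available save primitive: xsaveopt, then xsave, fxsave and finally
 * plain fnsave.
 */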
355 DEFINE_IFUNC(, void, fpusave, (union savefpu *))
356 {
357
358 init_xsave();
359 if (use_xsave)
360 return ((cpu_stdext_feature & CPUID_EXTSTATE_XSAVEOPT) != 0 ?
361 fpusave_xsaveopt : fpusave_xsave);
362 if (cpu_fxsr)
363 return (fpusave_fxsave);
364 return (fpusave_fnsave);
365 }
366
367 /*
368 * Enable XSAVE if supported and allowed by user.
369 * Calculate the xsave_mask.
370 */
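/*
 * CPUID leaf 0xd, sub-leaf 0 reports the supported extended-state bits in
 * edx:eax (cp[3]:cp[0] below).  The resulting mask is trimmed by the
 * hw.xsave_mask tunable, and the AVX-512 and MPX component groups are
 * kept only while all of their components remain enabled.
 */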
371 static void
372 npxinit_bsp1(void)
373 {
374 u_int cp[4];
375 uint64_t xsave_mask_user;
376
377 TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
378 if (!use_xsave)
379 return;
380 cpuid_count(0xd, 0x0, cp);
381 xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
382 if ((cp[0] & xsave_mask) != xsave_mask)
383 panic("CPU0 does not support X87 or SSE: %x", cp[0]);
384 xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
385 xsave_mask_user = xsave_mask;
386 TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user);
387 xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
388 xsave_mask &= xsave_mask_user;
389 if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
390 xsave_mask &= ~XFEATURE_AVX512;
391 if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
392 xsave_mask &= ~XFEATURE_MPX;
393 }
394
395 /*
396 * Calculate the fpu save area size.
397 */
398 static void
399 npxinit_bsp2(void)
400 {
401 u_int cp[4];
402
403 if (use_xsave) {
404 cpuid_count(0xd, 0x0, cp);
405 cpu_max_ext_state_size = cp[1];
406
407 /*
408 * Reload the cpu_feature2, since we enabled OSXSAVE.
409 */
410 do_cpuid(1, cp);
411 cpu_feature2 = cp[2];
412 } else
413 cpu_max_ext_state_size = sizeof(union savefpu);
414 }
415
416 /*
417 * Initialize floating point unit.
418 */
419 void
420 npxinit(bool bsp)
421 {
422 static union savefpu dummy;
423 register_t saveintr;
424 u_int mxcsr;
425 u_short control;
426
427 if (bsp) {
428 if (!npx_probe())
429 return;
430 npxinit_bsp1();
431 }
432
433 if (use_xsave) {
434 load_cr4(rcr4() | CR4_XSAVE);
435 load_xcr(XCR0, xsave_mask);
436 }
437
438 /*
439 * XCR0 shall be set up before the CPU can report the save area size.
440 */
441 if (bsp)
442 npxinit_bsp2();
443
444 /*
445 * fninit has the same h/w bugs as fnsave. Use the detoxified
446 * fnsave to throw away any junk in the fpu. fpusave() initializes
447 * the fpu.
448 *
449 * It is too early for critical_enter() to work on AP.
450 */
451 saveintr = intr_disable();
452 stop_emulating();
453 if (cpu_fxsr)
454 fninit();
455 else
456 fnsave(&dummy);
457 control = __INITIAL_NPXCW__;
458 fldcw(control);
459 if (cpu_fxsr) {
460 mxcsr = __INITIAL_MXCSR__;
461 ldmxcsr(mxcsr);
462 }
463 start_emulating();
464 intr_restore(saveintr);
465 }
466
467 /*
468 * On the boot CPU we generate a clean state that is used to
469 * initialize the floating point unit when it is first used by a
470 * process.
471 */
472 static void
473 npxinitstate(void *arg __unused)
474 {
475 uint64_t *xstate_bv;
476 register_t saveintr;
477 int cp[4], i, max_ext_n;
478
479 if (!hw_float)
480 return;
481
482 /* Do potentially blocking operations before disabling interrupts. */
483 fpu_save_area_zone = uma_zcreate("FPU_save_area",
484 cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
485 XSAVE_AREA_ALIGN - 1, 0);
486 npx_initialstate = uma_zalloc(fpu_save_area_zone, M_WAITOK | M_ZERO);
487 if (use_xsave) {
488 if (xsave_mask >> 32 != 0)
489 max_ext_n = fls(xsave_mask >> 32) + 32;
490 else
491 max_ext_n = fls(xsave_mask);
492 xsave_area_desc = malloc(max_ext_n * sizeof(struct
493 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
494 }
495
496 saveintr = intr_disable();
497 stop_emulating();
498
499 if (cpu_fxsr)
500 fpusave_fxsave(npx_initialstate);
501 else
502 fpusave_fnsave(npx_initialstate);
503 if (cpu_fxsr) {
504 if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask)
505 cpu_mxcsr_mask =
506 npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask;
507 else
508 cpu_mxcsr_mask = 0xFFBF;
509
510 /*
511 * The fninit instruction does not modify XMM
512 * registers or x87 registers (MM/ST). The fpusave
513 * call dumped whatever garbage the registers held after
514 * reset into the initial state save area.  Clear the XMM
515 * and x87 register file image to make the startup
516 * program state and signal handler XMM/x87 register
517 * content predictable.
518 */
519 bzero(npx_initialstate->sv_xmm.sv_fp,
520 sizeof(npx_initialstate->sv_xmm.sv_fp));
521 bzero(npx_initialstate->sv_xmm.sv_xmm,
522 sizeof(npx_initialstate->sv_xmm.sv_xmm));
523
524 } else
525 bzero(npx_initialstate->sv_87.sv_ac,
526 sizeof(npx_initialstate->sv_87.sv_ac));
527
528 /*
529 * Create a table describing the layout of the CPU Extended
530 * Save Area. See Intel SDM rev. 075 Vol. 1 13.4.1 "Legacy
531 * Region of an XSAVE Area" for the source of offsets/sizes.
532 * Note that 32bit XSAVE does not use %xmm8-%xmm15, see
533 * 10.5.1.2 and 13.5.2 "SSE State".
534 */
535 if (use_xsave) {
536 xstate_bv = (uint64_t *)((char *)(npx_initialstate + 1) +
537 offsetof(struct xstate_hdr, xstate_bv));
538 *xstate_bv = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
539
540 /* x87 state */
541 xsave_area_desc[0].offset = 0;
542 xsave_area_desc[0].size = 160;
543 /* XMM */
544 xsave_area_desc[1].offset = 160;
545 xsave_area_desc[1].size = 288 - 160;
546
547 for (i = 2; i < max_ext_n; i++) {
548 cpuid_count(0xd, i, cp);
549 xsave_area_desc[i].offset = cp[1];
550 xsave_area_desc[i].size = cp[0];
551 }
552 }
553
554 start_emulating();
555 intr_restore(saveintr);
556 }
557 SYSINIT(npxinitstate, SI_SUB_CPU, SI_ORDER_ANY, npxinitstate, NULL);
558
559 /*
560 * Free coprocessor (if we have it).
561 */
562 void
563 npxexit(struct thread *td)
564 {
565
566 critical_enter();
567 if (curthread == PCPU_GET(fpcurthread)) {
568 stop_emulating();
569 fpusave(curpcb->pcb_save);
570 start_emulating();
571 PCPU_SET(fpcurthread, NULL);
572 }
573 critical_exit();
574 #ifdef NPX_DEBUG
575 if (hw_float) {
576 u_int masked_exceptions;
577
578 masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
579 /*
580 * Log exceptions that would have trapped with the old
581 * control word (overflow, divide by 0, and invalid operand).
582 */
583 if (masked_exceptions & 0x0d)
584 log(LOG_ERR,
585 "pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
586 td->td_proc->p_pid, td->td_proc->p_comm,
587 masked_exceptions);
588 }
589 #endif
590 }
591
592 int
593 npxformat(void)
594 {
595
596 if (!hw_float)
597 return (_MC_FPFMT_NODEV);
598 if (cpu_fxsr)
599 return (_MC_FPFMT_XMM);
600 return (_MC_FPFMT_387);
601 }
602
603 /*
604 * The following mechanism is used to ensure that the FPE_... value
605 * that is passed as a trapcode to the signal handler of the user
606 * process does not have more than one bit set.
607 *
608 * Multiple bits may be set if the user process modifies the control
609 * word while a status word bit is already set. While this is a sign
610 * of bad coding, we have no choice but to narrow them down to one
611 * bit, since we must not send a trapcode that is not exactly one of
612 * the FPE_ macros.
613 *
614 * The mechanism has a static table with 127 entries. Each combination
615 * of the 7 FPU status word exception bits directly translates to a
616 * position in this table, where a single FPE_... value is stored.
617 * This FPE_... value stored there is considered the "most important"
618 * of the exception bits and will be sent as the signal code. The
619 * precedence of the bits is based upon Intel Document "Numerical
620 * Applications", Chapter "Special Computational Situations".
621 *
622 * The macro to choose one of these values does these steps: 1) Throw
623 * away status word bits that cannot be masked. 2) Throw away the bits
624 * currently masked in the control word, assuming the user isn't
625 * interested in them anymore. 3) Reinsert status word bit 7 (stack
626 * fault) if it is set, which cannot be masked but must be preserved.
627 * 4) Use the remaining bits to point into the trapcode table.
628 *
629 * The 6 maskable bits in order of their preference, as stated in the
630 * above referenced Intel manual:
631 * 1 Invalid operation (FP_X_INV)
632 * 1a Stack underflow
633 * 1b Stack overflow
634 * 1c Operand of unsupported format
635 * 1d SNaN operand.
636 * 2  QNaN operand (not an exception, irrelevant here)
637 * 3 Any other invalid-operation not mentioned above or zero divide
638 * (FP_X_INV, FP_X_DZ)
639 * 4 Denormal operand (FP_X_DNML)
640 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL)
641 * 6 Inexact result (FP_X_IMP)
642 */
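/*
 * Worked example: if a process unmasks only the divide-by-zero exception
 * (clears ZM, bit 2 of the control word), then (~control & 0x3f) == 0x04
 * and the index mask used in npxtrap_x87() is 0x44.  A later zero divide
 * sets ZE (bit 2) in the status word, so the table index is 0x04 and
 * fpetable[0x04] yields FPE_FLTDIV.
 */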
643 static char fpetable[128] = {
644 0,
645 FPE_FLTINV, /* 1 - INV */
646 FPE_FLTUND, /* 2 - DNML */
647 FPE_FLTINV, /* 3 - INV | DNML */
648 FPE_FLTDIV, /* 4 - DZ */
649 FPE_FLTINV, /* 5 - INV | DZ */
650 FPE_FLTDIV, /* 6 - DNML | DZ */
651 FPE_FLTINV, /* 7 - INV | DNML | DZ */
652 FPE_FLTOVF, /* 8 - OFL */
653 FPE_FLTINV, /* 9 - INV | OFL */
654 FPE_FLTUND, /* A - DNML | OFL */
655 FPE_FLTINV, /* B - INV | DNML | OFL */
656 FPE_FLTDIV, /* C - DZ | OFL */
657 FPE_FLTINV, /* D - INV | DZ | OFL */
658 FPE_FLTDIV, /* E - DNML | DZ | OFL */
659 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */
660 FPE_FLTUND, /* 10 - UFL */
661 FPE_FLTINV, /* 11 - INV | UFL */
662 FPE_FLTUND, /* 12 - DNML | UFL */
663 FPE_FLTINV, /* 13 - INV | DNML | UFL */
664 FPE_FLTDIV, /* 14 - DZ | UFL */
665 FPE_FLTINV, /* 15 - INV | DZ | UFL */
666 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */
667 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */
668 FPE_FLTOVF, /* 18 - OFL | UFL */
669 FPE_FLTINV, /* 19 - INV | OFL | UFL */
670 FPE_FLTUND, /* 1A - DNML | OFL | UFL */
671 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */
672 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */
673 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */
674 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */
675 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */
676 FPE_FLTRES, /* 20 - IMP */
677 FPE_FLTINV, /* 21 - INV | IMP */
678 FPE_FLTUND, /* 22 - DNML | IMP */
679 FPE_FLTINV, /* 23 - INV | DNML | IMP */
680 FPE_FLTDIV, /* 24 - DZ | IMP */
681 FPE_FLTINV, /* 25 - INV | DZ | IMP */
682 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */
683 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */
684 FPE_FLTOVF, /* 28 - OFL | IMP */
685 FPE_FLTINV, /* 29 - INV | OFL | IMP */
686 FPE_FLTUND, /* 2A - DNML | OFL | IMP */
687 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */
688 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */
689 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */
690 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */
691 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */
692 FPE_FLTUND, /* 30 - UFL | IMP */
693 FPE_FLTINV, /* 31 - INV | UFL | IMP */
694 FPE_FLTUND, /* 32 - DNML | UFL | IMP */
695 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */
696 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */
697 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */
698 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */
699 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */
700 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */
701 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */
702 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */
703 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */
704 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */
705 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */
706 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */
707 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
708 FPE_FLTSUB, /* 40 - STK */
709 FPE_FLTSUB, /* 41 - INV | STK */
710 FPE_FLTUND, /* 42 - DNML | STK */
711 FPE_FLTSUB, /* 43 - INV | DNML | STK */
712 FPE_FLTDIV, /* 44 - DZ | STK */
713 FPE_FLTSUB, /* 45 - INV | DZ | STK */
714 FPE_FLTDIV, /* 46 - DNML | DZ | STK */
715 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */
716 FPE_FLTOVF, /* 48 - OFL | STK */
717 FPE_FLTSUB, /* 49 - INV | OFL | STK */
718 FPE_FLTUND, /* 4A - DNML | OFL | STK */
719 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */
720 FPE_FLTDIV, /* 4C - DZ | OFL | STK */
721 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */
722 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */
723 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */
724 FPE_FLTUND, /* 50 - UFL | STK */
725 FPE_FLTSUB, /* 51 - INV | UFL | STK */
726 FPE_FLTUND, /* 52 - DNML | UFL | STK */
727 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */
728 FPE_FLTDIV, /* 54 - DZ | UFL | STK */
729 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */
730 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */
731 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */
732 FPE_FLTOVF, /* 58 - OFL | UFL | STK */
733 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */
734 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */
735 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */
736 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */
737 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */
738 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */
739 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */
740 FPE_FLTRES, /* 60 - IMP | STK */
741 FPE_FLTSUB, /* 61 - INV | IMP | STK */
742 FPE_FLTUND, /* 62 - DNML | IMP | STK */
743 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */
744 FPE_FLTDIV, /* 64 - DZ | IMP | STK */
745 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */
746 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */
747 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */
748 FPE_FLTOVF, /* 68 - OFL | IMP | STK */
749 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */
750 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */
751 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */
752 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */
753 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */
754 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */
755 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */
756 FPE_FLTUND, /* 70 - UFL | IMP | STK */
757 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */
758 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */
759 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */
760 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */
761 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */
762 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */
763 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */
764 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */
765 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */
766 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */
767 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */
768 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */
769 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */
770 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
771 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
772 };
773
774 /*
775 * Read the FP status and control words, then generate si_code value
776 * for SIGFPE. The error code chosen will be one of the
777 * FPE_... macros. It will be sent as the second argument to old
778 * BSD-style signal handlers and as "siginfo_t->si_code" (second
779 * argument) to SA_SIGINFO signal handlers.
780 *
781 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
782 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs. The
783 * usermode code which understands the FPU hardware enough to enable
784 * the exceptions, can also handle clearing the exception state in the
785 * handler. The only consequence of not clearing the exception is the
786 * rethrow of the SIGFPE on return from the signal handler and
787 * reexecution of the corresponding instruction.
788 *
789 * For XMM traps, the exceptions were never cleared.
790 */
791 int
792 npxtrap_x87(void)
793 {
794 u_short control, status;
795
796 if (!hw_float) {
797 printf(
798 "npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n",
799 PCPU_GET(fpcurthread), curthread, hw_float);
800 panic("npxtrap from nowhere");
801 }
802 critical_enter();
803
804 /*
805 * Interrupt handling (for another interrupt) may have pushed the
806 * state to memory. Fetch the relevant parts of the state from
807 * wherever they are.
808 */
809 if (PCPU_GET(fpcurthread) != curthread) {
810 control = GET_FPU_CW(curthread);
811 status = GET_FPU_SW(curthread);
812 } else {
813 fnstcw(&control);
814 fnstsw(&status);
815 }
816 critical_exit();
817 return (fpetable[status & ((~control & 0x3f) | 0x40)]);
818 }
819
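/*
 * SSE counterpart: MXCSR keeps the exception flags in bits 0-5 and the
 * corresponding mask bits in bits 7-12, so (~mxcsr >> 7) lines the
 * inverted masks up with the flags and the same fpetable indexing
 * applies.
 */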
820 int
821 npxtrap_sse(void)
822 {
823 u_int mxcsr;
824
825 if (!hw_float) {
826 printf(
827 "npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n",
828 PCPU_GET(fpcurthread), curthread, hw_float);
829 panic("npxtrap from nowhere");
830 }
831 critical_enter();
832 if (PCPU_GET(fpcurthread) != curthread)
833 mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr;
834 else
835 stmxcsr(&mxcsr);
836 critical_exit();
837 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
838 }
839
840 static void
841 restore_npx_curthread(struct thread *td, struct pcb *pcb)
842 {
843
844 /*
845 * Record new context early in case frstor causes a trap.
846 */
847 PCPU_SET(fpcurthread, td);
848
849 stop_emulating();
850 if (cpu_fxsr)
851 fpu_clean_state();
852
853 if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
854 /*
855 * This is the first time this thread has used the FPU or
856 * the PCB doesn't contain a clean FPU state. Explicitly
857 * load an initial state.
858 *
859 * We prefer to restore the state from the actual save
860 * area in PCB instead of directly loading from
861 * npx_initialstate, to ignite the XSAVEOPT
862 * tracking engine.
863 */
864 bcopy(npx_initialstate, pcb->pcb_save, cpu_max_ext_state_size);
865 fpurstor(pcb->pcb_save);
866 if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
867 fldcw(pcb->pcb_initial_npxcw);
868 pcb->pcb_flags |= PCB_NPXINITDONE;
869 if (PCB_USER_FPU(pcb))
870 pcb->pcb_flags |= PCB_NPXUSERINITDONE;
871 } else {
872 fpurstor(pcb->pcb_save);
873 }
874 }
875
876 /*
877 * Implement device not available (DNA) exception
878 *
879 * It would be better to switch FP context here (if curthread != fpcurthread)
880 * and not necessarily for every context switch, but it is too hard to
881 * access foreign pcb's.
882 */
883 int
884 npxdna(void)
885 {
886 struct thread *td;
887
888 if (!hw_float)
889 return (0);
890 td = curthread;
891 critical_enter();
892
893 KASSERT((curpcb->pcb_flags & PCB_NPXNOSAVE) == 0,
894 ("npxdna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
895 if (__predict_false(PCPU_GET(fpcurthread) == td)) {
896 /*
897 		 * Some virtual machines seem to set %cr0.TS at
898 * arbitrary moments. Silently clear the TS bit
899 * regardless of the eager/lazy FPU context switch
900 * mode.
901 */
902 stop_emulating();
903 } else {
904 if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
905 printf(
906 "npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
907 PCPU_GET(fpcurthread),
908 PCPU_GET(fpcurthread)->td_proc->p_pid,
909 td, td->td_proc->p_pid);
910 panic("npxdna");
911 }
912 restore_npx_curthread(td, td->td_pcb);
913 }
914 critical_exit();
915 return (1);
916 }
917
918 /*
919 * Wrapper for fpusave() called from context switch routines.
920 *
921 * npxsave() must be called with interrupts disabled, so that it clears
922 * fpcurthread atomically with saving the state. We require callers to do the
923 * disabling, since most callers need to disable interrupts anyway to call
924 * npxsave() atomically with checking fpcurthread.
925 */
926 void
927 npxsave(union savefpu *addr)
928 {
929
930 stop_emulating();
931 fpusave(addr);
932 }
933
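/*
 * npxswitch() is called from the low-level context switch path.  In lazy
 * mode (or for kernel threads and non-user FPU states) it only sets
 * CR0.TS and clears fpcurthread, deferring the reload to the first #NM
 * trap; in the default eager mode it restores the incoming thread's
 * context immediately.
 */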
934 void npxswitch(struct thread *td, struct pcb *pcb);
935 void
936 npxswitch(struct thread *td, struct pcb *pcb)
937 {
938
939 if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
940 !PCB_USER_FPU(pcb)) {
941 start_emulating();
942 PCPU_SET(fpcurthread, NULL);
943 } else if (PCPU_GET(fpcurthread) != td) {
944 restore_npx_curthread(td, pcb);
945 }
946 }
947
948 /*
949 * Unconditionally save the current co-processor state across suspend and
950 * resume.
951 */
952 void
953 npxsuspend(union savefpu *addr)
954 {
955 register_t cr0;
956
957 if (!hw_float)
958 return;
959 if (PCPU_GET(fpcurthread) == NULL) {
960 bcopy(npx_initialstate, addr, cpu_max_ext_state_size);
961 return;
962 }
963 cr0 = rcr0();
964 stop_emulating();
965 fpusave(addr);
966 load_cr0(cr0);
967 }
968
969 void
970 npxresume(union savefpu *addr)
971 {
972 register_t cr0;
973
974 if (!hw_float)
975 return;
976
977 cr0 = rcr0();
978 npxinit(false);
979 stop_emulating();
980 fpurstor(addr);
981 load_cr0(cr0);
982 }
983
984 void
985 npxdrop(void)
986 {
987 struct thread *td;
988
989 /*
990 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
991 * ones don't cause a panic on the next frstor.
992 */
993 if (!cpu_fxsr)
994 fnclex();
995
996 td = PCPU_GET(fpcurthread);
997 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
998 CRITICAL_ASSERT(td);
999 PCPU_SET(fpcurthread, NULL);
1000 td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
1001 start_emulating();
1002 }
1003
1004 /*
1005 * Get the user state of the FPU into pcb->pcb_user_save without
1006 * dropping ownership (if possible). It returns the FPU ownership
1007 * status.
1008 */
1009 int
1010 npxgetregs(struct thread *td)
1011 {
1012 struct pcb *pcb;
1013 uint64_t *xstate_bv, bit;
1014 char *sa;
1015 int max_ext_n, i;
1016 int owned;
1017
1018 if (!hw_float)
1019 return (_MC_FPOWNED_NONE);
1020
1021 pcb = td->td_pcb;
1022 critical_enter();
1023 if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
1024 bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb),
1025 cpu_max_ext_state_size);
1026 SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw);
1027 npxuserinited(td);
1028 critical_exit();
1029 return (_MC_FPOWNED_PCB);
1030 }
1031 if (td == PCPU_GET(fpcurthread)) {
1032 fpusave(get_pcb_user_save_pcb(pcb));
1033 if (!cpu_fxsr)
1034 /*
1035 * fnsave initializes the FPU and destroys whatever
1036 * context it contains. Make sure the FPU owner
1037 * starts with a clean state next time.
1038 */
1039 npxdrop();
1040 owned = _MC_FPOWNED_FPU;
1041 } else {
1042 owned = _MC_FPOWNED_PCB;
1043 }
1044 if (use_xsave) {
1045 /*
1046 * Handle partially saved state.
1047 */
1048 sa = (char *)get_pcb_user_save_pcb(pcb);
1049 xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) +
1050 offsetof(struct xstate_hdr, xstate_bv));
1051 if (xsave_mask >> 32 != 0)
1052 max_ext_n = fls(xsave_mask >> 32) + 32;
1053 else
1054 max_ext_n = fls(xsave_mask);
1055 for (i = 0; i < max_ext_n; i++) {
1056 bit = 1ULL << i;
1057 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
1058 continue;
1059 bcopy((char *)npx_initialstate +
1060 xsave_area_desc[i].offset,
1061 sa + xsave_area_desc[i].offset,
1062 xsave_area_desc[i].size);
1063 *xstate_bv |= bit;
1064 }
1065 }
1066 critical_exit();
1067 return (owned);
1068 }
1069
1070 void
1071 npxuserinited(struct thread *td)
1072 {
1073 struct pcb *pcb;
1074
1075 CRITICAL_ASSERT(td);
1076 pcb = td->td_pcb;
1077 if (PCB_USER_FPU(pcb))
1078 pcb->pcb_flags |= PCB_NPXINITDONE;
1079 pcb->pcb_flags |= PCB_NPXUSERINITDONE;
1080 }
1081
1082 int
1083 npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
1084 {
1085 struct xstate_hdr *hdr, *ehdr;
1086 size_t len, max_len;
1087 uint64_t bv;
1088
1089 /* XXXKIB should we clear all extended state in xstate_bv instead ? */
1090 if (xfpustate == NULL)
1091 return (0);
1092 if (!use_xsave)
1093 return (EOPNOTSUPP);
1094
1095 len = xfpustate_size;
1096 if (len < sizeof(struct xstate_hdr))
1097 return (EINVAL);
1098 max_len = cpu_max_ext_state_size - sizeof(union savefpu);
1099 if (len > max_len)
1100 return (EINVAL);
1101
1102 ehdr = (struct xstate_hdr *)xfpustate;
1103 bv = ehdr->xstate_bv;
1104
1105 /*
1106 * Avoid #gp.
1107 */
1108 if (bv & ~xsave_mask)
1109 return (EINVAL);
1110
1111 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
1112
1113 hdr->xstate_bv = bv;
1114 bcopy(xfpustate + sizeof(struct xstate_hdr),
1115 (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
1116
1117 return (0);
1118 }
1119
1120 int
1121 npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate,
1122 size_t xfpustate_size)
1123 {
1124 struct pcb *pcb;
1125 int error;
1126
1127 if (!hw_float)
1128 return (ENXIO);
1129
1130 if (cpu_fxsr)
1131 addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
1132 pcb = td->td_pcb;
1133 error = 0;
1134 critical_enter();
1135 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
1136 error = npxsetxstate(td, xfpustate, xfpustate_size);
1137 if (error == 0) {
1138 if (!cpu_fxsr)
1139 fnclex(); /* As in npxdrop(). */
1140 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
1141 fpurstor(get_pcb_user_save_td(td));
1142 pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
1143 }
1144 } else {
1145 error = npxsetxstate(td, xfpustate, xfpustate_size);
1146 if (error == 0) {
1147 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
1148 npxuserinited(td);
1149 }
1150 }
1151 critical_exit();
1152 return (error);
1153 }
1154
1155 static void
1156 npx_fill_fpregs_xmm1(struct savexmm *sv_xmm, struct save87 *sv_87)
1157 {
1158 struct env87 *penv_87;
1159 struct envxmm *penv_xmm;
1160 struct fpacc87 *fx_reg;
1161 int i, st;
1162 uint64_t mantissa;
1163 uint16_t tw, exp;
1164 uint8_t ab_tw;
1165
1166 penv_87 = &sv_87->sv_env;
1167 penv_xmm = &sv_xmm->sv_env;
1168
1169 /* FPU control/status */
1170 penv_87->en_cw = penv_xmm->en_cw;
1171 penv_87->en_sw = penv_xmm->en_sw;
1172 penv_87->en_fip = penv_xmm->en_fip;
1173 penv_87->en_fcs = penv_xmm->en_fcs;
1174 penv_87->en_opcode = penv_xmm->en_opcode;
1175 penv_87->en_foo = penv_xmm->en_foo;
1176 penv_87->en_fos = penv_xmm->en_fos;
1177
1178 /*
1179 * FPU registers and tags.
1180 * For ST(i), i = fpu_reg - top; we start with fpu_reg=7.
1181 */
1182 st = 7 - ((penv_xmm->en_sw >> 11) & 7);
1183 ab_tw = penv_xmm->en_tw;
1184 tw = 0;
1185 for (i = 0x80; i != 0; i >>= 1) {
1186 sv_87->sv_ac[st] = sv_xmm->sv_fp[st].fp_acc;
1187 tw <<= 2;
1188 if (ab_tw & i) {
1189 /* Non-empty - we need to check ST(i) */
1190 fx_reg = &sv_xmm->sv_fp[st].fp_acc;
1191 /* The first 64 bits contain the mantissa. */
1192 mantissa = *((uint64_t *)fx_reg->fp_bytes);
1193 /*
1194 * The final 16 bits contain the sign bit and the exponent.
1195 * Mask the sign bit since it is of no consequence to these
1196 * tests.
1197 */
1198 exp = *((uint16_t *)&fx_reg->fp_bytes[8]) & 0x7fff;
1199 if (exp == 0) {
1200 if (mantissa == 0)
1201 tw |= 1; /* Zero */
1202 else
1203 tw |= 2; /* Denormal */
1204 } else if (exp == 0x7fff)
1205 tw |= 2; /* Infinity or NaN */
1206 } else
1207 tw |= 3; /* Empty */
1208 st = (st - 1) & 7;
1209 }
1210 penv_87->en_tw = tw;
1211 }
1212
1213 void
1214 npx_fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
1215 {
1216
1217 bzero(sv_87, sizeof(*sv_87));
1218 npx_fill_fpregs_xmm1(sv_xmm, sv_87);
1219 }
1220
1221 void
1222 npx_set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
1223 {
1224 struct env87 *penv_87;
1225 struct envxmm *penv_xmm;
1226 int i;
1227
1228 penv_87 = &sv_87->sv_env;
1229 penv_xmm = &sv_xmm->sv_env;
1230
1231 /* FPU control/status */
1232 penv_xmm->en_cw = penv_87->en_cw;
1233 penv_xmm->en_sw = penv_87->en_sw;
1234 penv_xmm->en_fip = penv_87->en_fip;
1235 penv_xmm->en_fcs = penv_87->en_fcs;
1236 penv_xmm->en_opcode = penv_87->en_opcode;
1237 penv_xmm->en_foo = penv_87->en_foo;
1238 penv_xmm->en_fos = penv_87->en_fos;
1239
1240 /*
1241 * FPU registers and tags.
1242 * Abridged / Full translation (values in binary), see FXSAVE spec.
1243 * 0 11
1244 * 1 00, 01, 10
1245 */
1246 penv_xmm->en_tw = 0;
1247 for (i = 0; i < 8; ++i) {
1248 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
1249 if ((penv_87->en_tw & (3 << i * 2)) != (3 << i * 2))
1250 penv_xmm->en_tw |= 1 << i;
1251 }
1252 }
1253
1254 void
1255 npx_get_fsave(void *addr)
1256 {
1257 struct thread *td;
1258 union savefpu *sv;
1259
1260 td = curthread;
1261 npxgetregs(td);
1262 sv = get_pcb_user_save_td(td);
1263 if (cpu_fxsr)
1264 npx_fill_fpregs_xmm1(&sv->sv_xmm, addr);
1265 else
1266 bcopy(sv, addr, sizeof(struct env87) +
1267 sizeof(struct fpacc87[8]));
1268 }
1269
1270 int
1271 npx_set_fsave(void *addr)
1272 {
1273 union savefpu sv;
1274 int error;
1275
1276 bzero(&sv, sizeof(sv));
1277 if (cpu_fxsr)
1278 npx_set_fpregs_xmm(addr, &sv.sv_xmm);
1279 else
1280 bcopy(addr, &sv, sizeof(struct env87) +
1281 sizeof(struct fpacc87[8]));
1282 error = npxsetregs(curthread, &sv, NULL, 0);
1283 return (error);
1284 }
1285
1286 /*
1287 * On AuthenticAMD processors, the fxrstor instruction does not restore
1288 * the x87's stored last instruction pointer, last data pointer, and last
1289 * opcode values, except in the rare case in which the exception summary
1290 * (ES) bit in the x87 status word is set to 1.
1291 *
1292 * In order to avoid leaking this information across processes, we clean
1293 * these values by performing a dummy load before executing fxrstor().
1294 */
1295 static void
1296 fpu_clean_state(void)
1297 {
1298 static float dummy_variable = 0.0;
1299 u_short status;
1300
1301 /*
1302 * Clear the ES bit in the x87 status word if it is currently
1303 * set, in order to avoid causing a fault in the upcoming load.
1304 */
1305 fnstsw(&status);
1306 if (status & 0x80)
1307 fnclex();
1308
1309 /*
1310 * Load the dummy variable into the x87 stack. This mangles
1311 * the x87 stack, but we don't care since we're about to call
1312 * fxrstor() anyway.
1313 */
1314 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
1315 }
1316
1317 static void
1318 fpurstor(union savefpu *addr)
1319 {
1320
1321 if (use_xsave)
1322 xrstor((char *)addr, xsave_mask);
1323 else if (cpu_fxsr)
1324 fxrstor(addr);
1325 else
1326 frstor(addr);
1327 }
1328
1329 #ifdef DEV_ISA
1330 /*
1331 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
1332 */
1333 static struct isa_pnp_id npxisa_ids[] = {
1334 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
1335 { 0 }
1336 };
1337
1338 static int
1339 npxisa_probe(device_t dev)
1340 {
1341 int result;
1342 if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
1343 device_quiet(dev);
1344 }
1345 return(result);
1346 }
1347
1348 static int
1349 npxisa_attach(device_t dev)
1350 {
1351 return (0);
1352 }
1353
1354 static device_method_t npxisa_methods[] = {
1355 /* Device interface */
1356 DEVMETHOD(device_probe, npxisa_probe),
1357 DEVMETHOD(device_attach, npxisa_attach),
1358 DEVMETHOD(device_detach, bus_generic_detach),
1359 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1360 DEVMETHOD(device_suspend, bus_generic_suspend),
1361 DEVMETHOD(device_resume, bus_generic_resume),
1362 { 0, 0 }
1363 };
1364
1365 static driver_t npxisa_driver = {
1366 "npxisa",
1367 npxisa_methods,
1368 1, /* no softc */
1369 };
1370
1371 static devclass_t npxisa_devclass;
1372
1373 DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
1374 DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
1375 ISA_PNP_INFO(npxisa_ids);
1376 #endif /* DEV_ISA */
1377
1378 static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
1379 "Kernel contexts for FPU state");
1380
1381 #define FPU_KERN_CTX_NPXINITDONE 0x01
1382 #define FPU_KERN_CTX_DUMMY 0x02
1383 #define FPU_KERN_CTX_INUSE 0x04
1384
1385 struct fpu_kern_ctx {
1386 union savefpu *prev;
1387 uint32_t flags;
1388 char hwstate1[];
1389 };
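/*
 * The hardware save area lives in the flexible hwstate1[] member;
 * fpu_kern_alloc_ctx() over-allocates by XSAVE_AREA_ALIGN so that
 * fpu_kern_ctx_savefpu() can round the pointer up to the 64-byte
 * alignment required by XSAVE.
 */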
1390
1391 struct fpu_kern_ctx *
1392 fpu_kern_alloc_ctx(u_int flags)
1393 {
1394 struct fpu_kern_ctx *res;
1395 size_t sz;
1396
1397 sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
1398 cpu_max_ext_state_size;
1399 res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
1400 M_NOWAIT : M_WAITOK) | M_ZERO);
1401 return (res);
1402 }
1403
1404 void
1405 fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
1406 {
1407
1408 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
1409 /* XXXKIB clear the memory ? */
1410 free(ctx, M_FPUKERN_CTX);
1411 }
1412
1413 static union savefpu *
1414 fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
1415 {
1416 vm_offset_t p;
1417
1418 p = (vm_offset_t)&ctx->hwstate1;
1419 p = roundup2(p, XSAVE_AREA_ALIGN);
1420 return ((union savefpu *)p);
1421 }
1422
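/*
 * Typical use by kernel code that wants to execute FPU/SIMD instructions
 * (a sketch; flag choice and error handling vary per consumer):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... SSE/AVX work ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */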
1423 void
1424 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
1425 {
1426 struct pcb *pcb;
1427
1428 pcb = td->td_pcb;
1429 KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
1430 ("ctx is required when !FPU_KERN_NOCTX"));
1431 KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
1432 ("using inuse ctx"));
1433 KASSERT((pcb->pcb_flags & PCB_NPXNOSAVE) == 0,
1434 ("recursive fpu_kern_enter while in PCB_NPXNOSAVE state"));
1435
1436 if ((flags & FPU_KERN_NOCTX) != 0) {
1437 critical_enter();
1438 stop_emulating();
1439 if (curthread == PCPU_GET(fpcurthread)) {
1440 fpusave(curpcb->pcb_save);
1441 PCPU_SET(fpcurthread, NULL);
1442 } else {
1443 KASSERT(PCPU_GET(fpcurthread) == NULL,
1444 ("invalid fpcurthread"));
1445 }
1446
1447 /*
1448 * This breaks XSAVEOPT tracker, but
1449 * PCB_NPXNOSAVE state is supposed to never need to
1450 * save FPU context at all.
1451 */
1452 fpurstor(npx_initialstate);
1453 pcb->pcb_flags |= PCB_KERNNPX | PCB_NPXNOSAVE | PCB_NPXINITDONE;
1454 return;
1455 }
1456 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
1457 ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
1458 return;
1459 }
1460 pcb = td->td_pcb;
1461 critical_enter();
1462 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
1463 get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
1464 ctx->flags = FPU_KERN_CTX_INUSE;
1465 if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
1466 ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
1467 npxexit(td);
1468 ctx->prev = pcb->pcb_save;
1469 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
1470 pcb->pcb_flags |= PCB_KERNNPX;
1471 pcb->pcb_flags &= ~PCB_NPXINITDONE;
1472 critical_exit();
1473 }
1474
1475 int
1476 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
1477 {
1478 struct pcb *pcb;
1479
1480 pcb = td->td_pcb;
1481
1482 if ((pcb->pcb_flags & PCB_NPXNOSAVE) != 0) {
1483 KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
1484 KASSERT(PCPU_GET(fpcurthread) == NULL,
1485 ("non-NULL fpcurthread for PCB_NPXNOSAVE"));
1486 CRITICAL_ASSERT(td);
1487
1488 pcb->pcb_flags &= ~(PCB_NPXNOSAVE | PCB_NPXINITDONE);
1489 start_emulating();
1490 } else {
1491 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
1492 ("leaving not inuse ctx"));
1493 ctx->flags &= ~FPU_KERN_CTX_INUSE;
1494
1495 if (is_fpu_kern_thread(0) &&
1496 (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
1497 return (0);
1498 KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
1499 ("dummy ctx"));
1500 critical_enter();
1501 if (curthread == PCPU_GET(fpcurthread))
1502 npxdrop();
1503 pcb->pcb_save = ctx->prev;
1504 }
1505
1506 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
1507 if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0) {
1508 pcb->pcb_flags |= PCB_NPXINITDONE;
1509 if ((pcb->pcb_flags & PCB_KERNNPX_THR) == 0)
1510 pcb->pcb_flags &= ~PCB_KERNNPX;
1511 } else if ((pcb->pcb_flags & PCB_KERNNPX_THR) == 0)
1512 pcb->pcb_flags &= ~(PCB_NPXINITDONE | PCB_KERNNPX);
1513 } else {
1514 if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
1515 pcb->pcb_flags |= PCB_NPXINITDONE;
1516 else
1517 pcb->pcb_flags &= ~PCB_NPXINITDONE;
1518 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
1519 }
1520 critical_exit();
1521 return (0);
1522 }
1523
1524 int
1525 fpu_kern_thread(u_int flags)
1526 {
1527
1528 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1529 ("Only kthread may use fpu_kern_thread"));
1530 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
1531 ("mangled pcb_save"));
1532 KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));
1533
1534 curpcb->pcb_flags |= PCB_KERNNPX | PCB_KERNNPX_THR;
1535 return (0);
1536 }
1537
1538 int
1539 is_fpu_kern_thread(u_int flags)
1540 {
1541
1542 if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1543 return (0);
1544 return ((curpcb->pcb_flags & PCB_KERNNPX_THR) != 0);
1545 }
1546
1547 /*
1548 * FPU save area alloc/free/init utility routines
1549 */
1550 union savefpu *
1551 fpu_save_area_alloc(void)
1552 {
1553
1554 return (uma_zalloc(fpu_save_area_zone, M_WAITOK));
1555 }
1556
1557 void
1558 fpu_save_area_free(union savefpu *fsa)
1559 {
1560
1561 uma_zfree(fpu_save_area_zone, fsa);
1562 }
1563
1564 void
1565 fpu_save_area_reset(union savefpu *fsa)
1566 {
1567
1568 bcopy(npx_initialstate, fsa, cpu_max_ext_state_size);
1569 }