/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/12.0/sys/i386/i386/npx.c 338803 2018-09-19 16:37:43Z kib $");

#include "opt_cpu.h"
#include "opt_isa.h"
#include "opt_npx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>
#include <vm/uma.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>
#include <x86/ifunc.h>

#include <machine/intr_machdep.h>

#ifdef DEV_ISA
#include <isa/isavar.h>
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define fldcw(cw)		__asm __volatile("fldcw %0" : : "m" (cw))
#define fnclex()		__asm __volatile("fnclex")
#define fninit()		__asm __volatile("fninit")
#define fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define fnstsw(addr)		__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define fp_divide_by_0()	__asm __volatile( \
				    "fldz; fld1; fdiv %st,%st(1); fnop")
#define frstor(addr)		__asm __volatile("frstor %0" : : "m" (*(addr)))
#define fxrstor(addr)		__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define ldmxcsr(csr)		__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define stmxcsr(addr)		__asm __volatile("stmxcsr %0" : : "m" (*(addr)))

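/*
 * The XSAVE-family instructions take the 64-bit requested-feature
 * bitmap in %edx:%eax; the wrappers below split the C-level mask
 * accordingly.
 */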
static __inline void
xrstor(char *addr, uint64_t mask)
{
        uint32_t low, hi;

        low = mask;
        hi = mask >> 32;
        __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
xsave(char *addr, uint64_t mask)
{
        uint32_t low, hi;

        low = mask;
        hi = mask >> 32;
        __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
            "memory");
}

static __inline void
xsaveopt(char *addr, uint64_t mask)
{
        uint32_t low, hi;

        low = mask;
        hi = mask >> 32;
        __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
            "memory");
}
#else /* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(u_short cw);
void	fnclex(void);
void	fninit(void);
void	fnsave(caddr_t addr);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fp_divide_by_0(void);
void	frstor(caddr_t addr);
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
void	ldmxcsr(u_int csr);
void	stmxcsr(u_int *csr);
void	xrstor(char *addr, uint64_t mask);
void	xsave(char *addr, uint64_t mask);
void	xsaveopt(char *addr, uint64_t mask);

#endif /* __GNUCLIKE_ASM && !lint */

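/*
 * With CR0_TS set, the next FPU instruction raises a device-not-
 * available (#NM) trap, which npxdna() handles; clts clears TS and
 * stops the trapping.
 */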
#define start_emulating()	load_cr0(rcr0() | CR0_TS)
#define stop_emulating()	clts()

#define GET_FPU_CW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) do { \
	if (cpu_fxsr) \
		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
	else \
		(savefpu)->sv_87.sv_env.en_cw = (value); \
} while (0)

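/*
 * Layout sanity checks: the legacy FXSAVE image is 512 bytes, the
 * XSAVE header that follows it is 64 bytes, and savefpu_ymm appends
 * the 256-byte AVX (YMM_Hi128) region (512 + 64 + 256 = 832).
 */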
CTASSERT(sizeof(union savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the pcb address.  The FPU save
 * area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 * area.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm));

static void fpu_clean_state(void);

static void fpurstor(union savefpu *);

int hw_float;

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &hw_float, 0, "Floating point instructions executed in hardware");

int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");

int use_xsave;
uint64_t xsave_mask;
static uma_zone_t fpu_save_area_zone;
static union savefpu *npx_initialstate;

struct xsave_area_elm_descr {
        u_int   offset;
        u_int   size;
} *xsave_area_desc;

static volatile u_int npx_traps_while_probing;

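/*
 * Trap handler installed only while probing: count the spurious x87
 * trap and clear the pending exception so that the probe can
 * continue.
 */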
alias_for_inthand_t probetrap;
__asm("                                                         \n\
        .text                                                   \n\
        .p2align 2,0x90                                         \n\
        .type   " __XSTRING(CNAME(probetrap)) ",@function       \n\
" __XSTRING(CNAME(probetrap)) ":                                \n\
        ss                                                      \n\
        incl    " __XSTRING(CNAME(npx_traps_while_probing)) "   \n\
        fnclex                                                  \n\
        iret                                                    \n\
");

/*
 * Determine if an FPU is present and how to use it.
 */
static int
npx_probe(void)
{
        struct gate_descriptor save_idt_npxtrap;
        u_short control, status;

        /*
         * Modern CPUs all have an FPU that uses the INT16 interface
         * and provide a simple way to verify that, so handle the
         * common case right away.
         */
        if (cpu_feature & CPUID_FPU) {
                hw_float = 1;
                return (1);
        }

        save_idt_npxtrap = idt[IDT_MF];
        setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
            GSEL(GCODE_SEL, SEL_KPL));

        /*
         * Don't trap while we're probing.
         */
        stop_emulating();

        /*
         * Finish resetting the coprocessor, if any.  If there is an error
         * pending, then we may get a bogus IRQ13, but npx_intr() will handle
         * it OK.  Bogus halts have never been observed, but we enabled
         * IRQ13 and cleared the BUSY# latch early to handle them anyway.
         */
        fninit();

        /*
         * Don't use fwait here because it might hang.
         * Don't use fnop here because it usually hangs if there is no FPU.
         */
        DELAY(1000);            /* wait for any IRQ13 */
#ifdef DIAGNOSTIC
        if (npx_traps_while_probing != 0)
                printf("fninit caused %u bogus npx trap(s)\n",
                    npx_traps_while_probing);
#endif
        /*
         * Check for a status of mostly zero.
         */
        status = 0x5a5a;
        fnstsw(&status);
        if ((status & 0xb8ff) == 0) {
                /*
                 * Good, now check for a proper control word.
                 */
                control = 0x5a5a;
                fnstcw(&control);
                if ((control & 0x1f3f) == 0x033f) {
                        /*
                         * We have an npx, now divide by 0 to see if exception
                         * 16 works.
                         */
                        control &= ~(1 << 2);   /* enable divide by 0 trap */
                        fldcw(control);
                        npx_traps_while_probing = 0;
                        fp_divide_by_0();
                        if (npx_traps_while_probing != 0) {
                                /*
                                 * Good, exception 16 works.
                                 */
                                hw_float = 1;
                                goto cleanup;
                        }
                        printf(
        "FPU does not use exception 16 for error reporting\n");
                        goto cleanup;
                }
        }

        /*
         * Probe failed.  Floating point simply won't work.
         * Notify user and disable FPU/MMX/SSE instruction execution.
         */
        printf("WARNING: no FPU!\n");
        __asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
            "n" (CR0_EM | CR0_MP) : "ax");

cleanup:
        idt[IDT_MF] = save_idt_npxtrap;
        return (hw_float);
}

static void
npxsave_xsaveopt(union savefpu *addr)
{

        xsaveopt((char *)addr, xsave_mask);
}

static void
fpusave_xsave(union savefpu *addr)
{

        xsave((char *)addr, xsave_mask);
}

static void
fpusave_fxsave(union savefpu *addr)
{

        fxsave((char *)addr);
}

static void
fpusave_fnsave(union savefpu *addr)
{

        fnsave((char *)addr);
}

static void
init_xsave(void)
{

        if (use_xsave)
                return;
        if (!cpu_fxsr || (cpu_feature2 & CPUID2_XSAVE) == 0)
                return;
        use_xsave = 1;
        TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
}

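/*
 * Ifunc resolvers: evaluated once during boot to bind npxsave_core
 * and fpusave to the best save primitive the CPU offers.
 */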
DEFINE_IFUNC(, void, npxsave_core, (union savefpu *), static)
{

        init_xsave();
        if (use_xsave)
                return ((cpu_stdext_feature & CPUID_EXTSTATE_XSAVEOPT) != 0 ?
                    npxsave_xsaveopt : fpusave_xsave);
        if (cpu_fxsr)
                return (fpusave_fxsave);
        return (fpusave_fnsave);
}

DEFINE_IFUNC(, void, fpusave, (union savefpu *), static)
{

        init_xsave();
        if (use_xsave)
                return (fpusave_xsave);
        if (cpu_fxsr)
                return (fpusave_fxsave);
        return (fpusave_fnsave);
}

/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */
static void
npxinit_bsp1(void)
{
        u_int cp[4];
        uint64_t xsave_mask_user;

        TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
        if (!use_xsave)
                return;
        cpuid_count(0xd, 0x0, cp);
        xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
        if ((cp[0] & xsave_mask) != xsave_mask)
                panic("CPU0 does not support X87 or SSE: %x", cp[0]);
        xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
        xsave_mask_user = xsave_mask;
        TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user);
        xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
        xsave_mask &= xsave_mask_user;
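        /*
         * AVX512 and MPX each consist of several XSAVE state
         * components that must be enabled as a group; drop the whole
         * group if the mask kept only part of it.
         */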
        if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
                xsave_mask &= ~XFEATURE_AVX512;
        if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
                xsave_mask &= ~XFEATURE_MPX;
}

/*
 * Calculate the fpu save area size.
 */
static void
npxinit_bsp2(void)
{
        u_int cp[4];

        if (use_xsave) {
                cpuid_count(0xd, 0x0, cp);
                cpu_max_ext_state_size = cp[1];

                /*
                 * Reload the cpu_feature2, since we enabled OSXSAVE.
                 */
                do_cpuid(1, cp);
                cpu_feature2 = cp[2];
        } else
                cpu_max_ext_state_size = sizeof(union savefpu);
}

/*
 * Initialize the floating point unit.
 */
void
npxinit(bool bsp)
{
        static union savefpu dummy;
        register_t saveintr;
        u_int mxcsr;
        u_short control;

        if (bsp) {
                if (!npx_probe())
                        return;
                npxinit_bsp1();
        }

        if (use_xsave) {
                load_cr4(rcr4() | CR4_XSAVE);
                load_xcr(XCR0, xsave_mask);
        }

        /*
         * XCR0 shall be set up before the CPU can report the save area size.
         */
        if (bsp)
                npxinit_bsp2();

        /*
         * fninit has the same h/w bugs as fnsave.  Use the detoxified
         * fnsave to throw away any junk in the fpu.  fpusave() initializes
         * the fpu.
         *
         * It is too early for critical_enter() to work on AP.
         */
        saveintr = intr_disable();
        stop_emulating();
        if (cpu_fxsr)
                fninit();
        else
                fnsave(&dummy);
        control = __INITIAL_NPXCW__;
        fldcw(control);
        if (cpu_fxsr) {
                mxcsr = __INITIAL_MXCSR__;
                ldmxcsr(mxcsr);
        }
        start_emulating();
        intr_restore(saveintr);
}

/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
npxinitstate(void *arg __unused)
{
        register_t saveintr;
        int cp[4], i, max_ext_n;

        if (!hw_float)
                return;

        npx_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
            M_WAITOK | M_ZERO);
        saveintr = intr_disable();
        stop_emulating();

        fpusave(npx_initialstate);
        if (cpu_fxsr) {
                if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask)
                        cpu_mxcsr_mask =
                            npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask;
                else
                        cpu_mxcsr_mask = 0xFFBF;

                /*
                 * The fninit instruction does not modify XMM
                 * registers or x87 registers (MM/ST).  The fpusave
                 * call dumped the garbage contained in the registers
                 * after reset to the initial state saved.  Clear the
                 * XMM and x87 register file images to make the startup
                 * program state and signal handler XMM/x87 register
                 * content predictable.
                 */
                bzero(npx_initialstate->sv_xmm.sv_fp,
                    sizeof(npx_initialstate->sv_xmm.sv_fp));
                bzero(npx_initialstate->sv_xmm.sv_xmm,
                    sizeof(npx_initialstate->sv_xmm.sv_xmm));
        } else
                bzero(npx_initialstate->sv_87.sv_ac,
                    sizeof(npx_initialstate->sv_87.sv_ac));

        /*
         * Create a table describing the layout of the CPU Extended
         * Save Area.
         */
        if (use_xsave) {
                if (xsave_mask >> 32 != 0)
                        max_ext_n = fls(xsave_mask >> 32) + 32;
                else
                        max_ext_n = fls(xsave_mask);
                xsave_area_desc = malloc(max_ext_n * sizeof(struct
                    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
                /* x87 state */
                xsave_area_desc[0].offset = 0;
                xsave_area_desc[0].size = 160;
                /* XMM */
                xsave_area_desc[1].offset = 160;
                xsave_area_desc[1].size = 288 - 160;

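                /*
                 * For components 2 and up, CPUID leaf 0xD sub-leaf i
                 * reports the component's size in %eax (cp[0]) and
                 * its offset within the save area in %ebx (cp[1]).
                 */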
                for (i = 2; i < max_ext_n; i++) {
                        cpuid_count(0xd, i, cp);
                        xsave_area_desc[i].offset = cp[1];
                        xsave_area_desc[i].size = cp[0];
                }
        }

        fpu_save_area_zone = uma_zcreate("FPU_save_area",
            cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
            XSAVE_AREA_ALIGN - 1, 0);

        start_emulating();
        intr_restore(saveintr);
}
SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL);

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(struct thread *td)
{

        critical_enter();
        if (curthread == PCPU_GET(fpcurthread)) {
                stop_emulating();
                fpusave(curpcb->pcb_save);
                start_emulating();
                PCPU_SET(fpcurthread, NULL);
        }
        critical_exit();
#ifdef NPX_DEBUG
        if (hw_float) {
                u_int masked_exceptions;

                masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
                /*
                 * Log exceptions that would have trapped with the old
                 * control word (overflow, divide by 0, and invalid operand).
                 */
                if (masked_exceptions & 0x0d)
                        log(LOG_ERR,
        "pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
                            td->td_proc->p_pid, td->td_proc->p_comm,
                            masked_exceptions);
        }
#endif
}

int
npxformat(void)
{

        if (!hw_float)
                return (_MC_FPFMT_NODEV);
        if (cpu_fxsr)
                return (_MC_FPFMT_XMM);
        return (_MC_FPFMT_387);
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 128 entries (the zero entry
 * is unused).  Each combination of the 7 FPU status word exception
 * bits directly translates to a position in this table, where a
 * single FPE_... value is stored.  This FPE_... value stored there is
 * considered the "most important" of the exception bits and will be
 * sent as the signal code.  The precedence of the bits is based upon
 * Intel Document "Numerical Applications", Chapter "Special
 * Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 6 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1  Invalid operation (FP_X_INV)
 *     1a   Stack underflow
 *     1b   Stack overflow
 *     1c   Operand of unsupported format
 *     1d   SNaN operand.
 *   2  QNaN operand (not an exception, irrelevant here)
 *   3  Any other invalid-operation not mentioned above or zero divide
 *        (FP_X_INV, FP_X_DZ)
 *   4  Denormal operand (FP_X_DNML)
 *   5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6  Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
        0,
        FPE_FLTINV,     /*  1 - INV */
        FPE_FLTUND,     /*  2 - DNML */
        FPE_FLTINV,     /*  3 - INV | DNML */
        FPE_FLTDIV,     /*  4 - DZ */
        FPE_FLTINV,     /*  5 - INV | DZ */
        FPE_FLTDIV,     /*  6 - DNML | DZ */
        FPE_FLTINV,     /*  7 - INV | DNML | DZ */
        FPE_FLTOVF,     /*  8 - OFL */
        FPE_FLTINV,     /*  9 - INV | OFL */
        FPE_FLTUND,     /*  A - DNML | OFL */
        FPE_FLTINV,     /*  B - INV | DNML | OFL */
        FPE_FLTDIV,     /*  C - DZ | OFL */
        FPE_FLTINV,     /*  D - INV | DZ | OFL */
        FPE_FLTDIV,     /*  E - DNML | DZ | OFL */
        FPE_FLTINV,     /*  F - INV | DNML | DZ | OFL */
        FPE_FLTUND,     /* 10 - UFL */
        FPE_FLTINV,     /* 11 - INV | UFL */
        FPE_FLTUND,     /* 12 - DNML | UFL */
        FPE_FLTINV,     /* 13 - INV | DNML | UFL */
        FPE_FLTDIV,     /* 14 - DZ | UFL */
        FPE_FLTINV,     /* 15 - INV | DZ | UFL */
        FPE_FLTDIV,     /* 16 - DNML | DZ | UFL */
        FPE_FLTINV,     /* 17 - INV | DNML | DZ | UFL */
        FPE_FLTOVF,     /* 18 - OFL | UFL */
        FPE_FLTINV,     /* 19 - INV | OFL | UFL */
        FPE_FLTUND,     /* 1A - DNML | OFL | UFL */
        FPE_FLTINV,     /* 1B - INV | DNML | OFL | UFL */
        FPE_FLTDIV,     /* 1C - DZ | OFL | UFL */
        FPE_FLTINV,     /* 1D - INV | DZ | OFL | UFL */
        FPE_FLTDIV,     /* 1E - DNML | DZ | OFL | UFL */
        FPE_FLTINV,     /* 1F - INV | DNML | DZ | OFL | UFL */
        FPE_FLTRES,     /* 20 - IMP */
        FPE_FLTINV,     /* 21 - INV | IMP */
        FPE_FLTUND,     /* 22 - DNML | IMP */
        FPE_FLTINV,     /* 23 - INV | DNML | IMP */
        FPE_FLTDIV,     /* 24 - DZ | IMP */
        FPE_FLTINV,     /* 25 - INV | DZ | IMP */
        FPE_FLTDIV,     /* 26 - DNML | DZ | IMP */
        FPE_FLTINV,     /* 27 - INV | DNML | DZ | IMP */
        FPE_FLTOVF,     /* 28 - OFL | IMP */
        FPE_FLTINV,     /* 29 - INV | OFL | IMP */
        FPE_FLTUND,     /* 2A - DNML | OFL | IMP */
        FPE_FLTINV,     /* 2B - INV | DNML | OFL | IMP */
        FPE_FLTDIV,     /* 2C - DZ | OFL | IMP */
        FPE_FLTINV,     /* 2D - INV | DZ | OFL | IMP */
        FPE_FLTDIV,     /* 2E - DNML | DZ | OFL | IMP */
        FPE_FLTINV,     /* 2F - INV | DNML | DZ | OFL | IMP */
        FPE_FLTUND,     /* 30 - UFL | IMP */
        FPE_FLTINV,     /* 31 - INV | UFL | IMP */
        FPE_FLTUND,     /* 32 - DNML | UFL | IMP */
        FPE_FLTINV,     /* 33 - INV | DNML | UFL | IMP */
        FPE_FLTDIV,     /* 34 - DZ | UFL | IMP */
        FPE_FLTINV,     /* 35 - INV | DZ | UFL | IMP */
        FPE_FLTDIV,     /* 36 - DNML | DZ | UFL | IMP */
        FPE_FLTINV,     /* 37 - INV | DNML | DZ | UFL | IMP */
        FPE_FLTOVF,     /* 38 - OFL | UFL | IMP */
        FPE_FLTINV,     /* 39 - INV | OFL | UFL | IMP */
        FPE_FLTUND,     /* 3A - DNML | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3B - INV | DNML | OFL | UFL | IMP */
        FPE_FLTDIV,     /* 3C - DZ | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3D - INV | DZ | OFL | UFL | IMP */
        FPE_FLTDIV,     /* 3E - DNML | DZ | OFL | UFL | IMP */
        FPE_FLTINV,     /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
        FPE_FLTSUB,     /* 40 - STK */
        FPE_FLTSUB,     /* 41 - INV | STK */
        FPE_FLTUND,     /* 42 - DNML | STK */
        FPE_FLTSUB,     /* 43 - INV | DNML | STK */
        FPE_FLTDIV,     /* 44 - DZ | STK */
        FPE_FLTSUB,     /* 45 - INV | DZ | STK */
        FPE_FLTDIV,     /* 46 - DNML | DZ | STK */
        FPE_FLTSUB,     /* 47 - INV | DNML | DZ | STK */
        FPE_FLTOVF,     /* 48 - OFL | STK */
        FPE_FLTSUB,     /* 49 - INV | OFL | STK */
        FPE_FLTUND,     /* 4A - DNML | OFL | STK */
        FPE_FLTSUB,     /* 4B - INV | DNML | OFL | STK */
        FPE_FLTDIV,     /* 4C - DZ | OFL | STK */
        FPE_FLTSUB,     /* 4D - INV | DZ | OFL | STK */
        FPE_FLTDIV,     /* 4E - DNML | DZ | OFL | STK */
        FPE_FLTSUB,     /* 4F - INV | DNML | DZ | OFL | STK */
        FPE_FLTUND,     /* 50 - UFL | STK */
        FPE_FLTSUB,     /* 51 - INV | UFL | STK */
        FPE_FLTUND,     /* 52 - DNML | UFL | STK */
        FPE_FLTSUB,     /* 53 - INV | DNML | UFL | STK */
        FPE_FLTDIV,     /* 54 - DZ | UFL | STK */
        FPE_FLTSUB,     /* 55 - INV | DZ | UFL | STK */
        FPE_FLTDIV,     /* 56 - DNML | DZ | UFL | STK */
        FPE_FLTSUB,     /* 57 - INV | DNML | DZ | UFL | STK */
        FPE_FLTOVF,     /* 58 - OFL | UFL | STK */
        FPE_FLTSUB,     /* 59 - INV | OFL | UFL | STK */
        FPE_FLTUND,     /* 5A - DNML | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5B - INV | DNML | OFL | UFL | STK */
        FPE_FLTDIV,     /* 5C - DZ | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5D - INV | DZ | OFL | UFL | STK */
        FPE_FLTDIV,     /* 5E - DNML | DZ | OFL | UFL | STK */
        FPE_FLTSUB,     /* 5F - INV | DNML | DZ | OFL | UFL | STK */
        FPE_FLTRES,     /* 60 - IMP | STK */
        FPE_FLTSUB,     /* 61 - INV | IMP | STK */
        FPE_FLTUND,     /* 62 - DNML | IMP | STK */
        FPE_FLTSUB,     /* 63 - INV | DNML | IMP | STK */
        FPE_FLTDIV,     /* 64 - DZ | IMP | STK */
        FPE_FLTSUB,     /* 65 - INV | DZ | IMP | STK */
        FPE_FLTDIV,     /* 66 - DNML | DZ | IMP | STK */
        FPE_FLTSUB,     /* 67 - INV | DNML | DZ | IMP | STK */
        FPE_FLTOVF,     /* 68 - OFL | IMP | STK */
        FPE_FLTSUB,     /* 69 - INV | OFL | IMP | STK */
        FPE_FLTUND,     /* 6A - DNML | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6B - INV | DNML | OFL | IMP | STK */
        FPE_FLTDIV,     /* 6C - DZ | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6D - INV | DZ | OFL | IMP | STK */
        FPE_FLTDIV,     /* 6E - DNML | DZ | OFL | IMP | STK */
        FPE_FLTSUB,     /* 6F - INV | DNML | DZ | OFL | IMP | STK */
        FPE_FLTUND,     /* 70 - UFL | IMP | STK */
        FPE_FLTSUB,     /* 71 - INV | UFL | IMP | STK */
        FPE_FLTUND,     /* 72 - DNML | UFL | IMP | STK */
        FPE_FLTSUB,     /* 73 - INV | DNML | UFL | IMP | STK */
        FPE_FLTDIV,     /* 74 - DZ | UFL | IMP | STK */
        FPE_FLTSUB,     /* 75 - INV | DZ | UFL | IMP | STK */
        FPE_FLTDIV,     /* 76 - DNML | DZ | UFL | IMP | STK */
        FPE_FLTSUB,     /* 77 - INV | DNML | DZ | UFL | IMP | STK */
        FPE_FLTOVF,     /* 78 - OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 79 - INV | OFL | UFL | IMP | STK */
        FPE_FLTUND,     /* 7A - DNML | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7B - INV | DNML | OFL | UFL | IMP | STK */
        FPE_FLTDIV,     /* 7C - DZ | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7D - INV | DZ | OFL | UFL | IMP | STK */
        FPE_FLTDIV,     /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
        FPE_FLTSUB,     /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX here.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware enough to enable
 * the exceptions can also handle clearing the exception state in the
 * handler.  The only consequence of not clearing the exception is the
 * rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
int
npxtrap_x87(void)
{
        u_short control, status;

        if (!hw_float) {
                printf(
        "npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n",
                    PCPU_GET(fpcurthread), curthread, hw_float);
                panic("npxtrap from nowhere");
        }
        critical_enter();

        /*
         * Interrupt handling (for another interrupt) may have pushed the
         * state to memory.  Fetch the relevant parts of the state from
         * wherever they are.
         */
        if (PCPU_GET(fpcurthread) != curthread) {
                control = GET_FPU_CW(curthread);
                status = GET_FPU_SW(curthread);
        } else {
                fnstcw(&control);
                fnstsw(&status);
        }
        critical_exit();
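        /*
         * Index with the unmasked exception flags (status & ~control
         * & 0x3f) plus the stack-fault bit (0x40), per the table
         * comment above.
         */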
        return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}

int
npxtrap_sse(void)
{
        u_int mxcsr;

        if (!hw_float) {
                printf(
        "npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n",
                    PCPU_GET(fpcurthread), curthread, hw_float);
                panic("npxtrap from nowhere");
        }
        critical_enter();
        if (PCPU_GET(fpcurthread) != curthread)
                mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr;
        else
                stmxcsr(&mxcsr);
        critical_exit();
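        /*
         * MXCSR keeps the exception flags in bits 0-5 and the
         * corresponding mask bits in bits 7-12; shifting the masks
         * right by 7 aligns them with the flags, so only unmasked
         * exceptions reach the table lookup.
         */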
        return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}

static void
restore_npx_curthread(struct thread *td, struct pcb *pcb)
{

        /*
         * Record new context early in case frstor causes a trap.
         */
        PCPU_SET(fpcurthread, td);

        stop_emulating();
        if (cpu_fxsr)
                fpu_clean_state();

        if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
                /*
                 * This is the first time this thread has used the FPU or
                 * the PCB doesn't contain a clean FPU state.  Explicitly
                 * load an initial state.
                 *
                 * We prefer to restore the state from the actual save
                 * area in PCB instead of directly loading from
                 * npx_initialstate, to ignite the XSAVEOPT
                 * tracking engine.
                 */
                bcopy(npx_initialstate, pcb->pcb_save, cpu_max_ext_state_size);
                fpurstor(pcb->pcb_save);
                if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
                        fldcw(pcb->pcb_initial_npxcw);
                pcb->pcb_flags |= PCB_NPXINITDONE;
                if (PCB_USER_FPU(pcb))
                        pcb->pcb_flags |= PCB_NPXUSERINITDONE;
        } else {
                fpurstor(pcb->pcb_save);
        }
}

/*
 * Implement the device not available (DNA) exception.
 *
 * It would be better to switch FP context here (if curthread != fpcurthread)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */
int
npxdna(void)
{
        struct thread *td;

        if (!hw_float)
                return (0);
        td = curthread;
        critical_enter();
        if (__predict_false(PCPU_GET(fpcurthread) == td)) {
                /*
                 * Some virtual machines seem to set %cr0.TS at
                 * arbitrary moments.  Silently clear the TS bit
                 * regardless of the eager/lazy FPU context switch
                 * mode.
                 */
                stop_emulating();
        } else {
                if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
                        printf(
                    "npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
                            PCPU_GET(fpcurthread),
                            PCPU_GET(fpcurthread)->td_proc->p_pid,
                            td, td->td_proc->p_pid);
                        panic("npxdna");
                }
                restore_npx_curthread(td, td->td_pcb);
        }
        critical_exit();
        return (1);
}

/*
 * Wrapper for fpusave() called from context switch routines.
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 */
void
npxsave(union savefpu *addr)
{

        stop_emulating();
        npxsave_core(addr);
}

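/*
 * npxswitch() is called from the low-level context switch code and is
 * not declared in any header, hence the standalone prototype right
 * before the definition.
 */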
void npxswitch(struct thread *td, struct pcb *pcb);
void
npxswitch(struct thread *td, struct pcb *pcb)
{

        if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
            !PCB_USER_FPU(pcb)) {
                start_emulating();
                PCPU_SET(fpcurthread, NULL);
        } else if (PCPU_GET(fpcurthread) != td) {
                restore_npx_curthread(td, pcb);
        }
}

/*
 * Unconditionally save the current co-processor state across suspend and
 * resume.
 */
void
npxsuspend(union savefpu *addr)
{
        register_t cr0;

        if (!hw_float)
                return;
        if (PCPU_GET(fpcurthread) == NULL) {
                bcopy(npx_initialstate, addr, cpu_max_ext_state_size);
                return;
        }
        cr0 = rcr0();
        stop_emulating();
        fpusave(addr);
        load_cr0(cr0);
}

void
npxresume(union savefpu *addr)
{
        register_t cr0;

        if (!hw_float)
                return;

        cr0 = rcr0();
        npxinit(false);
        stop_emulating();
        fpurstor(addr);
        load_cr0(cr0);
}

void
npxdrop(void)
{
        struct thread *td;

        /*
         * Discard pending exceptions in the !cpu_fxsr case so that unmasked
         * ones don't cause a panic on the next frstor.
         */
        if (!cpu_fxsr)
                fnclex();

        td = PCPU_GET(fpcurthread);
        KASSERT(td == curthread, ("npxdrop: fpcurthread != curthread"));
        CRITICAL_ASSERT(td);
        PCPU_SET(fpcurthread, NULL);
        td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
        start_emulating();
}

/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
npxgetregs(struct thread *td)
{
        struct pcb *pcb;
        uint64_t *xstate_bv, bit;
        char *sa;
        int max_ext_n, i;
        int owned;

        if (!hw_float)
                return (_MC_FPOWNED_NONE);

        pcb = td->td_pcb;
        critical_enter();
        if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
                bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb),
                    cpu_max_ext_state_size);
                SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw);
                npxuserinited(td);
                critical_exit();
                return (_MC_FPOWNED_PCB);
        }
        if (td == PCPU_GET(fpcurthread)) {
                fpusave(get_pcb_user_save_pcb(pcb));
                if (!cpu_fxsr)
                        /*
                         * fnsave initializes the FPU and destroys whatever
                         * context it contains.  Make sure the FPU owner
                         * starts with a clean state next time.
                         */
                        npxdrop();
                owned = _MC_FPOWNED_FPU;
        } else {
                owned = _MC_FPOWNED_PCB;
        }
        if (use_xsave) {
                /*
                 * Handle partially saved state.
                 */
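                /*
                 * XSAVEOPT may omit a component that was still in its
                 * initial configuration; its xstate_bv bit is then
                 * clear.  Copy in the initial content for such
                 * components and set the bit, so the caller always
                 * sees a fully populated save area.
                 */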
                sa = (char *)get_pcb_user_save_pcb(pcb);
                xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) +
                    offsetof(struct xstate_hdr, xstate_bv));
                if (xsave_mask >> 32 != 0)
                        max_ext_n = fls(xsave_mask >> 32) + 32;
                else
                        max_ext_n = fls(xsave_mask);
                for (i = 0; i < max_ext_n; i++) {
                        bit = 1ULL << i;
                        if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
                                continue;
                        bcopy((char *)npx_initialstate +
                            xsave_area_desc[i].offset,
                            sa + xsave_area_desc[i].offset,
                            xsave_area_desc[i].size);
                        *xstate_bv |= bit;
                }
        }
        critical_exit();
        return (owned);
}

void
npxuserinited(struct thread *td)
{
        struct pcb *pcb;

        CRITICAL_ASSERT(td);
        pcb = td->td_pcb;
        if (PCB_USER_FPU(pcb))
                pcb->pcb_flags |= PCB_NPXINITDONE;
        pcb->pcb_flags |= PCB_NPXUSERINITDONE;
}

int
npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
        struct xstate_hdr *hdr, *ehdr;
        size_t len, max_len;
        uint64_t bv;

        /* XXXKIB should we clear all extended state in xstate_bv instead? */
        if (xfpustate == NULL)
                return (0);
        if (!use_xsave)
                return (EOPNOTSUPP);

        len = xfpustate_size;
        if (len < sizeof(struct xstate_hdr))
                return (EINVAL);
        max_len = cpu_max_ext_state_size - sizeof(union savefpu);
        if (len > max_len)
                return (EINVAL);

        ehdr = (struct xstate_hdr *)xfpustate;
        bv = ehdr->xstate_bv;

        /*
         * Avoid #GP: xrstor faults if xstate_bv requests a component
         * that is not enabled in XCR0.
         */
        if (bv & ~xsave_mask)
                return (EINVAL);

        hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

        hdr->xstate_bv = bv;
        bcopy(xfpustate + sizeof(struct xstate_hdr),
            (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

        return (0);
}

int
npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate,
        size_t xfpustate_size)
{
        struct pcb *pcb;
        int error;

        if (!hw_float)
                return (ENXIO);

        if (cpu_fxsr)
                addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
        pcb = td->td_pcb;
        error = 0;
        critical_enter();
        if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
                error = npxsetxstate(td, xfpustate, xfpustate_size);
                if (error == 0) {
                        if (!cpu_fxsr)
                                fnclex();       /* As in npxdrop(). */
                        bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
                        fpurstor(get_pcb_user_save_td(td));
                        pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
                }
        } else {
                error = npxsetxstate(td, xfpustate, xfpustate_size);
                if (error == 0) {
                        bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
                        npxuserinited(td);
                }
        }
        critical_exit();
        return (error);
}

static void
npx_fill_fpregs_xmm1(struct savexmm *sv_xmm, struct save87 *sv_87)
{
        struct env87 *penv_87;
        struct envxmm *penv_xmm;
        int i;

        penv_87 = &sv_87->sv_env;
        penv_xmm = &sv_xmm->sv_env;

        /* FPU control/status */
        penv_87->en_cw = penv_xmm->en_cw;
        penv_87->en_sw = penv_xmm->en_sw;
        penv_87->en_fip = penv_xmm->en_fip;
        penv_87->en_fcs = penv_xmm->en_fcs;
        penv_87->en_opcode = penv_xmm->en_opcode;
        penv_87->en_foo = penv_xmm->en_foo;
        penv_87->en_fos = penv_xmm->en_fos;

        /* FPU registers and tags */
        penv_87->en_tw = 0xffff;
        for (i = 0; i < 8; ++i) {
                sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
                if ((penv_xmm->en_tw & (1 << i)) != 0)
                        /* zero and special are set as valid */
                        penv_87->en_tw &= ~(3 << i * 2);
        }
}

void
npx_fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{

        bzero(sv_87, sizeof(*sv_87));
        npx_fill_fpregs_xmm1(sv_xmm, sv_87);
}

void
npx_set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
        struct env87 *penv_87;
        struct envxmm *penv_xmm;
        int i;

        penv_87 = &sv_87->sv_env;
        penv_xmm = &sv_xmm->sv_env;

        /* FPU control/status */
        penv_xmm->en_cw = penv_87->en_cw;
        penv_xmm->en_sw = penv_87->en_sw;
        penv_xmm->en_fip = penv_87->en_fip;
        penv_xmm->en_fcs = penv_87->en_fcs;
        penv_xmm->en_opcode = penv_87->en_opcode;
        penv_xmm->en_foo = penv_87->en_foo;
        penv_xmm->en_fos = penv_87->en_fos;

        /*
         * FPU registers and tags.
         * Abridged / Full translation (values in binary), see FXSAVE spec.
         *   0		11
         *   1		00, 01, 10
         */
        penv_xmm->en_tw = 0;
        for (i = 0; i < 8; ++i) {
                sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
                if ((penv_87->en_tw & (3 << i * 2)) != (3 << i * 2))
                        penv_xmm->en_tw |= 1 << i;
        }
}

void
npx_get_fsave(void *addr)
{
        struct thread *td;
        union savefpu *sv;

        td = curthread;
        npxgetregs(td);
        sv = get_pcb_user_save_td(td);
        if (cpu_fxsr)
                npx_fill_fpregs_xmm1(&sv->sv_xmm, addr);
        else
                bcopy(sv, addr, sizeof(struct env87) +
                    sizeof(struct fpacc87[8]));
}

int
npx_set_fsave(void *addr)
{
        union savefpu sv;
        int error;

        bzero(&sv, sizeof(sv));
        if (cpu_fxsr)
                npx_set_fpregs_xmm(addr, &sv.sv_xmm);
        else
                bcopy(addr, &sv, sizeof(struct env87) +
                    sizeof(struct fpacc87[8]));
        error = npxsetregs(curthread, &sv, NULL, 0);
        return (error);
}

/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
        static float dummy_variable = 0.0;
        u_short status;

        /*
         * Clear the ES bit in the x87 status word if it is currently
         * set, in order to avoid causing a fault in the upcoming load.
         */
        fnstsw(&status);
        if (status & 0x80)
                fnclex();

        /*
         * Load the dummy variable into the x87 stack.  This mangles
         * the x87 stack, but we don't care since we're about to call
         * fxrstor() anyway.
         */
        __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}

static void
fpurstor(union savefpu *addr)
{

        if (use_xsave)
                xrstor((char *)addr, xsave_mask);
        else if (cpu_fxsr)
                fxrstor(addr);
        else
                frstor(addr);
}

#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id npxisa_ids[] = {
        { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
        { 0 }
};

static int
npxisa_probe(device_t dev)
{
        int result;

        if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev,
            npxisa_ids)) <= 0) {
                device_quiet(dev);
        }
        return (result);
}

static int
npxisa_attach(device_t dev)
{

        return (0);
}

static device_method_t npxisa_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,		npxisa_probe),
        DEVMETHOD(device_attach,	npxisa_attach),
        DEVMETHOD(device_detach,	bus_generic_detach),
        DEVMETHOD(device_shutdown,	bus_generic_shutdown),
        DEVMETHOD(device_suspend,	bus_generic_suspend),
        DEVMETHOD(device_resume,	bus_generic_resume),

        { 0, 0 }
};

static driver_t npxisa_driver = {
        "npxisa",
        npxisa_methods,
        1,                      /* no softc */
};

static devclass_t npxisa_devclass;

DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
ISA_PNP_INFO(npxisa_ids);
#endif /* DEV_ISA */

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define FPU_KERN_CTX_NPXINITDONE 0x01
#define FPU_KERN_CTX_DUMMY       0x02
#define FPU_KERN_CTX_INUSE       0x04

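/*
 * A kernel FPU context: the saved pcb_save pointer, flags, and a
 * variable-sized buffer from which an XSAVE_AREA_ALIGN-aligned save
 * area is carved (see fpu_kern_ctx_savefpu()).
 */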
struct fpu_kern_ctx {
        union savefpu *prev;
        uint32_t flags;
        char hwstate1[];
};

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
        struct fpu_kern_ctx *res;
        size_t sz;

        sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
            cpu_max_ext_state_size;
        res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
            M_NOWAIT : M_WAITOK) | M_ZERO);
        return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

        KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
        /* XXXKIB clear the memory ? */
        free(ctx, M_FPUKERN_CTX);
}

static union savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
        vm_offset_t p;

        p = (vm_offset_t)&ctx->hwstate1;
        p = roundup2(p, XSAVE_AREA_ALIGN);
        return ((union savefpu *)p);
}

void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
        struct pcb *pcb;

        KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("using inuse ctx"));

        if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
                ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
                return;
        }
        pcb = td->td_pcb;
        critical_enter();
        KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
            get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
        ctx->flags = FPU_KERN_CTX_INUSE;
        if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
                ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
        npxexit(td);
        ctx->prev = pcb->pcb_save;
        pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
        pcb->pcb_flags |= PCB_KERNNPX;
        pcb->pcb_flags &= ~PCB_NPXINITDONE;
        critical_exit();
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
        struct pcb *pcb;

        KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
            ("leaving not inuse ctx"));
        ctx->flags &= ~FPU_KERN_CTX_INUSE;

        if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
                return (0);
        pcb = td->td_pcb;
        critical_enter();
        if (curthread == PCPU_GET(fpcurthread))
                npxdrop();
        pcb->pcb_save = ctx->prev;
        if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
                if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
                        pcb->pcb_flags |= PCB_NPXINITDONE;
                else
                        pcb->pcb_flags &= ~PCB_NPXINITDONE;
                pcb->pcb_flags &= ~PCB_KERNNPX;
        } else {
                if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
                        pcb->pcb_flags |= PCB_NPXINITDONE;
                else
                        pcb->pcb_flags &= ~PCB_NPXINITDONE;
                KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
        }
        critical_exit();
        return (0);
}

int
fpu_kern_thread(u_int flags)
{

        KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
            ("Only kthread may use fpu_kern_thread"));
        KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
            ("mangled pcb_save"));
        KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

        curpcb->pcb_flags |= PCB_KERNNPX;
        return (0);
}

int
is_fpu_kern_thread(u_int flags)
{

        if ((curthread->td_pflags & TDP_KTHREAD) == 0)
                return (0);
        return ((curpcb->pcb_flags & PCB_KERNNPX) != 0);
}

/*
 * FPU save area alloc/free/init utility routines
 */
union savefpu *
fpu_save_area_alloc(void)
{

        return (uma_zalloc(fpu_save_area_zone, 0));
}

void
fpu_save_area_free(union savefpu *fsa)
{

        uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(union savefpu *fsa)
{

        bcopy(npx_initialstate, fsa, cpu_max_ext_state_size);
}