sys/amd64/amd64/fpu.c
1 /*-
2 * Copyright (c) 1990 William Jolitz.
3 * Copyright (c) 1991 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/10.1/sys/amd64/amd64/fpu.c 271999 2014-09-22 20:34:36Z jhb $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sysctl.h>
47 #include <machine/bus.h>
48 #include <sys/rman.h>
49 #include <sys/signalvar.h>
50 #include <vm/uma.h>
51
52 #include <machine/cputypes.h>
53 #include <machine/frame.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/md_var.h>
56 #include <machine/pcb.h>
57 #include <machine/psl.h>
58 #include <machine/resource.h>
59 #include <machine/specialreg.h>
60 #include <machine/segments.h>
61 #include <machine/ucontext.h>
62
63 /*
64 * Floating point support.
65 */
66
67 #if defined(__GNUCLIKE_ASM) && !defined(lint)
68
69 #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw))
70 #define fnclex() __asm __volatile("fnclex")
71 #define fninit() __asm __volatile("fninit")
72 #define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr)))
73 #define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr)))
74 #define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr)))
75 #define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr)))
76 #define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr))
77 #define stmxcsr(addr) __asm __volatile("stmxcsr %0" : : "m" (*(addr)))
78
79 static __inline void
80 xrstor(char *addr, uint64_t mask)
81 {
82 uint32_t low, hi;
83
84 low = mask;
85 hi = mask >> 32;
86 __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
87 }
88
89 static __inline void
90 xsave(char *addr, uint64_t mask)
91 {
92 uint32_t low, hi;
93
94 low = mask;
95 hi = mask >> 32;
96 __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
97 "memory");
98 }
99
100 #else /* !(__GNUCLIKE_ASM && !lint) */
101
102 void fldcw(u_short cw);
103 void fnclex(void);
104 void fninit(void);
105 void fnstcw(caddr_t addr);
106 void fnstsw(caddr_t addr);
107 void fxsave(caddr_t addr);
108 void fxrstor(caddr_t addr);
109 void ldmxcsr(u_int csr);
110 void stmxcsr(u_int *csr);
111 void xrstor(char *addr, uint64_t mask);
112 void xsave(char *addr, uint64_t mask);
113
114 #endif /* __GNUCLIKE_ASM && !lint */
115
116 #define start_emulating() load_cr0(rcr0() | CR0_TS)
117 #define stop_emulating() clts()
118
119 CTASSERT(sizeof(struct savefpu) == 512);
120 CTASSERT(sizeof(struct xstate_hdr) == 64);
121 CTASSERT(sizeof(struct savefpu_ymm) == 832);
122
123 /*
124  * This requirement makes it easier for asm code to calculate the
125  * offset of the FPU save area from the pcb address.  The FPU save
126  * area must be 64-byte aligned.
127 */
128 CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
129
130 static void fpu_clean_state(void);
131
132 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
133 NULL, 1, "Floating point instructions executed in hardware");
134
135 int use_xsave; /* non-static for cpu_switch.S */
136 uint64_t xsave_mask; /* the same */
137 static uma_zone_t fpu_save_area_zone;
138 static struct savefpu *fpu_initialstate;
139
140 struct xsave_area_elm_descr {
141 u_int offset;
142 u_int size;
143 } *xsave_area_desc;
144
145 void
146 fpusave(void *addr)
147 {
148
149 if (use_xsave)
150 xsave((char *)addr, xsave_mask);
151 else
152 fxsave((char *)addr);
153 }
154
155 void
156 fpurestore(void *addr)
157 {
158
159 if (use_xsave)
160 xrstor((char *)addr, xsave_mask);
161 else
162 fxrstor((char *)addr);
163 }
164
165 void
166 fpususpend(void *addr)
167 {
168 u_long cr0;
169
170 cr0 = rcr0();
171 stop_emulating();
172 fpusave(addr);
173 load_cr0(cr0);
174 }
175
176 void
177 fpuresume(void *addr)
178 {
179 u_long cr0;
180
181 cr0 = rcr0();
182 stop_emulating();
183 fninit();
184 if (use_xsave)
185 load_xcr(XCR0, xsave_mask);
186 fpurestore(addr);
187 load_cr0(cr0);
188 }
189
190 /*
191 * Enable XSAVE if supported and allowed by user.
192 * Calculate the xsave_mask.
193 */
194 static void
195 fpuinit_bsp1(void)
196 {
197 u_int cp[4];
198 uint64_t xsave_mask_user;
199
200 if ((cpu_feature2 & CPUID2_XSAVE) != 0) {
201 use_xsave = 1;
202 TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
203 }
204 if (!use_xsave)
205 return;
206
207 cpuid_count(0xd, 0x0, cp);
208 xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
209 if ((cp[0] & xsave_mask) != xsave_mask)
210 panic("CPU0 does not support X87 or SSE: %x", cp[0]);
211 xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
212 xsave_mask_user = xsave_mask;
213 TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
214 xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
215 xsave_mask &= xsave_mask_user;
216 if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
217 xsave_mask &= ~XFEATURE_AVX512;
218 if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
219 xsave_mask &= ~XFEATURE_MPX;
220
221 cpuid_count(0xd, 0x1, cp);
222 if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
223 /*
224                  * Patch the XSAVE instruction in the cpu_switch code
225                  * to XSAVEOPT.  We assume that the XSAVE encoding uses
226                  * a REX prefix, and set bit 4 of the mod r/m byte.
227 */
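		/*
		 * For illustration: XSAVE is encoded as 0f ae /4 and
		 * XSAVEOPT as 0f ae /6, so the two differ only in the reg
		 * field (bits 5:3) of the mod r/m byte, and flipping bit 4
		 * (0x10) turns one into the other.  With the assumed REX
		 * prefix the instruction bytes are REX 0f ae modrm, hence
		 * index 3 below.
		 */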
228 ctx_switch_xsave[3] |= 0x10;
229 }
230 }
231
232 /*
233 * Calculate the fpu save area size.
234 */
235 static void
236 fpuinit_bsp2(void)
237 {
238 u_int cp[4];
239
240 if (use_xsave) {
241 cpuid_count(0xd, 0x0, cp);
242 cpu_max_ext_state_size = cp[1];
243
244 /*
245 * Reload the cpu_feature2, since we enabled OSXSAVE.
246 */
247 do_cpuid(1, cp);
248 cpu_feature2 = cp[2];
249 } else
250 cpu_max_ext_state_size = sizeof(struct savefpu);
251 }
252
253 /*
254 * Initialize the floating point unit.
255 */
256 void
257 fpuinit(void)
258 {
259 register_t saveintr;
260 u_int mxcsr;
261 u_short control;
262
263 if (IS_BSP())
264 fpuinit_bsp1();
265
266 if (use_xsave) {
267 load_cr4(rcr4() | CR4_XSAVE);
268 load_xcr(XCR0, xsave_mask);
269 }
270
271 /*
272          * XCR0 shall be set up before the CPU can report the save area size.
273 */
274 if (IS_BSP())
275 fpuinit_bsp2();
276
277 /*
278          * It is too early for critical_enter() to work on an AP.
279 */
280 saveintr = intr_disable();
281 stop_emulating();
282 fninit();
283 control = __INITIAL_FPUCW__;
284 fldcw(control);
285 mxcsr = __INITIAL_MXCSR__;
286 ldmxcsr(mxcsr);
287 start_emulating();
288 intr_restore(saveintr);
289 }
290
291 /*
292 * On the boot CPU we generate a clean state that is used to
293 * initialize the floating point unit when it is first used by a
294 * process.
295 */
296 static void
297 fpuinitstate(void *arg __unused)
298 {
299 register_t saveintr;
300 int cp[4], i, max_ext_n;
301
302 fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
303 M_WAITOK | M_ZERO);
304 saveintr = intr_disable();
305 stop_emulating();
306
307 fpusave(fpu_initialstate);
308 if (fpu_initialstate->sv_env.en_mxcsr_mask)
309 cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
310 else
311 cpu_mxcsr_mask = 0xFFBF;
312
313 /*
314          * The fninit instruction does not modify XMM registers, so the
315          * fpusave call above dumped whatever garbage the registers held
316          * after reset into the saved initial state.  Clear the XMM
317          * register file image to make the startup program state and
318          * signal handler XMM register content predictable.
319 */
320 bzero(&fpu_initialstate->sv_xmm[0], sizeof(struct xmmacc));
321
322 /*
323 * Create a table describing the layout of the CPU Extended
324 * Save Area.
325 */
326 if (use_xsave) {
327 max_ext_n = flsl(xsave_mask);
328 xsave_area_desc = malloc(max_ext_n * sizeof(struct
329 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
330 /* x87 state */
331 xsave_area_desc[0].offset = 0;
332 xsave_area_desc[0].size = 160;
333 /* XMM */
334 xsave_area_desc[1].offset = 160;
335 xsave_area_desc[1].size = 288 - 160;
336
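		/*
		 * The offset and size of each remaining extended area come
		 * from CPUID leaf 0xd: for sub-leaf i, %ebx reports the
		 * offset of state component i within the XSAVE area and
		 * %eax reports its size.
		 */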
337 for (i = 2; i < max_ext_n; i++) {
338 cpuid_count(0xd, i, cp);
339 xsave_area_desc[i].offset = cp[1];
340 xsave_area_desc[i].size = cp[0];
341 }
342 }
343
344 fpu_save_area_zone = uma_zcreate("FPU_save_area",
345 cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
346 XSAVE_AREA_ALIGN - 1, 0);
347
348 start_emulating();
349 intr_restore(saveintr);
350 }
351 SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, fpuinitstate, NULL);
352
353 /*
354 * Free coprocessor (if we have it).
355 */
356 void
357 fpuexit(struct thread *td)
358 {
359
360 critical_enter();
361 if (curthread == PCPU_GET(fpcurthread)) {
362 stop_emulating();
363 fpusave(curpcb->pcb_save);
364 start_emulating();
365 PCPU_SET(fpcurthread, 0);
366 }
367 critical_exit();
368 }
369
370 int
371 fpuformat()
372 {
373
374 return (_MC_FPFMT_XMM);
375 }
376
377 /*
378 * The following mechanism is used to ensure that the FPE_... value
379 * that is passed as a trapcode to the signal handler of the user
380 * process does not have more than one bit set.
381 *
382 * Multiple bits may be set if the user process modifies the control
383 * word while a status word bit is already set. While this is a sign
384  * of bad coding, we have no choice but to narrow them down to one
385 * bit, since we must not send a trapcode that is not exactly one of
386 * the FPE_ macros.
387 *
388  * The mechanism has a static table with 128 entries.  Each combination
389 * of the 7 FPU status word exception bits directly translates to a
390 * position in this table, where a single FPE_... value is stored.
391 * This FPE_... value stored there is considered the "most important"
392 * of the exception bits and will be sent as the signal code. The
393 * precedence of the bits is based upon Intel Document "Numerical
394 * Applications", Chapter "Special Computational Situations".
395 *
396 * The macro to choose one of these values does these steps: 1) Throw
397 * away status word bits that cannot be masked. 2) Throw away the bits
398 * currently masked in the control word, assuming the user isn't
399 * interested in them anymore. 3) Reinsert status word bit 7 (stack
400  * fault) if it is set, which cannot be masked but must be preserved.
401 * 4) Use the remaining bits to point into the trapcode table.
402 *
403 * The 6 maskable bits in order of their preference, as stated in the
404 * above referenced Intel manual:
405 * 1 Invalid operation (FP_X_INV)
406 * 1a Stack underflow
407 * 1b Stack overflow
408 * 1c Operand of unsupported format
409 * 1d SNaN operand.
410  *  2  QNaN operand (not an exception, irrelevant here)
411 * 3 Any other invalid-operation not mentioned above or zero divide
412 * (FP_X_INV, FP_X_DZ)
413 * 4 Denormal operand (FP_X_DNML)
414 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL)
415 * 6 Inexact result (FP_X_IMP)
416 */
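/*
 * Worked example, for illustration: with control word exception-mask bits
 * 0x3e (only the invalid-operation exception unmasked), ~control & 0x3f is
 * 0x01 and ORing in 0x40 keeps the stack-fault bit, giving an index mask of
 * 0x41.  A status word of 0x21 (IE | PE) then yields
 * fpetable[0x21 & 0x41] = fpetable[1] = FPE_FLTINV, so only the unmasked,
 * highest-precedence exception is reported.
 */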
417 static char fpetable[128] = {
418 0,
419 FPE_FLTINV, /* 1 - INV */
420 FPE_FLTUND, /* 2 - DNML */
421 FPE_FLTINV, /* 3 - INV | DNML */
422 FPE_FLTDIV, /* 4 - DZ */
423 FPE_FLTINV, /* 5 - INV | DZ */
424 FPE_FLTDIV, /* 6 - DNML | DZ */
425 FPE_FLTINV, /* 7 - INV | DNML | DZ */
426 FPE_FLTOVF, /* 8 - OFL */
427 FPE_FLTINV, /* 9 - INV | OFL */
428 FPE_FLTUND, /* A - DNML | OFL */
429 FPE_FLTINV, /* B - INV | DNML | OFL */
430 FPE_FLTDIV, /* C - DZ | OFL */
431 FPE_FLTINV, /* D - INV | DZ | OFL */
432 FPE_FLTDIV, /* E - DNML | DZ | OFL */
433 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */
434 FPE_FLTUND, /* 10 - UFL */
435 FPE_FLTINV, /* 11 - INV | UFL */
436 FPE_FLTUND, /* 12 - DNML | UFL */
437 FPE_FLTINV, /* 13 - INV | DNML | UFL */
438 FPE_FLTDIV, /* 14 - DZ | UFL */
439 FPE_FLTINV, /* 15 - INV | DZ | UFL */
440 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */
441 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */
442 FPE_FLTOVF, /* 18 - OFL | UFL */
443 FPE_FLTINV, /* 19 - INV | OFL | UFL */
444 FPE_FLTUND, /* 1A - DNML | OFL | UFL */
445 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */
446 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */
447 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */
448 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */
449 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */
450 FPE_FLTRES, /* 20 - IMP */
451 FPE_FLTINV, /* 21 - INV | IMP */
452 FPE_FLTUND, /* 22 - DNML | IMP */
453 FPE_FLTINV, /* 23 - INV | DNML | IMP */
454 FPE_FLTDIV, /* 24 - DZ | IMP */
455 FPE_FLTINV, /* 25 - INV | DZ | IMP */
456 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */
457 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */
458 FPE_FLTOVF, /* 28 - OFL | IMP */
459 FPE_FLTINV, /* 29 - INV | OFL | IMP */
460 FPE_FLTUND, /* 2A - DNML | OFL | IMP */
461 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */
462 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */
463 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */
464 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */
465 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */
466 FPE_FLTUND, /* 30 - UFL | IMP */
467 FPE_FLTINV, /* 31 - INV | UFL | IMP */
468 FPE_FLTUND, /* 32 - DNML | UFL | IMP */
469 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */
470 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */
471 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */
472 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */
473 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */
474 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */
475 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */
476 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */
477 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */
478 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */
479 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */
480 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */
481 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
482 FPE_FLTSUB, /* 40 - STK */
483 FPE_FLTSUB, /* 41 - INV | STK */
484 FPE_FLTUND, /* 42 - DNML | STK */
485 FPE_FLTSUB, /* 43 - INV | DNML | STK */
486 FPE_FLTDIV, /* 44 - DZ | STK */
487 FPE_FLTSUB, /* 45 - INV | DZ | STK */
488 FPE_FLTDIV, /* 46 - DNML | DZ | STK */
489 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */
490 FPE_FLTOVF, /* 48 - OFL | STK */
491 FPE_FLTSUB, /* 49 - INV | OFL | STK */
492 FPE_FLTUND, /* 4A - DNML | OFL | STK */
493 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */
494 FPE_FLTDIV, /* 4C - DZ | OFL | STK */
495 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */
496 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */
497 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */
498 FPE_FLTUND, /* 50 - UFL | STK */
499 FPE_FLTSUB, /* 51 - INV | UFL | STK */
500 FPE_FLTUND, /* 52 - DNML | UFL | STK */
501 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */
502 FPE_FLTDIV, /* 54 - DZ | UFL | STK */
503 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */
504 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */
505 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */
506 FPE_FLTOVF, /* 58 - OFL | UFL | STK */
507 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */
508 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */
509 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */
510 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */
511 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */
512 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */
513 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */
514 FPE_FLTRES, /* 60 - IMP | STK */
515 FPE_FLTSUB, /* 61 - INV | IMP | STK */
516 FPE_FLTUND, /* 62 - DNML | IMP | STK */
517 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */
518 FPE_FLTDIV, /* 64 - DZ | IMP | STK */
519 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */
520 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */
521 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */
522 FPE_FLTOVF, /* 68 - OFL | IMP | STK */
523 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */
524 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */
525 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */
526 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */
527 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */
528 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */
529 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */
530 FPE_FLTUND, /* 70 - UFL | IMP | STK */
531 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */
532 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */
533 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */
534 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */
535 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */
536 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */
537 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */
538 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */
539 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */
540 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */
541 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */
542 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */
543 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */
544 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
545 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
546 };
547
548 /*
549 * Read the FP status and control words, then generate si_code value
550 * for SIGFPE. The error code chosen will be one of the
551 * FPE_... macros. It will be sent as the second argument to old
552 * BSD-style signal handlers and as "siginfo_t->si_code" (second
553 * argument) to SA_SIGINFO signal handlers.
554 *
555 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
556  * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  Usermode
557  * code which understands the FPU hardware well enough to enable the
558  * exceptions can also handle clearing the exception state in the
559  * handler.  The only consequence of not clearing the exception is the
560 * rethrow of the SIGFPE on return from the signal handler and
561 * reexecution of the corresponding instruction.
562 *
563 * For XMM traps, the exceptions were never cleared.
564 */
565 int
566 fputrap_x87(void)
567 {
568 struct savefpu *pcb_save;
569 u_short control, status;
570
571 critical_enter();
572
573 /*
574 * Interrupt handling (for another interrupt) may have pushed the
575 * state to memory. Fetch the relevant parts of the state from
576 * wherever they are.
577 */
578 if (PCPU_GET(fpcurthread) != curthread) {
579 pcb_save = curpcb->pcb_save;
580 control = pcb_save->sv_env.en_cw;
581 status = pcb_save->sv_env.en_sw;
582 } else {
583 fnstcw(&control);
584 fnstsw(&status);
585 }
586
587 critical_exit();
588 return (fpetable[status & ((~control & 0x3f) | 0x40)]);
589 }
590
591 int
592 fputrap_sse(void)
593 {
594 u_int mxcsr;
595
596 critical_enter();
597 if (PCPU_GET(fpcurthread) != curthread)
598 mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
599 else
600 stmxcsr(&mxcsr);
601 critical_exit();
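	/*
	 * MXCSR keeps the exception flags in bits 0-5 and the matching
	 * mask bits in bits 7-12, so shifting the complemented register
	 * right by 7 lines the unmasked-exception bits up with the flag
	 * bits before indexing fpetable.
	 */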
602 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
603 }
604
605 /*
606 * Implement device not available (DNA) exception
607 *
608 * It would be better to switch FP context here (if curthread != fpcurthread)
609 * and not necessarily for every context switch, but it is too hard to
610 * access foreign pcb's.
611 */
612
613 static int err_count = 0;
614
615 void
616 fpudna(void)
617 {
618
619 critical_enter();
620 if (PCPU_GET(fpcurthread) == curthread) {
621 printf("fpudna: fpcurthread == curthread %d times\n",
622 ++err_count);
623 stop_emulating();
624 critical_exit();
625 return;
626 }
627 if (PCPU_GET(fpcurthread) != NULL) {
628 printf("fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
629 PCPU_GET(fpcurthread),
630 PCPU_GET(fpcurthread)->td_proc->p_pid,
631 curthread, curthread->td_proc->p_pid);
632 panic("fpudna");
633 }
634 stop_emulating();
635 /*
636 * Record new context early in case frstor causes a trap.
637 */
638 PCPU_SET(fpcurthread, curthread);
639
640 fpu_clean_state();
641
642 if ((curpcb->pcb_flags & PCB_FPUINITDONE) == 0) {
643 /*
644 * This is the first time this thread has used the FPU or
645 * the PCB doesn't contain a clean FPU state. Explicitly
646 * load an initial state.
647 *
648 * We prefer to restore the state from the actual save
649 * area in PCB instead of directly loading from
650 * fpu_initialstate, to ignite the XSAVEOPT
651 * tracking engine.
652 */
653 bcopy(fpu_initialstate, curpcb->pcb_save, cpu_max_ext_state_size);
654 fpurestore(curpcb->pcb_save);
655 if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
656 fldcw(curpcb->pcb_initial_fpucw);
657 if (PCB_USER_FPU(curpcb))
658 set_pcb_flags(curpcb,
659 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
660 else
661 set_pcb_flags(curpcb, PCB_FPUINITDONE);
662 } else
663 fpurestore(curpcb->pcb_save);
664 critical_exit();
665 }
666
667 void
668 fpudrop()
669 {
670 struct thread *td;
671
672 td = PCPU_GET(fpcurthread);
673 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
674 CRITICAL_ASSERT(td);
675 PCPU_SET(fpcurthread, NULL);
676 clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
677 start_emulating();
678 }
679
680 /*
681 * Get the user state of the FPU into pcb->pcb_user_save without
682 * dropping ownership (if possible). It returns the FPU ownership
683 * status.
684 */
685 int
686 fpugetregs(struct thread *td)
687 {
688 struct pcb *pcb;
689 uint64_t *xstate_bv, bit;
690 char *sa;
691 int max_ext_n, i, owned;
692
693 pcb = td->td_pcb;
694 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
695 bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
696 cpu_max_ext_state_size);
697 get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
698 pcb->pcb_initial_fpucw;
699 fpuuserinited(td);
700 return (_MC_FPOWNED_PCB);
701 }
702 critical_enter();
703 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
704 fpusave(get_pcb_user_save_pcb(pcb));
705 owned = _MC_FPOWNED_FPU;
706 } else {
707 owned = _MC_FPOWNED_PCB;
708 }
709 critical_exit();
710 if (use_xsave) {
711 /*
712 * Handle partially saved state.
713 */
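		/*
		 * XSAVE sets a bit in xstate_bv only for components that
		 * are not in their initial configuration, and XSAVEOPT may
		 * additionally skip writing the data of such components,
		 * leaving stale memory behind.  Fill components whose bits
		 * are clear from fpu_initialstate and set the bits so that
		 * userland sees a complete image.
		 */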
714 sa = (char *)get_pcb_user_save_pcb(pcb);
715 xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
716 offsetof(struct xstate_hdr, xstate_bv));
717 max_ext_n = flsl(xsave_mask);
718 for (i = 0; i < max_ext_n; i++) {
719 bit = 1ULL << i;
720 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
721 continue;
722 bcopy((char *)fpu_initialstate +
723 xsave_area_desc[i].offset,
724 sa + xsave_area_desc[i].offset,
725 xsave_area_desc[i].size);
726 *xstate_bv |= bit;
727 }
728 }
729 return (owned);
730 }
731
732 void
733 fpuuserinited(struct thread *td)
734 {
735 struct pcb *pcb;
736
737 pcb = td->td_pcb;
738 if (PCB_USER_FPU(pcb))
739 set_pcb_flags(pcb,
740 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
741 else
742 set_pcb_flags(pcb, PCB_FPUINITDONE);
743 }
744
745 int
746 fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
747 {
748 struct xstate_hdr *hdr, *ehdr;
749 size_t len, max_len;
750 uint64_t bv;
751
752 /* XXXKIB should we clear all extended state in xstate_bv instead ? */
753 if (xfpustate == NULL)
754 return (0);
755 if (!use_xsave)
756 return (EOPNOTSUPP);
757
758 len = xfpustate_size;
759 if (len < sizeof(struct xstate_hdr))
760 return (EINVAL);
761 max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
762 if (len > max_len)
763 return (EINVAL);
764
765 ehdr = (struct xstate_hdr *)xfpustate;
766 bv = ehdr->xstate_bv;
767
768 /*
769 * Avoid #gp.
770 */
771 if (bv & ~xsave_mask)
772 return (EINVAL);
773
774 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
775
776 hdr->xstate_bv = bv;
777 bcopy(xfpustate + sizeof(struct xstate_hdr),
778 (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
779
780 return (0);
781 }
782
783 /*
784 * Set the state of the FPU.
785 */
786 int
787 fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
788 size_t xfpustate_size)
789 {
790 struct pcb *pcb;
791 int error;
792
793 pcb = td->td_pcb;
794 critical_enter();
795 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
796 error = fpusetxstate(td, xfpustate, xfpustate_size);
797 if (error != 0) {
798 critical_exit();
799 return (error);
800 }
801 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
802 fpurestore(get_pcb_user_save_td(td));
803 critical_exit();
804 set_pcb_flags(pcb, PCB_FPUINITDONE | PCB_USERFPUINITDONE);
805 } else {
806 critical_exit();
807 error = fpusetxstate(td, xfpustate, xfpustate_size);
808 if (error != 0)
809 return (error);
810 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
811 fpuuserinited(td);
812 }
813 return (0);
814 }
815
816 /*
817 * On AuthenticAMD processors, the fxrstor instruction does not restore
818 * the x87's stored last instruction pointer, last data pointer, and last
819 * opcode values, except in the rare case in which the exception summary
820 * (ES) bit in the x87 status word is set to 1.
821 *
822 * In order to avoid leaking this information across processes, we clean
823 * these values by performing a dummy load before executing fxrstor().
824 */
825 static void
826 fpu_clean_state(void)
827 {
828 static float dummy_variable = 0.0;
829 u_short status;
830
831 /*
832 * Clear the ES bit in the x87 status word if it is currently
833 * set, in order to avoid causing a fault in the upcoming load.
834 */
835 fnstsw(&status);
836 if (status & 0x80)
837 fnclex();
838
839 /*
840 * Load the dummy variable into the x87 stack. This mangles
841 * the x87 stack, but we don't care since we're about to call
842 * fxrstor() anyway.
843 */
844 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
845 }
846
847 /*
848 * This really sucks. We want the acpi version only, but it requires
849 * the isa_if.h file in order to get the definitions.
850 */
851 #include "opt_isa.h"
852 #ifdef DEV_ISA
853 #include <isa/isavar.h>
854 /*
855 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
856 */
857 static struct isa_pnp_id fpupnp_ids[] = {
858 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
859 { 0 }
860 };
861
862 static int
863 fpupnp_probe(device_t dev)
864 {
865 int result;
866
867 result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
868 if (result <= 0)
869 device_quiet(dev);
870 return (result);
871 }
872
873 static int
874 fpupnp_attach(device_t dev)
875 {
876
877 return (0);
878 }
879
880 static device_method_t fpupnp_methods[] = {
881 /* Device interface */
882 DEVMETHOD(device_probe, fpupnp_probe),
883 DEVMETHOD(device_attach, fpupnp_attach),
884 DEVMETHOD(device_detach, bus_generic_detach),
885 DEVMETHOD(device_shutdown, bus_generic_shutdown),
886 DEVMETHOD(device_suspend, bus_generic_suspend),
887 DEVMETHOD(device_resume, bus_generic_resume),
888
889 { 0, 0 }
890 };
891
892 static driver_t fpupnp_driver = {
893 "fpupnp",
894 fpupnp_methods,
895 1, /* no softc */
896 };
897
898 static devclass_t fpupnp_devclass;
899
900 DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
901 #endif /* DEV_ISA */
902
903 static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
904 "Kernel contexts for FPU state");
905
906 #define FPU_KERN_CTX_FPUINITDONE 0x01
907 #define FPU_KERN_CTX_DUMMY 0x02 /* avoided save for the kern thread */
908
909 struct fpu_kern_ctx {
910 struct savefpu *prev;
911 uint32_t flags;
912 char hwstate1[];
913 };
914
915 struct fpu_kern_ctx *
916 fpu_kern_alloc_ctx(u_int flags)
917 {
918 struct fpu_kern_ctx *res;
919 size_t sz;
920
921 sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
922 cpu_max_ext_state_size;
923 res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
924 M_NOWAIT : M_WAITOK) | M_ZERO);
925 return (res);
926 }
927
928 void
929 fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
930 {
931
932 /* XXXKIB clear the memory ? */
933 free(ctx, M_FPUKERN_CTX);
934 }
935
936 static struct savefpu *
937 fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
938 {
939 vm_offset_t p;
940
941 p = (vm_offset_t)&ctx->hwstate1;
942 p = roundup2(p, XSAVE_AREA_ALIGN);
943 return ((struct savefpu *)p);
944 }
945
946 int
947 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
948 {
949 struct pcb *pcb;
950
951 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
952 ctx->flags = FPU_KERN_CTX_DUMMY;
953 return (0);
954 }
955 pcb = td->td_pcb;
956 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
957 get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
958 ctx->flags = 0;
959 if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
960 ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
961 fpuexit(td);
962 ctx->prev = pcb->pcb_save;
963 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
964 set_pcb_flags(pcb, PCB_KERNFPU);
965 clear_pcb_flags(pcb, PCB_FPUINITDONE);
966 return (0);
967 }
968
969 int
970 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
971 {
972 struct pcb *pcb;
973
974 if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
975 return (0);
976 KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
977 pcb = td->td_pcb;
978 critical_enter();
979 if (curthread == PCPU_GET(fpcurthread))
980 fpudrop();
981 critical_exit();
982 pcb->pcb_save = ctx->prev;
983 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
984 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
985 set_pcb_flags(pcb, PCB_FPUINITDONE);
986 clear_pcb_flags(pcb, PCB_KERNFPU);
987 } else
988 clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
989 } else {
990 if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
991 set_pcb_flags(pcb, PCB_FPUINITDONE);
992 else
993 clear_pcb_flags(pcb, PCB_FPUINITDONE);
994 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
995 }
996 return (0);
997 }
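/*
 * A typical in-kernel consumer of this interface looks roughly like the
 * sketch below (FPU_KERN_NORMAL is assumed to come from machine/fpu.h and
 * error handling is omitted):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... use FPU/SSE instructions ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */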
998
999 int
1000 fpu_kern_thread(u_int flags)
1001 {
1002
1003 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1004 ("Only kthread may use fpu_kern_thread"));
1005 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
1006 ("mangled pcb_save"));
1007 KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));
1008
1009 set_pcb_flags(curpcb, PCB_KERNFPU);
1010 return (0);
1011 }
1012
1013 int
1014 is_fpu_kern_thread(u_int flags)
1015 {
1016
1017 if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1018 return (0);
1019 return ((curpcb->pcb_flags & PCB_KERNFPU) != 0);
1020 }
1021
1022 /*
1023 * FPU save area alloc/free/init utility routines
1024 */
1025 struct savefpu *
1026 fpu_save_area_alloc(void)
1027 {
1028
1029 return (uma_zalloc(fpu_save_area_zone, 0));
1030 }
1031
1032 void
1033 fpu_save_area_free(struct savefpu *fsa)
1034 {
1035
1036 uma_zfree(fpu_save_area_zone, fsa);
1037 }
1038
1039 void
1040 fpu_save_area_reset(struct savefpu *fsa)
1041 {
1042
1043 bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
1044 }