FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/fpu.c
1 /*-
2 * Copyright (c) 1990 William Jolitz.
3 * Copyright (c) 1991 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
31 */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD: releng/10.2/sys/amd64/amd64/fpu.c 279211 2015-02-23 18:38:41Z jhb $");
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sysctl.h>
47 #include <machine/bus.h>
48 #include <sys/rman.h>
49 #include <sys/signalvar.h>
50 #include <vm/uma.h>
51
52 #include <machine/cputypes.h>
53 #include <machine/frame.h>
54 #include <machine/intr_machdep.h>
55 #include <machine/md_var.h>
56 #include <machine/pcb.h>
57 #include <machine/psl.h>
58 #include <machine/resource.h>
59 #include <machine/specialreg.h>
60 #include <machine/segments.h>
61 #include <machine/ucontext.h>
62
63 /*
64 * Floating point support.
65 */
66
67 #if defined(__GNUCLIKE_ASM) && !defined(lint)
68
69 #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw))
70 #define fnclex() __asm __volatile("fnclex")
71 #define fninit() __asm __volatile("fninit")
72 #define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr)))
73 #define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr)))
74 #define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr)))
75 #define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr)))
76 #define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr))
77 #define stmxcsr(addr) __asm __volatile("stmxcsr %0" : : "m" (*(addr)))
78
79 static __inline void
80 xrstor(char *addr, uint64_t mask)
81 {
82 uint32_t low, hi;
83
84 low = mask;
85 hi = mask >> 32;
86 __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
87 }
88
89 static __inline void
90 xsave(char *addr, uint64_t mask)
91 {
92 uint32_t low, hi;
93
94 low = mask;
95 hi = mask >> 32;
96 __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
97 "memory");
98 }
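/*
 * Editorial note: XSAVE and XRSTOR take the requested-feature bitmap
 * (RFBM) in EDX:EAX, and the CPU acts on RFBM & XCR0.  The low mask
 * bits are architecturally fixed: bit 0 = x87, bit 1 = SSE (XMM and
 * MXCSR), bit 2 = AVX (upper YMM halves).  A hypothetical call that
 * saves only the legacy x87/SSE state would thus be:
 *
 *	xsave(addr, XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE);
 */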
99
100 #else /* !(__GNUCLIKE_ASM && !lint) */
101
102 void fldcw(u_short cw);
103 void fnclex(void);
104 void fninit(void);
105 void fnstcw(caddr_t addr);
106 void fnstsw(caddr_t addr);
107 void fxsave(caddr_t addr);
108 void fxrstor(caddr_t addr);
109 void ldmxcsr(u_int csr);
110 void stmxcsr(u_int *csr);
111 void xrstor(char *addr, uint64_t mask);
112 void xsave(char *addr, uint64_t mask);
113
114 #endif /* __GNUCLIKE_ASM && !lint */
115
116 #define start_emulating() load_cr0(rcr0() | CR0_TS)
117 #define stop_emulating() clts()
118
119 CTASSERT(sizeof(struct savefpu) == 512);
120 CTASSERT(sizeof(struct xstate_hdr) == 64);
121 CTASSERT(sizeof(struct savefpu_ymm) == 832);
122
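/*
 * Editorial sketch of the save area layout that the assertions above
 * pin down (the AVX offset is the value CPUID leaf 0xd reports on
 * current CPUs and is shown only as an illustration):
 *
 *	bytes   0..511	legacy FXSAVE image (struct savefpu)
 *	bytes 512..575	struct xstate_hdr (xstate_bv et al.)
 *	bytes 576..831	YMM register high halves (AVX state)
 *
 * hence sizeof(struct savefpu_ymm) == 512 + 64 + 256 == 832.
 */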
123 /*
124 * This requirement makes it easier for asm code to calculate the
125 * offset of the FPU save area from the pcb address. The FPU save
126 * area must be 64-byte aligned.
127 */
128 CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
129
130 /*
131 * Ensure the copy of XCR0 saved in a core is contained in the padding
132 * area.
133 */
134 CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
135 X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));
136
137 static void fpu_clean_state(void);
138
139 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
140 SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");
141
142 int use_xsave; /* non-static for cpu_switch.S */
143 uint64_t xsave_mask; /* the same */
144 static uma_zone_t fpu_save_area_zone;
145 static struct savefpu *fpu_initialstate;
146
147 struct xsave_area_elm_descr {
148 u_int offset;
149 u_int size;
150 } *xsave_area_desc;
151
152 void
153 fpusave(void *addr)
154 {
155
156 if (use_xsave)
157 xsave((char *)addr, xsave_mask);
158 else
159 fxsave((char *)addr);
160 }
161
162 void
163 fpurestore(void *addr)
164 {
165
166 if (use_xsave)
167 xrstor((char *)addr, xsave_mask);
168 else
169 fxrstor((char *)addr);
170 }
171
172 void
173 fpususpend(void *addr)
174 {
175 u_long cr0;
176
177 cr0 = rcr0();
178 stop_emulating();
179 fpusave(addr);
180 load_cr0(cr0);
181 }
182
183 void
184 fpuresume(void *addr)
185 {
186 u_long cr0;
187
188 cr0 = rcr0();
189 stop_emulating();
190 fninit();
191 if (use_xsave)
192 load_xcr(XCR0, xsave_mask);
193 fpurestore(addr);
194 load_cr0(cr0);
195 }
196
197 /*
198 * Enable XSAVE if supported and allowed by user.
199 * Calculate the xsave_mask.
200 */
201 static void
202 fpuinit_bsp1(void)
203 {
204 u_int cp[4];
205 uint64_t xsave_mask_user;
206
207 if ((cpu_feature2 & CPUID2_XSAVE) != 0) {
208 use_xsave = 1;
209 TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
210 }
211 if (!use_xsave)
212 return;
213
214 cpuid_count(0xd, 0x0, cp);
215 xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
216 if ((cp[0] & xsave_mask) != xsave_mask)
217 panic("CPU0 does not support X87 or SSE: %x", cp[0]);
218 xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
219 xsave_mask_user = xsave_mask;
220 TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
221 xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
222 xsave_mask &= xsave_mask_user;
223 if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
224 xsave_mask &= ~XFEATURE_AVX512;
225 if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
226 xsave_mask &= ~XFEATURE_MPX;
227
228 cpuid_count(0xd, 0x1, cp);
229 if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
230 /*
231 * Patch the XSAVE instruction in the cpu_switch code
232 * to XSAVEOPT. We assume that the XSAVE encoding uses a
233 * REX prefix, and we set bit 4 of the ModR/M byte.
234 */
235 ctx_switch_xsave[3] |= 0x10;
236 }
237 }
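/*
 * Editorial note on the XSAVEOPT patch above, assuming the encoding
 * layout described in the comment inside fpuinit_bsp1():
 *
 *	ctx_switch_xsave[0]	REX prefix
 *	ctx_switch_xsave[1]	0x0f
 *	ctx_switch_xsave[2]	0xae
 *	ctx_switch_xsave[3]	ModR/M byte
 *
 * XSAVE is encoded as 0f ae /4 (reg field 100b) and XSAVEOPT as
 * 0f ae /6 (reg field 110b), so OR-ing 0x10 into the ModR/M byte
 * flips the middle bit of the reg field and rewrites the instruction
 * to XSAVEOPT in place.
 */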
238
239 /*
240 * Calculate the fpu save area size.
241 */
242 static void
243 fpuinit_bsp2(void)
244 {
245 u_int cp[4];
246
247 if (use_xsave) {
248 cpuid_count(0xd, 0x0, cp);
249 cpu_max_ext_state_size = cp[1];
250
251 /*
252 * Reload the cpu_feature2, since we enabled OSXSAVE.
253 */
254 do_cpuid(1, cp);
255 cpu_feature2 = cp[2];
256 } else
257 cpu_max_ext_state_size = sizeof(struct savefpu);
258 }
259
260 /*
261 * Initialize the floating point unit.
262 */
263 void
264 fpuinit(void)
265 {
266 register_t saveintr;
267 u_int mxcsr;
268 u_short control;
269
270 if (IS_BSP())
271 fpuinit_bsp1();
272
273 if (use_xsave) {
274 load_cr4(rcr4() | CR4_XSAVE);
275 load_xcr(XCR0, xsave_mask);
276 }
277
278 /*
279 * XCR0 shall be set up before the CPU can report the save area size.
280 */
281 if (IS_BSP())
282 fpuinit_bsp2();
283
284 /*
285 * It is too early for critical_enter() to work on an AP.
286 */
287 saveintr = intr_disable();
288 stop_emulating();
289 fninit();
290 control = __INITIAL_FPUCW__;
291 fldcw(control);
292 mxcsr = __INITIAL_MXCSR__;
293 ldmxcsr(mxcsr);
294 start_emulating();
295 intr_restore(saveintr);
296 }
297
298 /*
299 * On the boot CPU we generate a clean state that is used to
300 * initialize the floating point unit when it is first used by a
301 * process.
302 */
303 static void
304 fpuinitstate(void *arg __unused)
305 {
306 register_t saveintr;
307 u_int cp[4]; int i, max_ext_n;
308
309 fpu_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
310 M_WAITOK | M_ZERO);
311 saveintr = intr_disable();
312 stop_emulating();
313
314 fpusave(fpu_initialstate);
315 if (fpu_initialstate->sv_env.en_mxcsr_mask)
316 cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
317 else
318 cpu_mxcsr_mask = 0xFFBF;
319
320 /*
321 * The fninit instruction does not modify XMM registers, so the
322 * fpusave call above dumped whatever garbage the registers held
323 * after reset into the saved initial state. Clear the XMM
324 * register file image to make the startup program state and
325 * signal handler XMM register content predictable.
326 */
327 bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));
328
329 /*
330 * Create a table describing the layout of the CPU Extended
331 * Save Area.
332 */
333 if (use_xsave) {
334 max_ext_n = flsl(xsave_mask);
335 xsave_area_desc = malloc(max_ext_n * sizeof(struct
336 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
337 /* x87 state */
338 xsave_area_desc[0].offset = 0;
339 xsave_area_desc[0].size = 160;
340 /* XMM */
341 xsave_area_desc[1].offset = 160;
342 xsave_area_desc[1].size = 288 - 160;
343
344 for (i = 2; i < max_ext_n; i++) {
345 cpuid_count(0xd, i, cp);
346 xsave_area_desc[i].offset = cp[1];
347 xsave_area_desc[i].size = cp[0];
348 }
349 }
350
351 fpu_save_area_zone = uma_zcreate("FPU_save_area",
352 cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
353 XSAVE_AREA_ALIGN - 1, 0);
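/*
 * Editorial note: uma_zcreate() takes its alignment argument as a
 * mask, so XSAVE_AREA_ALIGN - 1 requests the 64-byte alignment that
 * XSAVE/XRSTOR require for their operand.
 */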
354
355 start_emulating();
356 intr_restore(saveintr);
357 }
358 SYSINIT(fpuinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, fpuinitstate, NULL);
359
360 /*
361 * Free coprocessor (if we have it).
362 */
363 void
364 fpuexit(struct thread *td)
365 {
366
367 critical_enter();
368 if (curthread == PCPU_GET(fpcurthread)) {
369 stop_emulating();
370 fpusave(curpcb->pcb_save);
371 start_emulating();
372 PCPU_SET(fpcurthread, NULL);
373 }
374 critical_exit();
375 }
376
377 int
378 fpuformat()
379 {
380
381 return (_MC_FPFMT_XMM);
382 }
383
384 /*
385 * The following mechanism is used to ensure that the FPE_... value
386 * that is passed as a trapcode to the signal handler of the user
387 * process does not have more than one bit set.
388 *
389 * Multiple bits may be set if the user process modifies the control
390 * word while a status word bit is already set. While this is a sign
391 * of bad coding, we have no choice but to narrow them down to one
392 * bit, since we must not send a trapcode that is not exactly one of
393 * the FPE_ macros.
394 *
395 * The mechanism has a static table with 128 entries. Each combination
396 * of the 7 FPU status word exception bits directly translates to a
397 * position in this table, where a single FPE_... value is stored.
398 * This FPE_... value stored there is considered the "most important"
399 * of the exception bits and will be sent as the signal code. The
400 * precedence of the bits is based upon Intel Document "Numerical
401 * Applications", Chapter "Special Computational Situations".
402 *
403 * The macro to choose one of these values does these steps: 1) Throw
404 * away status word bits that cannot be masked. 2) Throw away the bits
405 * currently masked in the control word, assuming the user isn't
406 * interested in them anymore. 3) Reinsert status word bit 7 (stack
407 * fault) if it is set, which cannot be masked but must be preserved.
408 * 4) Use the remaining bits to point into the trapcode table.
409 *
410 * The 6 maskable bits in order of their preference, as stated in the
411 * above referenced Intel manual:
412 * 1 Invalid operation (FP_X_INV)
413 * 1a Stack underflow
414 * 1b Stack overflow
415 * 1c Operand of unsupported format
416 * 1d SNaN operand.
417 * 2 QNaN operand (not an exception, irrelevant here)
418 * 3 Any other invalid-operation not mentioned above or zero divide
419 * (FP_X_INV, FP_X_DZ)
420 * 4 Denormal operand (FP_X_DNML)
421 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL)
422 * 6 Inexact result (FP_X_IMP)
423 */
424 static char fpetable[128] = {
425 0,
426 FPE_FLTINV, /* 1 - INV */
427 FPE_FLTUND, /* 2 - DNML */
428 FPE_FLTINV, /* 3 - INV | DNML */
429 FPE_FLTDIV, /* 4 - DZ */
430 FPE_FLTINV, /* 5 - INV | DZ */
431 FPE_FLTDIV, /* 6 - DNML | DZ */
432 FPE_FLTINV, /* 7 - INV | DNML | DZ */
433 FPE_FLTOVF, /* 8 - OFL */
434 FPE_FLTINV, /* 9 - INV | OFL */
435 FPE_FLTUND, /* A - DNML | OFL */
436 FPE_FLTINV, /* B - INV | DNML | OFL */
437 FPE_FLTDIV, /* C - DZ | OFL */
438 FPE_FLTINV, /* D - INV | DZ | OFL */
439 FPE_FLTDIV, /* E - DNML | DZ | OFL */
440 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */
441 FPE_FLTUND, /* 10 - UFL */
442 FPE_FLTINV, /* 11 - INV | UFL */
443 FPE_FLTUND, /* 12 - DNML | UFL */
444 FPE_FLTINV, /* 13 - INV | DNML | UFL */
445 FPE_FLTDIV, /* 14 - DZ | UFL */
446 FPE_FLTINV, /* 15 - INV | DZ | UFL */
447 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */
448 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */
449 FPE_FLTOVF, /* 18 - OFL | UFL */
450 FPE_FLTINV, /* 19 - INV | OFL | UFL */
451 FPE_FLTUND, /* 1A - DNML | OFL | UFL */
452 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */
453 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */
454 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */
455 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */
456 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */
457 FPE_FLTRES, /* 20 - IMP */
458 FPE_FLTINV, /* 21 - INV | IMP */
459 FPE_FLTUND, /* 22 - DNML | IMP */
460 FPE_FLTINV, /* 23 - INV | DNML | IMP */
461 FPE_FLTDIV, /* 24 - DZ | IMP */
462 FPE_FLTINV, /* 25 - INV | DZ | IMP */
463 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */
464 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */
465 FPE_FLTOVF, /* 28 - OFL | IMP */
466 FPE_FLTINV, /* 29 - INV | OFL | IMP */
467 FPE_FLTUND, /* 2A - DNML | OFL | IMP */
468 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */
469 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */
470 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */
471 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */
472 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */
473 FPE_FLTUND, /* 30 - UFL | IMP */
474 FPE_FLTINV, /* 31 - INV | UFL | IMP */
475 FPE_FLTUND, /* 32 - DNML | UFL | IMP */
476 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */
477 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */
478 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */
479 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */
480 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */
481 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */
482 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */
483 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */
484 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */
485 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */
486 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */
487 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */
488 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
489 FPE_FLTSUB, /* 40 - STK */
490 FPE_FLTSUB, /* 41 - INV | STK */
491 FPE_FLTUND, /* 42 - DNML | STK */
492 FPE_FLTSUB, /* 43 - INV | DNML | STK */
493 FPE_FLTDIV, /* 44 - DZ | STK */
494 FPE_FLTSUB, /* 45 - INV | DZ | STK */
495 FPE_FLTDIV, /* 46 - DNML | DZ | STK */
496 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */
497 FPE_FLTOVF, /* 48 - OFL | STK */
498 FPE_FLTSUB, /* 49 - INV | OFL | STK */
499 FPE_FLTUND, /* 4A - DNML | OFL | STK */
500 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */
501 FPE_FLTDIV, /* 4C - DZ | OFL | STK */
502 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */
503 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */
504 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */
505 FPE_FLTUND, /* 50 - UFL | STK */
506 FPE_FLTSUB, /* 51 - INV | UFL | STK */
507 FPE_FLTUND, /* 52 - DNML | UFL | STK */
508 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */
509 FPE_FLTDIV, /* 54 - DZ | UFL | STK */
510 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */
511 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */
512 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */
513 FPE_FLTOVF, /* 58 - OFL | UFL | STK */
514 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */
515 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */
516 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */
517 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */
518 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */
519 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */
520 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */
521 FPE_FLTRES, /* 60 - IMP | STK */
522 FPE_FLTSUB, /* 61 - INV | IMP | STK */
523 FPE_FLTUND, /* 62 - DNML | IMP | STK */
524 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */
525 FPE_FLTDIV, /* 64 - DZ | IMP | STK */
526 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */
527 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */
528 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */
529 FPE_FLTOVF, /* 68 - OFL | IMP | STK */
530 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */
531 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */
532 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */
533 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */
534 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */
535 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */
536 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */
537 FPE_FLTUND, /* 70 - UFL | IMP | STK */
538 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */
539 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */
540 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */
541 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */
542 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */
543 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */
544 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */
545 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */
546 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */
547 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */
548 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */
549 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */
550 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */
551 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
552 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
553 };
554
555 /*
556 * Read the FP status and control words, then generate si_code value
557 * for SIGFPE. The error code chosen will be one of the
558 * FPE_... macros. It will be sent as the second argument to old
559 * BSD-style signal handlers and as "siginfo_t->si_code" (second
560 * argument) to SA_SIGINFO signal handlers.
561 *
562 * Some time ago, we cleared the x87 exceptions with FNCLEX there.
563 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs. The
564 * usermode code which understands the FPU hardware well enough to
565 * enable the exceptions can also handle clearing the exception state
566 * in the handler. The only consequence of not clearing the exception
567 * is the rethrow of the SIGFPE on return from the signal handler and
568 * reexecution of the corresponding instruction.
569 *
570 * For XMM traps, the exceptions were never cleared.
571 */
572 int
573 fputrap_x87(void)
574 {
575 struct savefpu *pcb_save;
576 u_short control, status;
577
578 critical_enter();
579
580 /*
581 * Interrupt handling (for another interrupt) may have pushed the
582 * state to memory. Fetch the relevant parts of the state from
583 * wherever they are.
584 */
585 if (PCPU_GET(fpcurthread) != curthread) {
586 pcb_save = curpcb->pcb_save;
587 control = pcb_save->sv_env.en_cw;
588 status = pcb_save->sv_env.en_sw;
589 } else {
590 fnstcw(&control);
591 fnstsw(&status);
592 }
593
594 critical_exit();
595 return (fpetable[status & ((~control & 0x3f) | 0x40)]);
596 }
597
598 int
599 fputrap_sse(void)
600 {
601 u_int mxcsr;
602
603 critical_enter();
604 if (PCPU_GET(fpcurthread) != curthread)
605 mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
606 else
607 stmxcsr(&mxcsr);
608 critical_exit();
609 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
610 }
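/*
 * Editorial worked example for the lookups above, with made-up
 * register values: suppose the user unmasked only divide-by-zero in
 * the x87 control word (control == 0x037b) and a divide fault is
 * pending (status == 0x0004).  Then
 *
 *	~control & 0x3f		== 0x04	(unmasked exception bits)
 *	(0x04 | 0x40)		== 0x44	(always consider stack fault)
 *	status & 0x44		== 0x04
 *
 * and fpetable[0x04] == FPE_FLTDIV.  fputrap_sse() applies the same
 * idea to MXCSR, which keeps the exception flags in bits 0-5 and the
 * masks in bits 7-12; "~mxcsr >> 7" aligns the inverted masks with
 * the flags before the table lookup.
 */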
611
612 /*
613 * Device Not Available (DNA, #NM) exception handler.
614 *
615 * It would be better to switch FP context here (if curthread !=
616 * fpcurthread) and not necessarily for every context switch, but it
617 * is too hard to access foreign pcb's.
618 */
619 void
620 fpudna(void)
621 {
622
623 /*
624 * This handler is entered with interrupts enabled, so context
625 * switches may occur before critical_enter() is executed. If
626 * a context switch occurs, then when we regain control, our
627 * state will have been completely restored. The CPU may
628 * change underneath us, but the only part of our context that
629 * lives in the CPU is CR0.TS and that will be "restored" by
630 * setting it on the new CPU.
631 */
632 critical_enter();
633
634 if (PCPU_GET(fpcurthread) == curthread) {
635 printf("fpudna: fpcurthread == curthread\n");
636 stop_emulating();
637 critical_exit();
638 return;
639 }
640 if (PCPU_GET(fpcurthread) != NULL) {
641 panic("fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
642 PCPU_GET(fpcurthread), PCPU_GET(fpcurthread)->td_tid,
643 curthread, curthread->td_tid);
644 }
645 stop_emulating();
646 /*
647 * Record new context early in case fpurestore() causes a trap.
648 */
649 PCPU_SET(fpcurthread, curthread);
650
651 fpu_clean_state();
652
653 if ((curpcb->pcb_flags & PCB_FPUINITDONE) == 0) {
654 /*
655 * This is the first time this thread has used the FPU or
656 * the PCB doesn't contain a clean FPU state. Explicitly
657 * load an initial state.
658 *
659 * We prefer to restore the state from the actual save
660 * area in PCB instead of directly loading from
661 * fpu_initialstate, to ignite the XSAVEOPT
662 * tracking engine.
663 */
664 bcopy(fpu_initialstate, curpcb->pcb_save, cpu_max_ext_state_size);
665 fpurestore(curpcb->pcb_save);
666 if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
667 fldcw(curpcb->pcb_initial_fpucw);
668 if (PCB_USER_FPU(curpcb))
669 set_pcb_flags(curpcb,
670 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
671 else
672 set_pcb_flags(curpcb, PCB_FPUINITDONE);
673 } else
674 fpurestore(curpcb->pcb_save);
675 critical_exit();
676 }
677
678 void
679 fpudrop()
680 {
681 struct thread *td;
682
683 td = PCPU_GET(fpcurthread);
684 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
685 CRITICAL_ASSERT(td);
686 PCPU_SET(fpcurthread, NULL);
687 clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
688 start_emulating();
689 }
690
691 /*
692 * Get the user state of the FPU into pcb->pcb_user_save without
693 * dropping ownership (if possible). It returns the FPU ownership
694 * status.
695 */
696 int
697 fpugetregs(struct thread *td)
698 {
699 struct pcb *pcb;
700 uint64_t *xstate_bv, bit;
701 char *sa;
702 int max_ext_n, i, owned;
703
704 pcb = td->td_pcb;
705 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
706 bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
707 cpu_max_ext_state_size);
708 get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
709 pcb->pcb_initial_fpucw;
710 fpuuserinited(td);
711 return (_MC_FPOWNED_PCB);
712 }
713 critical_enter();
714 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
715 fpusave(get_pcb_user_save_pcb(pcb));
716 owned = _MC_FPOWNED_FPU;
717 } else {
718 owned = _MC_FPOWNED_PCB;
719 }
720 critical_exit();
721 if (use_xsave) {
722 /*
723 * Handle partially saved state.
724 */
725 sa = (char *)get_pcb_user_save_pcb(pcb);
726 xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
727 offsetof(struct xstate_hdr, xstate_bv));
728 max_ext_n = flsl(xsave_mask);
729 for (i = 0; i < max_ext_n; i++) {
730 bit = 1ULL << i;
731 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
732 continue;
733 bcopy((char *)fpu_initialstate +
734 xsave_area_desc[i].offset,
735 sa + xsave_area_desc[i].offset,
736 xsave_area_desc[i].size);
737 *xstate_bv |= bit;
738 }
739 }
740 return (owned);
741 }
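/*
 * Editorial example of the partial-save handling above: a thread that
 * used x87/SSE but never touched AVX may have the AVX bit clear in
 * xstate_bv, in which case the hardware may not have written that
 * region of the save area at all.  The loop then copies the
 * corresponding region of fpu_initialstate (zeroes) into the user
 * save area and sets the bit, so callers always see a fully
 * populated state image.
 */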
742
743 void
744 fpuuserinited(struct thread *td)
745 {
746 struct pcb *pcb;
747
748 pcb = td->td_pcb;
749 if (PCB_USER_FPU(pcb))
750 set_pcb_flags(pcb,
751 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
752 else
753 set_pcb_flags(pcb, PCB_FPUINITDONE);
754 }
755
756 int
757 fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
758 {
759 struct xstate_hdr *hdr, *ehdr;
760 size_t len, max_len;
761 uint64_t bv;
762
763 /* XXXKIB should we clear all extended state in xstate_bv instead ? */
764 if (xfpustate == NULL)
765 return (0);
766 if (!use_xsave)
767 return (EOPNOTSUPP);
768
769 len = xfpustate_size;
770 if (len < sizeof(struct xstate_hdr))
771 return (EINVAL);
772 max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
773 if (len > max_len)
774 return (EINVAL);
775
776 ehdr = (struct xstate_hdr *)xfpustate;
777 bv = ehdr->xstate_bv;
778
779 /*
780 * Avoid #gp.
781 */
782 if (bv & ~xsave_mask)
783 return (EINVAL);
784
785 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
786
787 hdr->xstate_bv = bv;
788 bcopy(xfpustate + sizeof(struct xstate_hdr),
789 (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
790
791 return (0);
792 }
793
794 /*
795 * Set the state of the FPU.
796 */
797 int
798 fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
799 size_t xfpustate_size)
800 {
801 struct pcb *pcb;
802 int error;
803
804 pcb = td->td_pcb;
805 critical_enter();
806 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
807 error = fpusetxstate(td, xfpustate, xfpustate_size);
808 if (error != 0) {
809 critical_exit();
810 return (error);
811 }
812 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
813 fpurestore(get_pcb_user_save_td(td));
814 critical_exit();
815 set_pcb_flags(pcb, PCB_FPUINITDONE | PCB_USERFPUINITDONE);
816 } else {
817 critical_exit();
818 error = fpusetxstate(td, xfpustate, xfpustate_size);
819 if (error != 0)
820 return (error);
821 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
822 fpuuserinited(td);
823 }
824 return (0);
825 }
826
827 /*
828 * On AuthenticAMD processors, the fxrstor instruction does not restore
829 * the x87's stored last instruction pointer, last data pointer, and last
830 * opcode values, except in the rare case in which the exception summary
831 * (ES) bit in the x87 status word is set to 1.
832 *
833 * In order to avoid leaking this information across processes, we clean
834 * these values by performing a dummy load before executing fxrstor().
835 */
836 static void
837 fpu_clean_state(void)
838 {
839 static float dummy_variable = 0.0;
840 u_short status;
841
842 /*
843 * Clear the ES bit in the x87 status word if it is currently
844 * set, in order to avoid causing a fault in the upcoming load.
845 */
846 fnstsw(&status);
847 if (status & 0x80)
848 fnclex();
849
850 /*
851 * Load the dummy variable into the x87 stack. This mangles
852 * the x87 stack, but we don't care since we're about to call
853 * fxrstor() anyway.
854 */
855 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
856 }
857
858 /*
859 * This really sucks. We want the acpi version only, but it requires
860 * the isa_if.h file in order to get the definitions.
861 */
862 #include "opt_isa.h"
863 #ifdef DEV_ISA
864 #include <isa/isavar.h>
865 /*
866 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
867 */
868 static struct isa_pnp_id fpupnp_ids[] = {
869 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
870 { 0 }
871 };
872
873 static int
874 fpupnp_probe(device_t dev)
875 {
876 int result;
877
878 result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
879 if (result <= 0)
880 device_quiet(dev);
881 return (result);
882 }
883
884 static int
885 fpupnp_attach(device_t dev)
886 {
887
888 return (0);
889 }
890
891 static device_method_t fpupnp_methods[] = {
892 /* Device interface */
893 DEVMETHOD(device_probe, fpupnp_probe),
894 DEVMETHOD(device_attach, fpupnp_attach),
895 DEVMETHOD(device_detach, bus_generic_detach),
896 DEVMETHOD(device_shutdown, bus_generic_shutdown),
897 DEVMETHOD(device_suspend, bus_generic_suspend),
898 DEVMETHOD(device_resume, bus_generic_resume),
899
900 { 0, 0 }
901 };
902
903 static driver_t fpupnp_driver = {
904 "fpupnp",
905 fpupnp_methods,
906 1, /* no softc */
907 };
908
909 static devclass_t fpupnp_devclass;
910
911 DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, fpupnp_devclass, 0, 0);
912 #endif /* DEV_ISA */
913
914 static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
915 "Kernel contexts for FPU state");
916
917 #define FPU_KERN_CTX_FPUINITDONE 0x01
918 #define FPU_KERN_CTX_DUMMY 0x02 /* avoided save for the kern thread */
919
920 struct fpu_kern_ctx {
921 struct savefpu *prev;
922 uint32_t flags;
923 char hwstate1[];
924 };
925
926 struct fpu_kern_ctx *
927 fpu_kern_alloc_ctx(u_int flags)
928 {
929 struct fpu_kern_ctx *res;
930 size_t sz;
931
932 sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
933 cpu_max_ext_state_size;
934 res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
935 M_NOWAIT : M_WAITOK) | M_ZERO);
936 return (res);
937 }
938
939 void
940 fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
941 {
942
943 /* XXXKIB clear the memory ? */
944 free(ctx, M_FPUKERN_CTX);
945 }
946
947 static struct savefpu *
948 fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
949 {
950 vm_offset_t p;
951
952 p = (vm_offset_t)&ctx->hwstate1;
953 p = roundup2(p, XSAVE_AREA_ALIGN);
954 return ((struct savefpu *)p);
955 }
956
957 int
958 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
959 {
960 struct pcb *pcb;
961
962 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
963 ctx->flags = FPU_KERN_CTX_DUMMY;
964 return (0);
965 }
966 pcb = td->td_pcb;
967 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
968 get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
969 ctx->flags = 0;
970 if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
971 ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
972 fpuexit(td);
973 ctx->prev = pcb->pcb_save;
974 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
975 set_pcb_flags(pcb, PCB_KERNFPU);
976 clear_pcb_flags(pcb, PCB_FPUINITDONE);
977 return (0);
978 }
979
980 int
981 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
982 {
983 struct pcb *pcb;
984
985 if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
986 return (0);
987 KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
988 pcb = td->td_pcb;
989 critical_enter();
990 if (curthread == PCPU_GET(fpcurthread))
991 fpudrop();
992 critical_exit();
993 pcb->pcb_save = ctx->prev;
994 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
995 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
996 set_pcb_flags(pcb, PCB_FPUINITDONE);
997 clear_pcb_flags(pcb, PCB_KERNFPU);
998 } else
999 clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
1000 } else {
1001 if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
1002 set_pcb_flags(pcb, PCB_FPUINITDONE);
1003 else
1004 clear_pcb_flags(pcb, PCB_FPUINITDONE);
1005 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
1006 }
1007 return (0);
1008 }
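/*
 * Editorial usage sketch for the fpu_kern_*() API, in the style of
 * the in-tree crypto drivers (the surrounding driver code is
 * hypothetical):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);		-- may sleep
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... run SSE/AVX instructions in kernel mode ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 *
 * FPU_KERN_NORMAL is the usual flag for threads that also carry user
 * FPU state; a dedicated kernel thread can instead call
 * fpu_kern_thread() once and pass FPU_KERN_KTHR to fpu_kern_enter()
 * to skip the save/restore entirely.
 */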
1009
1010 int
1011 fpu_kern_thread(u_int flags)
1012 {
1013
1014 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1015 ("Only kthread may use fpu_kern_thread"));
1016 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
1017 ("mangled pcb_save"));
1018 KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));
1019
1020 set_pcb_flags(curpcb, PCB_KERNFPU);
1021 return (0);
1022 }
1023
1024 int
1025 is_fpu_kern_thread(u_int flags)
1026 {
1027
1028 if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1029 return (0);
1030 return ((curpcb->pcb_flags & PCB_KERNFPU) != 0);
1031 }
1032
1033 /*
1034 * FPU save area alloc/free/init utility routines
1035 */
1036 struct savefpu *
1037 fpu_save_area_alloc(void)
1038 {
1039
1040 return (uma_zalloc(fpu_save_area_zone, 0));
1041 }
1042
1043 void
1044 fpu_save_area_free(struct savefpu *fsa)
1045 {
1046
1047 uma_zfree(fpu_save_area_zone, fsa);
1048 }
1049
1050 void
1051 fpu_save_area_reset(struct savefpu *fsa)
1052 {
1053
1054 bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
1055 }