FreeBSD/Linux Kernel Cross Reference
sys/amd64/amd64/fpu.c
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1990 William Jolitz.
5 * Copyright (c) 1991 The Regents of the University of California.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 * from: @(#)npx.c 7.2 (Berkeley) 5/12/91
33 */
34
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/bus.h>
41 #include <sys/domainset.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/module.h>
46 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/sysctl.h>
50 #include <sys/sysent.h>
51 #include <machine/bus.h>
52 #include <sys/rman.h>
53 #include <sys/signalvar.h>
54 #include <vm/uma.h>
55
56 #include <machine/cputypes.h>
57 #include <machine/frame.h>
58 #include <machine/intr_machdep.h>
59 #include <machine/md_var.h>
60 #include <machine/pcb.h>
61 #include <machine/psl.h>
62 #include <machine/resource.h>
63 #include <machine/specialreg.h>
64 #include <machine/segments.h>
65 #include <machine/ucontext.h>
66 #include <x86/ifunc.h>
67
68 /*
69 * Floating point support.
70 */
71
72 #define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw))
73 #define fnclex() __asm __volatile("fnclex")
74 #define fninit() __asm __volatile("fninit")
75 #define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr)))
76 #define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr)))
77 #define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr)))
78 #define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr)))
79 #define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr))
80 #define stmxcsr(addr) __asm __volatile("stmxcsr %0" : "=m" (*(addr)))
81
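/*
 * The XSAVE-family instructions take the requested-feature bitmap in
 * %edx:%eax; the helpers below split the 64-bit mask accordingly, e.g.
 * XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE becomes low = 0x3, hi = 0.
 */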
82 static __inline void
83 xrstor32(char *addr, uint64_t mask)
84 {
85 uint32_t low, hi;
86
87 low = mask;
88 hi = mask >> 32;
89 __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
90 }
91
92 static __inline void
93 xrstor64(char *addr, uint64_t mask)
94 {
95 uint32_t low, hi;
96
97 low = mask;
98 hi = mask >> 32;
99 __asm __volatile("xrstor64 %0" : : "m" (*addr), "a" (low), "d" (hi));
100 }
101
102 static __inline void
103 xsave32(char *addr, uint64_t mask)
104 {
105 uint32_t low, hi;
106
107 low = mask;
108 hi = mask >> 32;
109 __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
110 "memory");
111 }
112
113 static __inline void
114 xsave64(char *addr, uint64_t mask)
115 {
116 uint32_t low, hi;
117
118 low = mask;
119 hi = mask >> 32;
120 __asm __volatile("xsave64 %0" : "=m" (*addr) : "a" (low), "d" (hi) :
121 "memory");
122 }
123
124 static __inline void
125 xsaveopt32(char *addr, uint64_t mask)
126 {
127 uint32_t low, hi;
128
129 low = mask;
130 hi = mask >> 32;
131 __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
132 "memory");
133 }
134
135 static __inline void
136 xsaveopt64(char *addr, uint64_t mask)
137 {
138 uint32_t low, hi;
139
140 low = mask;
141 hi = mask >> 32;
142 __asm __volatile("xsaveopt64 %0" : "=m" (*addr) : "a" (low), "d" (hi) :
143 "memory");
144 }
145
146 #define start_emulating() load_cr0(rcr0() | CR0_TS)
147 #define stop_emulating() clts()
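/*
 * "Emulating" refers to CR0.TS: with TS set, the next FPU/SSE
 * instruction raises #NM (device-not-available), which is handled by
 * fpudna() below.  This is what makes lazy loading of a thread's FPU
 * context work.
 */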
148
149 CTASSERT(sizeof(struct savefpu) == 512);
150 CTASSERT(sizeof(struct xstate_hdr) == 64);
151 CTASSERT(sizeof(struct savefpu_ymm) == 832);
152
153 /*
154  * This requirement makes it easier for asm code to calculate the
155  * offset of the FPU save area from the pcb address.  The FPU save
156  * area must be 64-byte aligned.
157 */
158 CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);
159
160 /*
161 * Ensure the copy of XCR0 saved in a core is contained in the padding
162 * area.
163 */
164 CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savefpu, sv_pad) &&
165 X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savefpu));
166
167 static void fpu_clean_state(void);
168
169 SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
170 SYSCTL_NULL_INT_PTR, 1, "Floating point instructions executed in hardware");
171
172 int use_xsave; /* non-static for cpu_switch.S */
173 uint64_t xsave_mask; /* the same */
174 static uma_zone_t fpu_save_area_zone;
175 static struct savefpu *fpu_initialstate;
176
177 static struct xsave_area_elm_descr {
178 u_int offset;
179 u_int size;
180 } *xsave_area_desc;
181
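/*
 * The 32-bit save/restore variants below are used for ILP32 processes
 * so that the x87 FIP/FDP are recorded in the segmented
 * selector:offset format those processes expect, rather than as flat
 * 64-bit addresses.
 */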
182 static void
183 fpusave_xsaveopt64(void *addr)
184 {
185 xsaveopt64((char *)addr, xsave_mask);
186 }
187
188 static void
189 fpusave_xsaveopt3264(void *addr)
190 {
191 if (SV_CURPROC_FLAG(SV_ILP32))
192 xsaveopt32((char *)addr, xsave_mask);
193 else
194 xsaveopt64((char *)addr, xsave_mask);
195 }
196
197 static void
198 fpusave_xsave64(void *addr)
199 {
200 xsave64((char *)addr, xsave_mask);
201 }
202
203 static void
204 fpusave_xsave3264(void *addr)
205 {
206 if (SV_CURPROC_FLAG(SV_ILP32))
207 xsave32((char *)addr, xsave_mask);
208 else
209 xsave64((char *)addr, xsave_mask);
210 }
211
212 static void
213 fpurestore_xrstor64(void *addr)
214 {
215 xrstor64((char *)addr, xsave_mask);
216 }
217
218 static void
219 fpurestore_xrstor3264(void *addr)
220 {
221 if (SV_CURPROC_FLAG(SV_ILP32))
222 xrstor32((char *)addr, xsave_mask);
223 else
224 xrstor64((char *)addr, xsave_mask);
225 }
226
227 static void
228 fpusave_fxsave(void *addr)
229 {
230
231 fxsave((char *)addr);
232 }
233
234 static void
235 fpurestore_fxrstor(void *addr)
236 {
237
238 fxrstor((char *)addr);
239 }
240
241 static void
242 init_xsave(void)
243 {
244
245 if (use_xsave)
246 return;
247 if ((cpu_feature2 & CPUID2_XSAVE) == 0)
248 return;
249 use_xsave = 1;
250 TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
251 }
252
253 DEFINE_IFUNC(, void, fpusave, (void *))
254 {
255
256 init_xsave();
257 if (!use_xsave)
258 return (fpusave_fxsave);
259 if ((cpu_stdext_feature & CPUID_EXTSTATE_XSAVEOPT) != 0) {
260 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
261 fpusave_xsaveopt64 : fpusave_xsaveopt3264);
262 }
263 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
264 fpusave_xsave64 : fpusave_xsave3264);
265 }
266
267 DEFINE_IFUNC(, void, fpurestore, (void *))
268 {
269
270 init_xsave();
271 if (!use_xsave)
272 return (fpurestore_fxrstor);
273 return ((cpu_stdext_feature & CPUID_STDEXT_NFPUSG) != 0 ?
274 fpurestore_xrstor64 : fpurestore_xrstor3264);
275 }
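/*
 * fpusave() and fpurestore() are ifuncs: the resolvers above run once
 * while relocations are processed at boot, so the feature tests cost
 * nothing on each call.
 */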
276
277 void
278 fpususpend(void *addr)
279 {
280 u_long cr0;
281
282 cr0 = rcr0();
283 stop_emulating();
284 fpusave(addr);
285 load_cr0(cr0);
286 }
287
288 void
289 fpuresume(void *addr)
290 {
291 u_long cr0;
292
293 cr0 = rcr0();
294 stop_emulating();
295 fninit();
296 if (use_xsave)
297 load_xcr(XCR0, xsave_mask);
298 fpurestore(addr);
299 load_cr0(cr0);
300 }
301
302 /*
303 * Enable XSAVE if supported and allowed by user.
304 * Calculate the xsave_mask.
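 * For example, setting hw.xsave_mask=0x7 from the loader trims the
 * mask to x87 | SSE | AVX even on AVX-512 hardware; the x87 and SSE
 * bits are always forced back on below.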
305 */
306 static void
307 fpuinit_bsp1(void)
308 {
309 u_int cp[4];
310 uint64_t xsave_mask_user;
311 bool old_wp;
312
313 if (!use_xsave)
314 return;
315 cpuid_count(0xd, 0x0, cp);
316 xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
317 if ((cp[0] & xsave_mask) != xsave_mask)
318 panic("CPU0 does not support X87 or SSE: %x", cp[0]);
319 xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
320 xsave_mask_user = xsave_mask;
321 TUNABLE_ULONG_FETCH("hw.xsave_mask", &xsave_mask_user);
322 xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
323 xsave_mask &= xsave_mask_user;
324 if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
325 xsave_mask &= ~XFEATURE_AVX512;
326 if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
327 xsave_mask &= ~XFEATURE_MPX;
328
329 cpuid_count(0xd, 0x1, cp);
330 if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
331 /*
332 * Patch the XSAVE instruction in the cpu_switch code
333 		 * to XSAVEOPT.  We assume that the XSAVE encoding uses a REX
334 		 * prefix, and set bit 4 of the ModRM byte (/4 -> /6).
335 *
336 * It seems that some BIOSes give control to the OS
337 * with CR0.WP already set, making the kernel text
338 * read-only before cpu_startup().
339 */
340 old_wp = disable_wp();
341 ctx_switch_xsave32[3] |= 0x10;
342 ctx_switch_xsave[3] |= 0x10;
343 restore_wp(old_wp);
344 }
345 }
346
347 /*
348 * Calculate the fpu save area size.
349 */
350 static void
351 fpuinit_bsp2(void)
352 {
353 u_int cp[4];
354
355 if (use_xsave) {
356 cpuid_count(0xd, 0x0, cp);
357 cpu_max_ext_state_size = cp[1];
358
359 /*
360 * Reload the cpu_feature2, since we enabled OSXSAVE.
361 */
362 do_cpuid(1, cp);
363 cpu_feature2 = cp[2];
364 } else
365 cpu_max_ext_state_size = sizeof(struct savefpu);
366 }
367
368 /*
369 * Initialize the floating point unit.
370 */
371 void
372 fpuinit(void)
373 {
374 register_t saveintr;
375 uint64_t cr4;
376 u_int mxcsr;
377 u_short control;
378
379 if (IS_BSP())
380 fpuinit_bsp1();
381
382 if (use_xsave) {
383 cr4 = rcr4();
384
385 /*
386 * Revert enablement of PKRU if user disabled its
387 * saving on context switches by clearing the bit in
388 * the xsave mask. Also redundantly clear the bit in
389 * cpu_stdext_feature2 to prevent pmap from ever
390 * trying to set the page table bits.
391 */
392 if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0 &&
393 (xsave_mask & XFEATURE_ENABLED_PKRU) == 0) {
394 cr4 &= ~CR4_PKE;
395 cpu_stdext_feature2 &= ~CPUID_STDEXT2_PKU;
396 }
397
398 load_cr4(cr4 | CR4_XSAVE);
399 load_xcr(XCR0, xsave_mask);
400 }
401
402 /*
403  * XCR0 shall be set up before the CPU can report the save area size.
404 */
405 if (IS_BSP())
406 fpuinit_bsp2();
407
408 /*
409  * It is too early for critical_enter() to work on an AP.
410 */
411 saveintr = intr_disable();
412 stop_emulating();
413 fninit();
414 control = __INITIAL_FPUCW__;
415 fldcw(control);
416 mxcsr = __INITIAL_MXCSR__;
417 ldmxcsr(mxcsr);
418 start_emulating();
419 intr_restore(saveintr);
420 }
421
422 /*
423 * On the boot CPU we generate a clean state that is used to
424 * initialize the floating point unit when it is first used by a
425 * process.
426 */
427 static void
428 fpuinitstate(void *arg __unused)
429 {
430 uint64_t *xstate_bv;
431 register_t saveintr;
432 int cp[4], i, max_ext_n;
433
434 /* Do potentially blocking operations before disabling interrupts. */
435 fpu_save_area_zone = uma_zcreate("FPU_save_area",
436 cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
437 XSAVE_AREA_ALIGN - 1, 0);
438 fpu_initialstate = uma_zalloc(fpu_save_area_zone, M_WAITOK | M_ZERO);
439 if (use_xsave) {
440 max_ext_n = flsl(xsave_mask);
441 xsave_area_desc = malloc(max_ext_n * sizeof(struct
442 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
443 }
444
445 cpu_thread_alloc(&thread0);
446
447 saveintr = intr_disable();
448 stop_emulating();
449
450 fpusave_fxsave(fpu_initialstate);
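	/*
	 * Per the SDM, a zero en_mxcsr_mask reported by fxsave means
	 * the default MXCSR feature mask of 0xFFBF (everything but
	 * DAZ) applies.
	 */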
451 if (fpu_initialstate->sv_env.en_mxcsr_mask)
452 cpu_mxcsr_mask = fpu_initialstate->sv_env.en_mxcsr_mask;
453 else
454 cpu_mxcsr_mask = 0xFFBF;
455
456 /*
457 * The fninit instruction does not modify XMM registers or x87
458 	 * registers (MM/ST).  The fpusave call above therefore dumped
459 	 * whatever garbage those registers held after reset into the
460 	 * saved initial state.  Clear the XMM and x87 register file
461 	 * images to make the startup program state and signal handler
462 	 * XMM/x87 register content predictable.
463 */
464 bzero(fpu_initialstate->sv_fp, sizeof(fpu_initialstate->sv_fp));
465 bzero(fpu_initialstate->sv_xmm, sizeof(fpu_initialstate->sv_xmm));
466
467 /*
468 * Create a table describing the layout of the CPU Extended
469 * Save Area. See Intel SDM rev. 075 Vol. 1 13.4.1 "Legacy
470 * Region of an XSAVE Area" for the source of offsets/sizes.
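	 * In the standard format the first extended component, AVX
	 * (bit 2), lands at offset 576 with size 256: right after the
	 * 512-byte legacy area and the 64-byte header.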
471 */
472 if (use_xsave) {
473 xstate_bv = (uint64_t *)((char *)(fpu_initialstate + 1) +
474 offsetof(struct xstate_hdr, xstate_bv));
475 *xstate_bv = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
476
477 /* x87 state */
478 xsave_area_desc[0].offset = 0;
479 xsave_area_desc[0].size = 160;
480 /* XMM */
481 xsave_area_desc[1].offset = 160;
482 xsave_area_desc[1].size = 416 - 160;
483
484 for (i = 2; i < max_ext_n; i++) {
485 cpuid_count(0xd, i, cp);
486 xsave_area_desc[i].offset = cp[1];
487 xsave_area_desc[i].size = cp[0];
488 }
489 }
490
491 start_emulating();
492 intr_restore(saveintr);
493 }
494 /* EFIRT needs this to be initialized before we can enter our EFI environment */
495 SYSINIT(fpuinitstate, SI_SUB_CPU, SI_ORDER_ANY, fpuinitstate, NULL);
496
497 /*
498 * Free coprocessor (if we have it).
499 */
500 void
501 fpuexit(struct thread *td)
502 {
503
504 critical_enter();
505 if (curthread == PCPU_GET(fpcurthread)) {
506 stop_emulating();
507 fpusave(curpcb->pcb_save);
508 start_emulating();
509 PCPU_SET(fpcurthread, NULL);
510 }
511 critical_exit();
512 }
513
514 int
515 fpuformat(void)
516 {
517
518 return (_MC_FPFMT_XMM);
519 }
520
521 /*
522 * The following mechanism is used to ensure that the FPE_... value
523 * that is passed as a trapcode to the signal handler of the user
524 * process does not have more than one bit set.
525 *
526 * Multiple bits may be set if the user process modifies the control
527 * word while a status word bit is already set. While this is a sign
528  * of bad coding, we have no choice but to narrow them down to one
529 * bit, since we must not send a trapcode that is not exactly one of
530 * the FPE_ macros.
531 *
532 * The mechanism has a static table with 127 entries. Each combination
533 * of the 7 FPU status word exception bits directly translates to a
534 * position in this table, where a single FPE_... value is stored.
535 * This FPE_... value stored there is considered the "most important"
536 * of the exception bits and will be sent as the signal code. The
537 * precedence of the bits is based upon Intel Document "Numerical
538 * Applications", Chapter "Special Computational Situations".
539 *
540 * The macro to choose one of these values does these steps: 1) Throw
541 * away status word bits that cannot be masked. 2) Throw away the bits
542 * currently masked in the control word, assuming the user isn't
543 * interested in them anymore. 3) Reinsert status word bit 7 (stack
544  * fault) if it is set, which cannot be masked but must be preserved.
545 * 4) Use the remaining bits to point into the trapcode table.
546 *
547 * The 6 maskable bits in order of their preference, as stated in the
548 * above referenced Intel manual:
549 * 1 Invalid operation (FP_X_INV)
550 * 1a Stack underflow
551 * 1b Stack overflow
552 * 1c Operand of unsupported format
553 * 1d SNaN operand.
554  *  2  QNaN operand (not an exception, irrelevant here)
555 * 3 Any other invalid-operation not mentioned above or zero divide
556 * (FP_X_INV, FP_X_DZ)
557 * 4 Denormal operand (FP_X_DNML)
558 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL)
559 * 6 Inexact result (FP_X_IMP)
560 */
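/*
 * Worked example: a process that unmasks only divide-by-zero has
 * control word low bits 111011 (0x3b); after a divide by zero the
 * status word has bit 2 (DZ) set, so fputrap_x87() below computes
 * fpetable[0x04 & ((~0x3b & 0x3f) | 0x40)] = fpetable[4] = FPE_FLTDIV.
 */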
561 static char fpetable[128] = {
562 0,
563 FPE_FLTINV, /* 1 - INV */
564 FPE_FLTUND, /* 2 - DNML */
565 FPE_FLTINV, /* 3 - INV | DNML */
566 FPE_FLTDIV, /* 4 - DZ */
567 FPE_FLTINV, /* 5 - INV | DZ */
568 FPE_FLTDIV, /* 6 - DNML | DZ */
569 FPE_FLTINV, /* 7 - INV | DNML | DZ */
570 FPE_FLTOVF, /* 8 - OFL */
571 FPE_FLTINV, /* 9 - INV | OFL */
572 FPE_FLTUND, /* A - DNML | OFL */
573 FPE_FLTINV, /* B - INV | DNML | OFL */
574 FPE_FLTDIV, /* C - DZ | OFL */
575 FPE_FLTINV, /* D - INV | DZ | OFL */
576 FPE_FLTDIV, /* E - DNML | DZ | OFL */
577 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */
578 FPE_FLTUND, /* 10 - UFL */
579 FPE_FLTINV, /* 11 - INV | UFL */
580 FPE_FLTUND, /* 12 - DNML | UFL */
581 FPE_FLTINV, /* 13 - INV | DNML | UFL */
582 FPE_FLTDIV, /* 14 - DZ | UFL */
583 FPE_FLTINV, /* 15 - INV | DZ | UFL */
584 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */
585 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */
586 FPE_FLTOVF, /* 18 - OFL | UFL */
587 FPE_FLTINV, /* 19 - INV | OFL | UFL */
588 FPE_FLTUND, /* 1A - DNML | OFL | UFL */
589 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */
590 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */
591 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */
592 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */
593 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */
594 FPE_FLTRES, /* 20 - IMP */
595 FPE_FLTINV, /* 21 - INV | IMP */
596 FPE_FLTUND, /* 22 - DNML | IMP */
597 FPE_FLTINV, /* 23 - INV | DNML | IMP */
598 FPE_FLTDIV, /* 24 - DZ | IMP */
599 FPE_FLTINV, /* 25 - INV | DZ | IMP */
600 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */
601 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */
602 FPE_FLTOVF, /* 28 - OFL | IMP */
603 FPE_FLTINV, /* 29 - INV | OFL | IMP */
604 FPE_FLTUND, /* 2A - DNML | OFL | IMP */
605 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */
606 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */
607 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */
608 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */
609 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */
610 FPE_FLTUND, /* 30 - UFL | IMP */
611 FPE_FLTINV, /* 31 - INV | UFL | IMP */
612 FPE_FLTUND, /* 32 - DNML | UFL | IMP */
613 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */
614 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */
615 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */
616 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */
617 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */
618 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */
619 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */
620 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */
621 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */
622 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */
623 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */
624 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */
625 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */
626 FPE_FLTSUB, /* 40 - STK */
627 FPE_FLTSUB, /* 41 - INV | STK */
628 FPE_FLTUND, /* 42 - DNML | STK */
629 FPE_FLTSUB, /* 43 - INV | DNML | STK */
630 FPE_FLTDIV, /* 44 - DZ | STK */
631 FPE_FLTSUB, /* 45 - INV | DZ | STK */
632 FPE_FLTDIV, /* 46 - DNML | DZ | STK */
633 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */
634 FPE_FLTOVF, /* 48 - OFL | STK */
635 FPE_FLTSUB, /* 49 - INV | OFL | STK */
636 FPE_FLTUND, /* 4A - DNML | OFL | STK */
637 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */
638 FPE_FLTDIV, /* 4C - DZ | OFL | STK */
639 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */
640 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */
641 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */
642 FPE_FLTUND, /* 50 - UFL | STK */
643 FPE_FLTSUB, /* 51 - INV | UFL | STK */
644 FPE_FLTUND, /* 52 - DNML | UFL | STK */
645 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */
646 FPE_FLTDIV, /* 54 - DZ | UFL | STK */
647 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */
648 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */
649 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */
650 FPE_FLTOVF, /* 58 - OFL | UFL | STK */
651 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */
652 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */
653 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */
654 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */
655 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */
656 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */
657 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */
658 FPE_FLTRES, /* 60 - IMP | STK */
659 FPE_FLTSUB, /* 61 - INV | IMP | STK */
660 FPE_FLTUND, /* 62 - DNML | IMP | STK */
661 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */
662 FPE_FLTDIV, /* 64 - DZ | IMP | STK */
663 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */
664 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */
665 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */
666 FPE_FLTOVF, /* 68 - OFL | IMP | STK */
667 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */
668 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */
669 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */
670 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */
671 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */
672 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */
673 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */
674 FPE_FLTUND, /* 70 - UFL | IMP | STK */
675 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */
676 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */
677 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */
678 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */
679 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */
680 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */
681 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */
682 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */
683 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */
684 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */
685 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */
686 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */
687 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */
688 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */
689 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
690 };
691
692 /*
693 * Read the FP status and control words, then generate si_code value
694 * for SIGFPE. The error code chosen will be one of the
695 * FPE_... macros. It will be sent as the second argument to old
696 * BSD-style signal handlers and as "siginfo_t->si_code" (second
697 * argument) to SA_SIGINFO signal handlers.
698 *
699  * Some time ago, we cleared the x87 exceptions with FNCLEX here.
700  * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.
701  * Usermode code that understands the FPU hardware well enough to
702  * enable the exceptions can also handle clearing the exception state
703  * in the handler.  The only consequence of not clearing the exception is the
704 * rethrow of the SIGFPE on return from the signal handler and
705 * reexecution of the corresponding instruction.
706 *
707 * For XMM traps, the exceptions were never cleared.
708 */
709 int
710 fputrap_x87(void)
711 {
712 struct savefpu *pcb_save;
713 u_short control, status;
714
715 critical_enter();
716
717 /*
718 * Interrupt handling (for another interrupt) may have pushed the
719 * state to memory. Fetch the relevant parts of the state from
720 * wherever they are.
721 */
722 if (PCPU_GET(fpcurthread) != curthread) {
723 pcb_save = curpcb->pcb_save;
724 control = pcb_save->sv_env.en_cw;
725 status = pcb_save->sv_env.en_sw;
726 } else {
727 fnstcw(&control);
728 fnstsw(&status);
729 }
730
731 critical_exit();
732 return (fpetable[status & ((~control & 0x3f) | 0x40)]);
733 }
734
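/*
 * In MXCSR, the exception flags occupy bits 0-5 and the corresponding
 * mask bits occupy bits 7-12, so (~mxcsr >> 7) lines the
 * unmasked-exception bits up with the flags before indexing fpetable.
 */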
735 int
736 fputrap_sse(void)
737 {
738 u_int mxcsr;
739
740 critical_enter();
741 if (PCPU_GET(fpcurthread) != curthread)
742 mxcsr = curpcb->pcb_save->sv_env.en_mxcsr;
743 else
744 stmxcsr(&mxcsr);
745 critical_exit();
746 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
747 }
748
749 static void
750 restore_fpu_curthread(struct thread *td)
751 {
752 struct pcb *pcb;
753
754 /*
755 * Record new context early in case frstor causes a trap.
756 */
757 PCPU_SET(fpcurthread, td);
758
759 stop_emulating();
760 fpu_clean_state();
761 pcb = td->td_pcb;
762
763 if ((pcb->pcb_flags & PCB_FPUINITDONE) == 0) {
764 /*
765 * This is the first time this thread has used the FPU or
766 * the PCB doesn't contain a clean FPU state. Explicitly
767 * load an initial state.
768 *
769 * We prefer to restore the state from the actual save
770 		 * area in the PCB instead of loading directly from
771 		 * fpu_initialstate, in order to prime the XSAVEOPT
772 * tracking engine.
773 */
774 bcopy(fpu_initialstate, pcb->pcb_save,
775 cpu_max_ext_state_size);
776 fpurestore(pcb->pcb_save);
777 if (pcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
778 fldcw(pcb->pcb_initial_fpucw);
779 if (PCB_USER_FPU(pcb))
780 set_pcb_flags(pcb, PCB_FPUINITDONE |
781 PCB_USERFPUINITDONE);
782 else
783 set_pcb_flags(pcb, PCB_FPUINITDONE);
784 } else
785 fpurestore(pcb->pcb_save);
786 }
787
788 /*
789 * Device Not Available (DNA, #NM) exception handler.
790 *
791 * It would be better to switch FP context here (if curthread !=
792 * fpcurthread) and not necessarily for every context switch, but it
793 * is too hard to access foreign pcb's.
794 */
795 void
796 fpudna(void)
797 {
798 struct thread *td;
799
800 td = curthread;
801 /*
802 * This handler is entered with interrupts enabled, so context
803 * switches may occur before critical_enter() is executed. If
804 * a context switch occurs, then when we regain control, our
805 * state will have been completely restored. The CPU may
806 * change underneath us, but the only part of our context that
807 * lives in the CPU is CR0.TS and that will be "restored" by
808 * setting it on the new CPU.
809 */
810 critical_enter();
811
812 KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
813 ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
814 if (__predict_false(PCPU_GET(fpcurthread) == td)) {
815 /*
816 		 * Some virtual machines seem to set %cr0.TS at
817 * arbitrary moments. Silently clear the TS bit
818 * regardless of the eager/lazy FPU context switch
819 * mode.
820 */
821 stop_emulating();
822 } else {
823 if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
824 panic(
825 "fpudna: fpcurthread = %p (%d), curthread = %p (%d)\n",
826 PCPU_GET(fpcurthread),
827 PCPU_GET(fpcurthread)->td_tid, td, td->td_tid);
828 }
829 restore_fpu_curthread(td);
830 }
831 critical_exit();
832 }
833
834 void fpu_activate_sw(struct thread *td); /* Called from the context switch */
835 void
836 fpu_activate_sw(struct thread *td)
837 {
838
839 if ((td->td_pflags & TDP_KTHREAD) != 0 || !PCB_USER_FPU(td->td_pcb)) {
840 PCPU_SET(fpcurthread, NULL);
841 start_emulating();
842 } else if (PCPU_GET(fpcurthread) != td) {
843 restore_fpu_curthread(td);
844 }
845 }
846
847 void
848 fpudrop(void)
849 {
850 struct thread *td;
851
852 td = PCPU_GET(fpcurthread);
853 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
854 CRITICAL_ASSERT(td);
855 PCPU_SET(fpcurthread, NULL);
856 clear_pcb_flags(td->td_pcb, PCB_FPUINITDONE);
857 start_emulating();
858 }
859
860 /*
861 * Get the user state of the FPU into pcb->pcb_user_save without
862 * dropping ownership (if possible). It returns the FPU ownership
863 * status.
864 */
865 int
866 fpugetregs(struct thread *td)
867 {
868 struct pcb *pcb;
869 uint64_t *xstate_bv, bit;
870 char *sa;
871 int max_ext_n, i, owned;
872
873 pcb = td->td_pcb;
874 critical_enter();
875 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) == 0) {
876 bcopy(fpu_initialstate, get_pcb_user_save_pcb(pcb),
877 cpu_max_ext_state_size);
878 get_pcb_user_save_pcb(pcb)->sv_env.en_cw =
879 pcb->pcb_initial_fpucw;
880 fpuuserinited(td);
881 critical_exit();
882 return (_MC_FPOWNED_PCB);
883 }
884 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
885 fpusave(get_pcb_user_save_pcb(pcb));
886 owned = _MC_FPOWNED_FPU;
887 } else {
888 owned = _MC_FPOWNED_PCB;
889 }
890 if (use_xsave) {
891 /*
892 		 * Handle partially saved state by merging in initial values.
893 */
894 sa = (char *)get_pcb_user_save_pcb(pcb);
895 xstate_bv = (uint64_t *)(sa + sizeof(struct savefpu) +
896 offsetof(struct xstate_hdr, xstate_bv));
897 max_ext_n = flsl(xsave_mask);
898 for (i = 0; i < max_ext_n; i++) {
899 bit = 1ULL << i;
900 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
901 continue;
902 bcopy((char *)fpu_initialstate +
903 xsave_area_desc[i].offset,
904 sa + xsave_area_desc[i].offset,
905 xsave_area_desc[i].size);
906 *xstate_bv |= bit;
907 }
908 }
909 critical_exit();
910 return (owned);
911 }
912
913 void
914 fpuuserinited(struct thread *td)
915 {
916 struct pcb *pcb;
917
918 CRITICAL_ASSERT(td);
919 pcb = td->td_pcb;
920 if (PCB_USER_FPU(pcb))
921 set_pcb_flags(pcb,
922 PCB_FPUINITDONE | PCB_USERFPUINITDONE);
923 else
924 set_pcb_flags(pcb, PCB_FPUINITDONE);
925 }
926
927 int
928 fpusetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
929 {
930 struct xstate_hdr *hdr, *ehdr;
931 size_t len, max_len;
932 uint64_t bv;
933
934 /* XXXKIB should we clear all extended state in xstate_bv instead ? */
935 if (xfpustate == NULL)
936 return (0);
937 if (!use_xsave)
938 return (EOPNOTSUPP);
939
940 len = xfpustate_size;
941 if (len < sizeof(struct xstate_hdr))
942 return (EINVAL);
943 max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
944 if (len > max_len)
945 return (EINVAL);
946
947 ehdr = (struct xstate_hdr *)xfpustate;
948 bv = ehdr->xstate_bv;
949
950 /*
951 	 * Avoid #gp: xrstor faults if xstate_bv has bits not in xsave_mask.
952 */
953 if (bv & ~xsave_mask)
954 return (EINVAL);
955
956 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);
957
958 hdr->xstate_bv = bv;
959 bcopy(xfpustate + sizeof(struct xstate_hdr),
960 (char *)(hdr + 1), len - sizeof(struct xstate_hdr));
961
962 return (0);
963 }
964
965 /*
966 * Set the state of the FPU.
967 */
968 int
969 fpusetregs(struct thread *td, struct savefpu *addr, char *xfpustate,
970 size_t xfpustate_size)
971 {
972 struct pcb *pcb;
973 int error;
974
975 addr->sv_env.en_mxcsr &= cpu_mxcsr_mask;
976 pcb = td->td_pcb;
977 error = 0;
978 critical_enter();
979 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
980 error = fpusetxstate(td, xfpustate, xfpustate_size);
981 if (error == 0) {
982 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
983 fpurestore(get_pcb_user_save_td(td));
984 set_pcb_flags(pcb, PCB_FPUINITDONE |
985 PCB_USERFPUINITDONE);
986 }
987 } else {
988 error = fpusetxstate(td, xfpustate, xfpustate_size);
989 if (error == 0) {
990 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
991 fpuuserinited(td);
992 }
993 }
994 critical_exit();
995 return (error);
996 }
997
998 /*
999 * On AuthenticAMD processors, the fxrstor instruction does not restore
1000 * the x87's stored last instruction pointer, last data pointer, and last
1001 * opcode values, except in the rare case in which the exception summary
1002 * (ES) bit in the x87 status word is set to 1.
1003 *
1004 * In order to avoid leaking this information across processes, we clean
1005 * these values by performing a dummy load before executing fxrstor().
1006 */
1007 static void
1008 fpu_clean_state(void)
1009 {
1010 static float dummy_variable = 0.0;
1011 u_short status;
1012
1013 /*
1014 * Clear the ES bit in the x87 status word if it is currently
1015 * set, in order to avoid causing a fault in the upcoming load.
1016 */
1017 fnstsw(&status);
1018 if (status & 0x80)
1019 fnclex();
1020
1021 /*
1022 * Load the dummy variable into the x87 stack. This mangles
1023 * the x87 stack, but we don't care since we're about to call
1024 * fxrstor() anyway.
1025 */
1026 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
1027 }
1028
1029 /*
1030 * This really sucks. We want the acpi version only, but it requires
1031 * the isa_if.h file in order to get the definitions.
1032 */
1033 #include "opt_isa.h"
1034 #ifdef DEV_ISA
1035 #include <isa/isavar.h>
1036 /*
1037 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
1038 */
1039 static struct isa_pnp_id fpupnp_ids[] = {
1040 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
1041 { 0 }
1042 };
1043
1044 static int
1045 fpupnp_probe(device_t dev)
1046 {
1047 int result;
1048
1049 result = ISA_PNP_PROBE(device_get_parent(dev), dev, fpupnp_ids);
1050 if (result <= 0)
1051 device_quiet(dev);
1052 return (result);
1053 }
1054
1055 static int
1056 fpupnp_attach(device_t dev)
1057 {
1058
1059 return (0);
1060 }
1061
1062 static device_method_t fpupnp_methods[] = {
1063 /* Device interface */
1064 DEVMETHOD(device_probe, fpupnp_probe),
1065 DEVMETHOD(device_attach, fpupnp_attach),
1066 DEVMETHOD(device_detach, bus_generic_detach),
1067 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1068 DEVMETHOD(device_suspend, bus_generic_suspend),
1069 DEVMETHOD(device_resume, bus_generic_resume),
1070 { 0, 0 }
1071 };
1072
1073 static driver_t fpupnp_driver = {
1074 "fpupnp",
1075 fpupnp_methods,
1076 1, /* no softc */
1077 };
1078
1079 DRIVER_MODULE(fpupnp, acpi, fpupnp_driver, 0, 0);
1080 ISA_PNP_INFO(fpupnp_ids);
1081 #endif /* DEV_ISA */
1082
1083 static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
1084 "Kernel contexts for FPU state");
1085
1086 #define FPU_KERN_CTX_FPUINITDONE 0x01
1087 #define FPU_KERN_CTX_DUMMY 0x02 /* avoided save for the kern thread */
1088 #define FPU_KERN_CTX_INUSE 0x04
1089
1090 struct fpu_kern_ctx {
1091 struct savefpu *prev;
1092 uint32_t flags;
1093 char hwstate1[];
1094 };
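/*
 * A minimal usage sketch for the fpu_kern API (a hypothetical driver
 * doing SSE work in kernel mode; FPU_KERN_NORMAL requests the default
 * save/restore behavior):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
 *	... SSE/AVX computation ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */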
1095
1096 static inline size_t __pure2
1097 fpu_kern_alloc_sz(u_int max_est)
1098 {
1099 return (sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN + max_est);
1100 }
1101
1102 static inline int __pure2
1103 fpu_kern_malloc_flags(u_int fpflags)
1104 {
1105 return (((fpflags & FPU_KERN_NOWAIT) ? M_NOWAIT : M_WAITOK) | M_ZERO);
1106 }
1107
1108 struct fpu_kern_ctx *
1109 fpu_kern_alloc_ctx_domain(int domain, u_int flags)
1110 {
1111 return (malloc_domainset(fpu_kern_alloc_sz(cpu_max_ext_state_size),
1112 M_FPUKERN_CTX, DOMAINSET_PREF(domain),
1113 fpu_kern_malloc_flags(flags)));
1114 }
1115
1116 struct fpu_kern_ctx *
1117 fpu_kern_alloc_ctx(u_int flags)
1118 {
1119 return (malloc(fpu_kern_alloc_sz(cpu_max_ext_state_size),
1120 M_FPUKERN_CTX, fpu_kern_malloc_flags(flags)));
1121 }
1122
1123 void
1124 fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
1125 {
1126
1127 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
1128 /* XXXKIB clear the memory ? */
1129 free(ctx, M_FPUKERN_CTX);
1130 }
1131
1132 static struct savefpu *
1133 fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
1134 {
1135 vm_offset_t p;
1136
1137 p = (vm_offset_t)&ctx->hwstate1;
1138 p = roundup2(p, XSAVE_AREA_ALIGN);
1139 return ((struct savefpu *)p);
1140 }
1141
1142 void
1143 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
1144 {
1145 struct pcb *pcb;
1146
1147 pcb = td->td_pcb;
1148 KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
1149 ("ctx is required when !FPU_KERN_NOCTX"));
1150 KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
1151 ("using inuse ctx"));
1152 KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
1153 ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));
1154
1155 if ((flags & FPU_KERN_NOCTX) != 0) {
1156 critical_enter();
1157 stop_emulating();
1158 if (curthread == PCPU_GET(fpcurthread)) {
1159 fpusave(curpcb->pcb_save);
1160 PCPU_SET(fpcurthread, NULL);
1161 } else {
1162 KASSERT(PCPU_GET(fpcurthread) == NULL,
1163 ("invalid fpcurthread"));
1164 }
1165
1166 /*
1167 		 * This breaks the XSAVEOPT tracker, but
1168 * PCB_FPUNOSAVE state is supposed to never need to
1169 * save FPU context at all.
1170 */
1171 fpurestore(fpu_initialstate);
1172 set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
1173 PCB_FPUINITDONE);
1174 return;
1175 }
1176 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
1177 ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
1178 return;
1179 }
1180 critical_enter();
1181 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
1182 get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
1183 ctx->flags = FPU_KERN_CTX_INUSE;
1184 if ((pcb->pcb_flags & PCB_FPUINITDONE) != 0)
1185 ctx->flags |= FPU_KERN_CTX_FPUINITDONE;
1186 fpuexit(td);
1187 ctx->prev = pcb->pcb_save;
1188 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
1189 set_pcb_flags(pcb, PCB_KERNFPU);
1190 clear_pcb_flags(pcb, PCB_FPUINITDONE);
1191 critical_exit();
1192 }
1193
1194 int
1195 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
1196 {
1197 struct pcb *pcb;
1198
1199 pcb = td->td_pcb;
1200
1201 if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
1202 KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
1203 KASSERT(PCPU_GET(fpcurthread) == NULL,
1204 ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
1205 CRITICAL_ASSERT(td);
1206
1207 clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);
1208 start_emulating();
1209 } else {
1210 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
1211 ("leaving not inuse ctx"));
1212 ctx->flags &= ~FPU_KERN_CTX_INUSE;
1213
1214 if (is_fpu_kern_thread(0) &&
1215 (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
1216 return (0);
1217 KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0,
1218 ("dummy ctx"));
1219 critical_enter();
1220 if (curthread == PCPU_GET(fpcurthread))
1221 fpudrop();
1222 pcb->pcb_save = ctx->prev;
1223 }
1224
1225 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
1226 if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
1227 set_pcb_flags(pcb, PCB_FPUINITDONE);
1228 if ((pcb->pcb_flags & PCB_KERNFPU_THR) == 0)
1229 clear_pcb_flags(pcb, PCB_KERNFPU);
1230 } else if ((pcb->pcb_flags & PCB_KERNFPU_THR) == 0)
1231 clear_pcb_flags(pcb, PCB_FPUINITDONE | PCB_KERNFPU);
1232 } else {
1233 if ((ctx->flags & FPU_KERN_CTX_FPUINITDONE) != 0)
1234 set_pcb_flags(pcb, PCB_FPUINITDONE);
1235 else
1236 clear_pcb_flags(pcb, PCB_FPUINITDONE);
1237 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
1238 }
1239 critical_exit();
1240 return (0);
1241 }
1242
1243 int
1244 fpu_kern_thread(u_int flags)
1245 {
1246
1247 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1248 ("Only kthread may use fpu_kern_thread"));
1249 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
1250 ("mangled pcb_save"));
1251 KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));
1252
1253 set_pcb_flags(curpcb, PCB_KERNFPU | PCB_KERNFPU_THR);
1254 return (0);
1255 }
1256
1257 int
1258 is_fpu_kern_thread(u_int flags)
1259 {
1260
1261 if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1262 return (0);
1263 return ((curpcb->pcb_flags & PCB_KERNFPU_THR) != 0);
1264 }
1265
1266 /*
1267 * FPU save area alloc/free/init utility routines
1268 */
1269 struct savefpu *
1270 fpu_save_area_alloc(void)
1271 {
1272
1273 return (uma_zalloc(fpu_save_area_zone, M_WAITOK));
1274 }
1275
1276 void
1277 fpu_save_area_free(struct savefpu *fsa)
1278 {
1279
1280 uma_zfree(fpu_save_area_zone, fsa);
1281 }
1282
1283 void
1284 fpu_save_area_reset(struct savefpu *fsa)
1285 {
1286
1287 bcopy(fpu_initialstate, fsa, cpu_max_ext_state_size);
1288 }