1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 /*-
34 * Copyright (C) 2001 Benno Rice
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
51 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
52 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
53 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
54 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
55 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 */
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 #include "opt_fpu_emu.h"
63
64 #include <sys/param.h>
65 #include <sys/proc.h>
66 #include <sys/systm.h>
67 #include <sys/bio.h>
68 #include <sys/buf.h>
69 #include <sys/bus.h>
70 #include <sys/cons.h>
71 #include <sys/cpu.h>
72 #include <sys/exec.h>
73 #include <sys/imgact.h>
74 #include <sys/kernel.h>
75 #include <sys/ktr.h>
76 #include <sys/lock.h>
77 #include <sys/malloc.h>
78 #include <sys/mutex.h>
79 #include <sys/reg.h>
80 #include <sys/signalvar.h>
81 #include <sys/syscallsubr.h>
82 #include <sys/syscall.h>
83 #include <sys/sysent.h>
84 #include <sys/sysproto.h>
85 #include <sys/ucontext.h>
86 #include <sys/uio.h>
87
88 #include <machine/altivec.h>
89 #include <machine/cpu.h>
90 #include <machine/elf.h>
91 #include <machine/fpu.h>
92 #include <machine/pcb.h>
93 #include <machine/sigframe.h>
94 #include <machine/trap.h>
95 #include <machine/vmparam.h>
96
97 #include <vm/vm.h>
98 #include <vm/vm_param.h>
99 #include <vm/pmap.h>
100 #include <vm/vm_map.h>
101
102 #ifdef FPU_EMU
103 #include <powerpc/fpu/fpu_extern.h>
104 #endif
105
106 #ifdef COMPAT_FREEBSD32
107 #include <compat/freebsd32/freebsd32_signal.h>
108 #include <compat/freebsd32/freebsd32_util.h>
109 #include <compat/freebsd32/freebsd32_proto.h>
110
/*
 * 32-bit view of a ucontext, laid out for 32-bit userland under
 * COMPAT_FREEBSD32; pointer fields are carried as 32-bit integers.
 */
typedef struct __ucontext32 {
	sigset_t	uc_sigmask;	/* signals blocked in this context */
	mcontext32_t	uc_mcontext;	/* saved 32-bit machine state */
	uint32_t	uc_link;	/* 32-bit pointer to linked context */
	struct sigaltstack32 uc_stack;	/* stack in use by this context */
	uint32_t	uc_flags;
	uint32_t	__spare__[4];	/* reserved for future use */
} ucontext32_t;
119
/* Signal frame pushed on the user stack for 32-bit processes. */
struct sigframe32 {
	ucontext32_t	sf_uc;		/* saved user context */
	struct siginfo32 sf_si;		/* signal info (SA_SIGINFO handlers) */
};
124
125 static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
126 #endif
127
128 static int grab_mcontext(struct thread *, mcontext_t *, int);
129
130 static void cleanup_power_extras(struct thread *);
131
132 #ifdef __powerpc64__
133 extern struct sysentvec elf64_freebsd_sysvec_v2;
134 #endif
135
136 #ifdef __powerpc64__
137 _Static_assert(sizeof(mcontext_t) == 1392, "mcontext_t size incorrect");
138 _Static_assert(sizeof(ucontext_t) == 1472, "ucontext_t size incorrect");
139 _Static_assert(sizeof(siginfo_t) == 80, "siginfo_t size incorrect");
140 #ifdef COMPAT_FREEBSD32
141 _Static_assert(sizeof(mcontext32_t) == 1224, "mcontext32_t size incorrect");
142 _Static_assert(sizeof(ucontext32_t) == 1280, "ucontext32_t size incorrect");
143 _Static_assert(sizeof(struct siginfo32) == 64, "struct siginfo32 size incorrect");
144 #endif /* COMPAT_FREEBSD32 */
145 #else /* powerpc */
146 _Static_assert(sizeof(mcontext_t) == 1224, "mcontext_t size incorrect");
147 _Static_assert(sizeof(ucontext_t) == 1280, "ucontext_t size incorrect");
148 _Static_assert(sizeof(siginfo_t) == 64, "siginfo_t size incorrect");
149 #endif
150
151 void
152 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
153 {
154 struct trapframe *tf;
155 struct sigacts *psp;
156 struct sigframe sf;
157 struct thread *td;
158 struct proc *p;
159 #ifdef COMPAT_FREEBSD32
160 struct siginfo32 siginfo32;
161 struct sigframe32 sf32;
162 #endif
163 size_t sfpsize;
164 caddr_t sfp, usfp;
165 register_t sp;
166 int oonstack, rndfsize;
167 int sig;
168 int code;
169
170 td = curthread;
171 p = td->td_proc;
172 PROC_LOCK_ASSERT(p, MA_OWNED);
173
174 psp = p->p_sigacts;
175 mtx_assert(&psp->ps_mtx, MA_OWNED);
176 tf = td->td_frame;
177
178 /*
179 * Fill siginfo structure.
180 */
181 ksi->ksi_info.si_signo = ksi->ksi_signo;
182 ksi->ksi_info.si_addr =
183 (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ?
184 tf->dar : tf->srr0);
185
186 #ifdef COMPAT_FREEBSD32
187 if (SV_PROC_FLAG(p, SV_ILP32)) {
188 siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
189 sig = siginfo32.si_signo;
190 code = siginfo32.si_code;
191 sfp = (caddr_t)&sf32;
192 sfpsize = sizeof(sf32);
193 rndfsize = roundup(sizeof(sf32), 16);
194 sp = (uint32_t)tf->fixreg[1];
195 oonstack = sigonstack(sp);
196
197 /*
198 * Save user context
199 */
200
201 memset(&sf32, 0, sizeof(sf32));
202 grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
203
204 sf32.sf_uc.uc_sigmask = *mask;
205 sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
206 sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
207 sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
208 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
209
210 sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
211 } else {
212 #endif
213 sig = ksi->ksi_signo;
214 code = ksi->ksi_code;
215 sfp = (caddr_t)&sf;
216 sfpsize = sizeof(sf);
217 #ifdef __powerpc64__
218 /*
219 * 64-bit PPC defines a 288 byte scratch region
220 * below the stack.
221 */
222 rndfsize = 288 + roundup(sizeof(sf), 48);
223 #else
224 rndfsize = roundup(sizeof(sf), 16);
225 #endif
226 sp = tf->fixreg[1];
227 oonstack = sigonstack(sp);
228
229 /*
230 * Save user context
231 */
232
233 memset(&sf, 0, sizeof(sf));
234 grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
235
236 sf.sf_uc.uc_sigmask = *mask;
237 sf.sf_uc.uc_stack = td->td_sigstk;
238 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
239 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
240
241 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
242 #ifdef COMPAT_FREEBSD32
243 }
244 #endif
245
246 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
247 catcher, sig);
248
249 /*
250 * Allocate and validate space for the signal handler context.
251 */
252 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
253 SIGISMEMBER(psp->ps_sigonstack, sig)) {
254 usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp +
255 td->td_sigstk.ss_size - rndfsize) & ~0xFul);
256 } else {
257 usfp = (void *)((sp - rndfsize) & ~0xFul);
258 }
259
260 /*
261 * Set Floating Point facility to "Ignore Exceptions Mode" so signal
262 * handler can run.
263 */
264 if (td->td_pcb->pcb_flags & PCB_FPU)
265 tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1);
266
267 /*
268 * Set up the registers to return to sigcode.
269 *
270 * r1/sp - sigframe ptr
271 * lr - sig function, dispatched to by blrl in trampoline
272 * r3 - sig number
273 * r4 - SIGINFO ? &siginfo : exception code
274 * r5 - user context
275 * srr0 - trampoline function addr
276 */
277 tf->lr = (register_t)catcher;
278 tf->fixreg[1] = (register_t)usfp;
279 tf->fixreg[FIRSTARG] = sig;
280 #ifdef COMPAT_FREEBSD32
281 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
282 ((SV_PROC_FLAG(p, SV_ILP32)) ?
283 offsetof(struct sigframe32, sf_uc) :
284 offsetof(struct sigframe, sf_uc));
285 #else
286 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
287 offsetof(struct sigframe, sf_uc);
288 #endif
289 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
290 /*
291 * Signal handler installed with SA_SIGINFO.
292 */
293 #ifdef COMPAT_FREEBSD32
294 if (SV_PROC_FLAG(p, SV_ILP32)) {
295 sf32.sf_si = siginfo32;
296 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
297 offsetof(struct sigframe32, sf_si);
298 sf32.sf_si = siginfo32;
299 } else {
300 #endif
301 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
302 offsetof(struct sigframe, sf_si);
303 sf.sf_si = ksi->ksi_info;
304 #ifdef COMPAT_FREEBSD32
305 }
306 #endif
307 } else {
308 /* Old FreeBSD-style arguments. */
309 tf->fixreg[FIRSTARG+1] = code;
310 tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
311 tf->dar : tf->srr0;
312 }
313 mtx_unlock(&psp->ps_mtx);
314 PROC_UNLOCK(p);
315
316 tf->srr0 = (register_t)PROC_SIGCODE(p);
317
318 /*
319 * copy the frame out to userland.
320 */
321 if (copyout(sfp, usfp, sfpsize) != 0) {
322 /*
323 * Process has trashed its stack. Kill it.
324 */
325 CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
326 PROC_LOCK(p);
327 sigexit(td, SIGILL);
328 }
329
330 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
331 tf->srr0, tf->fixreg[1]);
332
333 PROC_LOCK(p);
334 mtx_lock(&psp->ps_mtx);
335 }
336
337 int
338 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
339 {
340 ucontext_t uc;
341 int error;
342
343 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
344
345 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
346 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
347 return (EFAULT);
348 }
349
350 error = set_mcontext(td, &uc.uc_mcontext);
351 if (error != 0)
352 return (error);
353
354 /*
355 * Save FPU state if needed. User may have changed it on
356 * signal handler
357 */
358 if (uc.uc_mcontext.mc_srr1 & PSL_FP)
359 save_fpu(td);
360
361 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
362
363 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
364 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
365
366 return (EJUSTRETURN);
367 }
368
369 #ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{
	/* The FreeBSD 4.x argument layout is compatible; reuse the native path. */
	return (sys_sigreturn(td, (struct sigreturn_args *)uap));
}
376 #endif
377
378 /*
379 * Construct a PCB from a trapframe. This is called from kdb_trap() where
380 * we want to start a backtrace from the function that caused us to enter
381 * the debugger. We have the context in the trapframe, but base the trace
382 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
383 * enough for a backtrace.
384 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	/* Only LR and SP are needed to seed the debugger's backtrace. */
	pcb->pcb_lr = tf->srr0;
	pcb->pcb_sp = tf->fixreg[1];
}
392
393 /*
394 * get_mcontext/sendsig helper routine that doesn't touch the
395 * proc lock
396 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	int i;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	/* The trapframe supplies the GPR/SPR portion of the context. */
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		/* Clear the syscall return registers (r3/r4). */
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPREGS) {
		if (pcb->pcb_flags & PCB_FPU) {
			/* FP state is live in registers; flush to the PCB. */
			KASSERT(td == curthread,
			    ("get_mcontext: fp save not curthread"));
			critical_enter();
			save_fpu(td);
			critical_exit();
		}
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
			    sizeof(double));
	}

	if (pcb->pcb_flags & PCB_VSX) {
		/*
		 * Save the other doubleword of each VSX register (the FP
		 * doubleword was saved above) — word index 2 of vsr[].
		 */
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_vsxfpreg[i],
			    &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
	}

	/*
	 * Repeat for Altivec context
	 */

	if (pcb->pcb_flags & PCB_VEC) {
		KASSERT(td == curthread,
		    ("get_mcontext: fp save not curthread"));
		critical_enter();
		save_vec(td);
		critical_exit();
		mcp->mc_flags |= _MC_AV_VALID;
		mcp->mc_vscr = pcb->pcb_vec.vscr;
		mcp->mc_vrsave = pcb->pcb_vec.vrsave;
		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
	}

	mcp->mc_len = sizeof(*mcp);

	return (0);
}
462
463 int
464 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
465 {
466 int error;
467
468 error = grab_mcontext(td, mcp, flags);
469 if (error == 0) {
470 PROC_LOCK(curthread->td_proc);
471 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
472 PROC_UNLOCK(curthread->td_proc);
473 }
474
475 return (error);
476 }
477
/*
 * Install a user-supplied machine context into the thread.  The version,
 * length, and privileged MSR bits are validated before anything is
 * committed; returns EINVAL on a malformed or privilege-violating
 * context, 0 on success.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;
	register_t tls;
	int i;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user change privileged MSR bits.
	 *
	 * psl_userstatic is used here to mask off any bits that can
	 * legitimately vary between user contexts (Floating point
	 * exception control and any facilities that we are using the
	 * "enable on first use" pattern with.)
	 *
	 * All other bits are required to match psl_userset(32).
	 *
	 * Remember to update the platform cpu_init code when implementing
	 * support for a new conditional facility!
	 */
	if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {
		return (EINVAL);
	}

	/* Copy trapframe, preserving TLS pointer across context change */
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tls = tf->fixreg[13];	/* 64-bit ABI keeps TLS in r13 */
	else
		tls = tf->fixreg[2];	/* 32-bit ABI keeps TLS in r2 */
	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tf->fixreg[13] = tls;
	else
		tf->fixreg[2] = tls;

	/*
	 * Force the FPU back off to ensure the new context will not bypass
	 * the enable_fpu() setup code accidentally.
	 *
	 * This prevents an issue where a process that uses floating point
	 * inside a signal handler could end up in a state where the MSR
	 * did not match pcb_flags.
	 *
	 * Additionally, ensure VSX is disabled as well, as it is illegal
	 * to leave it turned on when FP or VEC are off.
	 */
	tf->srr1 &= ~(PSL_FP | PSL_VSX);
	pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX);

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* enable_fpu() will happen lazily on a fault */
		pcb->pcb_flags |= PCB_FPREGS;
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
		for (i = 0; i < 32; i++) {
			/* Restore both the FP and VSX halves of each reg. */
			memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
			    sizeof(double));
			memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
			    &mcp->mc_vsxfpreg[i], sizeof(double));
		}
	}

	if (mcp->mc_flags & _MC_AV_VALID) {
		/* Turn Altivec on now if it was off. */
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
			critical_enter();
			enable_vec(td);
			critical_exit();
		}
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
	} else {
		/* No vector state in the context: disable Altivec. */
		tf->srr1 &= ~PSL_VEC;
		pcb->pcb_flags &= ~PCB_VEC;
	}

	return (0);
}
563
564 /*
565 * Clean up extra POWER state. Some per-process registers and states are not
566 * managed by the MSR, so must be cleaned up explicitly on thread exit.
567 *
568 * Currently this includes:
569 * DSCR -- Data stream control register (PowerISA 2.06+)
570 * FSCR -- Facility Status and Control Register (PowerISA 2.07+)
571 */
572 static void
573 cleanup_power_extras(struct thread *td)
574 {
575 uint32_t pcb_flags;
576
577 if (td != curthread)
578 return;
579
580 pcb_flags = td->td_pcb->pcb_flags;
581 /* Clean up registers not managed by MSR. */
582 if (pcb_flags & PCB_CFSCR)
583 mtspr(SPR_FSCR, 0);
584 if (pcb_flags & PCB_CDSCR)
585 mtspr(SPR_DSCRP, 0);
586
587 if (pcb_flags & PCB_FPU)
588 cleanup_fpscr();
589 }
590
591 /*
592 * Ensure the PCB has been updated in preparation for copying a thread.
593 *
594 * This is needed because normally this only happens during switching tasks,
595 * but when we are cloning a thread, we need the updated state before doing
596 * the actual copy, so the new thread inherits the current state instead of
597 * the state at the last task switch.
598 *
599 * Keep this in sync with the assembly code in cpu_switch()!
600 */
void
cpu_save_thread_regs(struct thread *td)
{
	uint32_t pcb_flags;
	struct pcb *pcb;

	KASSERT(td == curthread,
	    ("cpu_save_thread_regs: td is not curthread"));

	pcb = td->td_pcb;

	pcb_flags = pcb->pcb_flags;

#if defined(__powerpc64__)
	/* Are *any* FSCR flags in use? */
	if (pcb_flags & PCB_CFSCR) {
		pcb->pcb_fscr = mfspr(SPR_FSCR);

		/* Snapshot only the facilities FSCR says are enabled. */
		if (pcb->pcb_fscr & FSCR_EBB) {
			/* Event-based branch SPRs. */
			pcb->pcb_ebb.ebbhr = mfspr(SPR_EBBHR);
			pcb->pcb_ebb.ebbrr = mfspr(SPR_EBBRR);
			pcb->pcb_ebb.bescr = mfspr(SPR_BESCR);
		}
		if (pcb->pcb_fscr & FSCR_LM) {
			/* Load-monitored facility SPRs. */
			pcb->pcb_lm.lmrr = mfspr(SPR_LMRR);
			pcb->pcb_lm.lmser = mfspr(SPR_LMSER);
		}
		if (pcb->pcb_fscr & FSCR_TAR)
			pcb->pcb_tar = mfspr(SPR_TAR);
	}

	/*
	 * This is outside of the PCB_CFSCR check because it can be set
	 * independently when running on POWER7/POWER8.
	 */
	if (pcb_flags & PCB_CDSCR)
		pcb->pcb_dscr = mfspr(SPR_DSCRP);
#endif

#if defined(__SPE__)
	/*
	 * On E500v2, single-precision scalar instructions and access to
	 * SPEFSCR may be used without PSL_VEC turned on, as long as they
	 * limit themselves to the low word of the registers.
	 *
	 * As such, we need to unconditionally save SPEFSCR, even though
	 * it is also updated in save_vec_nodrop().
	 */
	pcb->pcb_vec.vscr = mfspr(SPR_SPEFSCR);
#endif

	if (pcb_flags & PCB_FPU)
		save_fpu_nodrop(td);

	if (pcb_flags & PCB_VEC)
		save_vec_nodrop(td);
}
658
659 /*
660 * Set set up registers on exec.
661 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	register_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
#ifdef __powerpc64__
	/* Align SP to 16 and reserve 48 bytes (presumably the ABI's
	 * minimal stack-frame header — TODO confirm against the ABI). */
	tf->fixreg[1] = -roundup(-stack + 48, 16);
#else
	/* Align SP to 16 and reserve 8 bytes for back chain / saved LR. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);
#endif

	/*
	 * Set up arguments for _start():
	 * _start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 * - obj and cleanup are the auxiliary and termination
	 * vectors. They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extension, and will be
	 * ignored by executables which are strictly
	 * compliant with the SVR4 ABI.
	 */

	/* Collect argc from the user stack */
	argc = fuword((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(register_t);	/* argv */
	tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);	/* envp */
	tf->fixreg[6] = 0; /* auxiliary vector */
	tf->fixreg[7] = 0; /* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
#ifdef __powerpc64__
	/* ELFv2 convention: global entry point is also passed in r12. */
	tf->fixreg[12] = imgp->entry_addr;
#endif
	tf->srr1 = psl_userset | PSL_FE_DFLT;
	/* Drop any per-thread SPR state inherited from the old image. */
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
}
706
707 #ifdef COMPAT_FREEBSD32
/* 32-bit (COMPAT_FREEBSD32) counterpart of exec_setregs(). */
void
ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	uint32_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* Align SP to 16 and reserve 8 bytes for back chain / saved LR. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/* Collect argc from the user stack */
	argc = fuword32((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(uint32_t);	/* argv */
	tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);	/* envp */
	tf->fixreg[6] = 0; /* auxiliary vector */
	tf->fixreg[7] = 0; /* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings; /* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	tf->srr1 = psl_userset32 | PSL_FE_DFLT;
	/* Drop any per-thread SPR state inherited from the old image. */
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
}
732 #endif
733
734 int
735 fill_regs(struct thread *td, struct reg *regs)
736 {
737 struct trapframe *tf;
738
739 tf = td->td_frame;
740 memcpy(regs, tf, sizeof(struct reg));
741
742 return (0);
743 }
744
745 int
746 fill_dbregs(struct thread *td, struct dbreg *dbregs)
747 {
748 /* No debug registers on PowerPC */
749 return (ENOSYS);
750 }
751
752 int
753 fill_fpregs(struct thread *td, struct fpreg *fpregs)
754 {
755 struct pcb *pcb;
756 int i;
757
758 pcb = td->td_pcb;
759
760 if ((pcb->pcb_flags & PCB_FPREGS) == 0)
761 memset(fpregs, 0, sizeof(struct fpreg));
762 else {
763 memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
764 for (i = 0; i < 32; i++)
765 memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
766 sizeof(double));
767 }
768
769 return (0);
770 }
771
772 int
773 set_regs(struct thread *td, struct reg *regs)
774 {
775 struct trapframe *tf;
776
777 tf = td->td_frame;
778 memcpy(tf, regs, sizeof(struct reg));
779
780 return (0);
781 }
782
783 int
784 set_dbregs(struct thread *td, struct dbreg *dbregs)
785 {
786 /* No debug registers on PowerPC */
787 return (ENOSYS);
788 }
789
790 int
791 set_fpregs(struct thread *td, struct fpreg *fpregs)
792 {
793 struct pcb *pcb;
794 int i;
795
796 pcb = td->td_pcb;
797 pcb->pcb_flags |= PCB_FPREGS;
798 memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
799 for (i = 0; i < 32; i++) {
800 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
801 sizeof(double));
802 }
803
804 return (0);
805 }
806
807 #ifdef COMPAT_FREEBSD32
808 int
809 set_regs32(struct thread *td, struct reg32 *regs)
810 {
811 struct trapframe *tf;
812 int i;
813
814 tf = td->td_frame;
815 for (i = 0; i < 32; i++)
816 tf->fixreg[i] = regs->fixreg[i];
817 tf->lr = regs->lr;
818 tf->cr = regs->cr;
819 tf->xer = regs->xer;
820 tf->ctr = regs->ctr;
821 tf->srr0 = regs->pc;
822
823 return (0);
824 }
825
826 int
827 fill_regs32(struct thread *td, struct reg32 *regs)
828 {
829 struct trapframe *tf;
830 int i;
831
832 tf = td->td_frame;
833 for (i = 0; i < 32; i++)
834 regs->fixreg[i] = tf->fixreg[i];
835 regs->lr = tf->lr;
836 regs->cr = tf->cr;
837 regs->xer = tf->xer;
838 regs->ctr = tf->ctr;
839 regs->pc = tf->srr0;
840
841 return (0);
842 }
843
844 static int
845 grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
846 {
847 mcontext_t mcp64;
848 int i, error;
849
850 error = grab_mcontext(td, &mcp64, flags);
851 if (error != 0)
852 return (error);
853
854 mcp->mc_vers = mcp64.mc_vers;
855 mcp->mc_flags = mcp64.mc_flags;
856 mcp->mc_onstack = mcp64.mc_onstack;
857 mcp->mc_len = mcp64.mc_len;
858 memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec));
859 memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av));
860 for (i = 0; i < 42; i++)
861 mcp->mc_frame[i] = mcp64.mc_frame[i];
862 memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg));
863 memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));
864
865 return (0);
866 }
867
868 static int
869 get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
870 {
871 int error;
872
873 error = grab_mcontext32(td, mcp, flags);
874 if (error == 0) {
875 PROC_LOCK(curthread->td_proc);
876 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
877 PROC_UNLOCK(curthread->td_proc);
878 }
879
880 return (error);
881 }
882
static int
set_mcontext32(struct thread *td, mcontext32_t *mcp)
{
	mcontext_t mcp64;
	int i, error;

	/* Widen the 32-bit context into a native mcontext_t. */
	mcp64.mc_vers = mcp->mc_vers;
	mcp64.mc_flags = mcp->mc_flags;
	mcp64.mc_onstack = mcp->mc_onstack;
	mcp64.mc_len = mcp->mc_len;
	memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec));
	memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av));
	for (i = 0; i < 42; i++)
		mcp64.mc_frame[i] = mcp->mc_frame[i];
	/*
	 * Preserve the upper 32 bits of SRR1 from the live trapframe;
	 * the 32-bit context only carries the low word.
	 */
	mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL);
	memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg));
	memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));

	error = set_mcontext(td, &mcp64);

	return (error);
}
905 #endif
906
907 #ifdef COMPAT_FREEBSD32
908 int
909 freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
910 {
911 ucontext32_t uc;
912 int error;
913
914 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
915
916 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
917 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
918 return (EFAULT);
919 }
920
921 error = set_mcontext32(td, &uc.uc_mcontext);
922 if (error != 0)
923 return (error);
924
925 /*
926 * Save FPU state if needed. User may have changed it on
927 * signal handler
928 */
929 if (uc.uc_mcontext.mc_srr1 & PSL_FP)
930 save_fpu(td);
931
932 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
933
934 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
935 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
936
937 return (EJUSTRETURN);
938 }
939
940 /*
941 * The first two fields of a ucontext_t are the signal mask and the machine
942 * context. The next field is uc_link; we want to avoid destroying the link
943 * when copying out contexts.
944 */
945 #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
946
947 int
948 freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
949 {
950 ucontext32_t uc;
951 int ret;
952
953 if (uap->ucp == NULL)
954 ret = EINVAL;
955 else {
956 bzero(&uc, sizeof(uc));
957 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
958 PROC_LOCK(td->td_proc);
959 uc.uc_sigmask = td->td_sigmask;
960 PROC_UNLOCK(td->td_proc);
961 ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
962 }
963 return (ret);
964 }
965
966 int
967 freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
968 {
969 ucontext32_t uc;
970 int ret;
971
972 if (uap->ucp == NULL)
973 ret = EINVAL;
974 else {
975 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
976 if (ret == 0) {
977 ret = set_mcontext32(td, &uc.uc_mcontext);
978 if (ret == 0) {
979 kern_sigprocmask(td, SIG_SETMASK,
980 &uc.uc_sigmask, NULL, 0);
981 }
982 }
983 }
984 return (ret == 0 ? EJUSTRETURN : ret);
985 }
986
987 int
988 freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
989 {
990 ucontext32_t uc;
991 int ret;
992
993 if (uap->oucp == NULL || uap->ucp == NULL)
994 ret = EINVAL;
995 else {
996 bzero(&uc, sizeof(uc));
997 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
998 PROC_LOCK(td->td_proc);
999 uc.uc_sigmask = td->td_sigmask;
1000 PROC_UNLOCK(td->td_proc);
1001 ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
1002 if (ret == 0) {
1003 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
1004 if (ret == 0) {
1005 ret = set_mcontext32(td, &uc.uc_mcontext);
1006 if (ret == 0) {
1007 kern_sigprocmask(td, SIG_SETMASK,
1008 &uc.uc_sigmask, NULL, 0);
1009 }
1010 }
1011 }
1012 }
1013 return (ret == 0 ? EJUSTRETURN : ret);
1014 }
1015
1016 #endif
1017
/*
 * Write a syscall's result (or error) back into the user trapframe.
 * EJUSTRETURN means the frame was already set up (e.g. sigreturn) and
 * must not be touched.
 */
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct proc *p;
	struct trapframe *tf;
	int fixup;

	if (error == EJUSTRETURN)
		return;

	p = td->td_proc;
	tf = td->td_frame;

	/*
	 * __syscall(2) from a 32-bit process returns a 64-bit value in
	 * a register pair, so flag it for splitting below — except the
	 * lseek family, presumably because its result is already
	 * returned as 64 bits (TODO confirm).
	 */
	if (tf->fixreg[0] == SYS___syscall &&
	    (SV_PROC_FLAG(p, SV_ILP32))) {
		int code = tf->fixreg[FIRSTARG + 1];
		fixup = (
#if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek)
		    code != SYS_freebsd6_lseek &&
#endif
		    code != SYS_lseek) ? 1 : 0;
	} else
		fixup = 0;

	switch (error) {
	case 0:
		if (fixup) {
			/*
			 * 64-bit return, 32-bit syscall. Fixup byte order
			 */
			tf->fixreg[FIRSTARG] = 0;
			tf->fixreg[FIRSTARG + 1] = td->td_retval[0];
		} else {
			tf->fixreg[FIRSTARG] = td->td_retval[0];
			tf->fixreg[FIRSTARG + 1] = td->td_retval[1];
		}
		tf->cr &= ~0x10000000;		/* Unset summary overflow */
		break;
	case ERESTART:
		/*
		 * Set user's pc back to redo the system call.
		 */
		tf->srr0 -= 4;
		break;
	default:
		/* Failure: errno in r3, summary-overflow bit set in CR. */
		tf->fixreg[FIRSTARG] = error;
		tf->cr |= 0x10000000;		/* Set summary overflow */
		break;
	}
}
1068
1069 /*
1070 * Threading functions
1071 */
void
cpu_thread_exit(struct thread *td)
{
	/* Reset per-thread SPR state (FSCR/DSCR/FPSCR) on thread exit. */
	cleanup_power_extras(td);
}
1077
void
cpu_thread_clean(struct thread *td)
{
	/* No per-thread MD state to clean on PowerPC. */
}
1082
void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;

	/*
	 * Place the PCB near the top of the kernel stack and the initial
	 * trapframe immediately below it.  NOTE(review): ~0x2fUL is not a
	 * plain power-of-two alignment mask (it also clears bit 5);
	 * assumed intentional — confirm before changing.
	 */
	pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x2fUL);
	td->td_pcb = pcb;
	td->td_frame = (struct trapframe *)pcb - 1;
}
1093
void
cpu_thread_free(struct thread *td)
{
	/* No MD resources to release; common code frees the stack. */
}
1098
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	/*
	 * The TLS pointer lives in r13 (64-bit) or r2 (32-bit).  The
	 * 0x7010/0x7008 biases are 0x7000 plus two machine words —
	 * NOTE(review): assumed to come from the PowerPC TLS ABI;
	 * not derivable from this file alone.
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
	else
		td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
	return (0);
}
1109
/*
 * Copy td0's machine state into new thread td and arrange for td to
 * enter the kernel via fork_trampoline -> fork_return(td, tf).
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct callframe *cf;

	/* Ensure td0 pcb is up to date. */
	if (td0 == curthread)
		cpu_save_thread_regs(td0);

	pcb2 = td->td_pcb;

	/* Copy the upcall pcb */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/* Create a stack for the new thread */
	tf = td->td_frame;
	bcopy(td0->td_frame, tf, sizeof(struct trapframe));
	/* The child observes a successful, zero-valued syscall return. */
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;	/* clear summary-overflow (error) bit */

	/* Set registers for trampoline to user mode. */
	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td;
	cf->cf_arg1 = (register_t)tf;

	pcb2->pcb_sp = (register_t)cf;
#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	/* ELFv1: fork_trampoline is a function descriptor {entry, TOC}. */
	pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
	pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];
#else
	pcb2->pcb_lr = (register_t)fork_trampoline;
	pcb2->pcb_context[0] = pcb2->pcb_lr;
#endif
	pcb2->pcb_cpu.aim.usr_vsid = 0;
#ifdef __SPE__
	pcb2->pcb_vec.vscr = SPEFSCR_DFLT;
#endif

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_msr = psl_kernset;
}
1157
/*
 * Build a fresh trapframe for td so that, on return to user mode,
 * execution begins at entry(arg) on the supplied alternate stack.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;
	uintptr_t sp;

	tf = td->td_frame;
	/* align stack and alloc space for frame ptr and saved LR */
#ifdef __powerpc64__
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
	    ~0x1f;
#else
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &
	    ~0x1f;
#endif
	bzero(tf, sizeof(struct trapframe));

	tf->fixreg[1] = (register_t)sp;		/* r1: user stack pointer */
	tf->fixreg[3] = (register_t)arg;	/* r3: first argument */
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		tf->srr0 = (register_t)entry;
#ifdef __powerpc64__
		/* 32-bit process on a 64-bit kernel: 32-bit user MSR. */
		tf->srr1 = psl_userset32 | PSL_FE_DFLT;
#else
		tf->srr1 = psl_userset | PSL_FE_DFLT;
#endif
	} else {
#ifdef __powerpc64__
		if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) {
			tf->srr0 = (register_t)entry;
			/* ELFv2 ABI requires that the global entry point be in r12. */
			tf->fixreg[12] = (register_t)entry;
		}
		else {
			/*
			 * ELFv1: 'entry' points at a user-space function
			 * descriptor; copy in the entry address, TOC (r2)
			 * and environment pointer (r11).  A copyin failure
			 * is deliberately ignored: the frame was zeroed
			 * above, so the thread faults in user mode instead.
			 */
			register_t entry_desc[3];
			(void)copyin((void *)entry, entry_desc, sizeof(entry_desc));
			tf->srr0 = entry_desc[0];
			tf->fixreg[2] = entry_desc[1];
			tf->fixreg[11] = entry_desc[2];
		}
		tf->srr1 = psl_userset | PSL_FE_DFLT;
#endif
	}

	/* Reset pcb flags (e.g. lazy-FPU/vector state markers). */
	td->td_pcb->pcb_flags = 0;
#ifdef __SPE__
	td->td_pcb->pcb_vec.vscr = SPEFSCR_DFLT;
#endif

	td->td_retval[0] = (register_t)entry;
	td->td_retval[1] = 0;
}
1211
1212 static int
1213 emulate_mfspr(int spr, int reg, struct trapframe *frame){
1214 struct thread *td;
1215
1216 td = curthread;
1217
1218 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1219 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1220 return (SIGILL);
1221 // If DSCR was never set, get the default DSCR
1222 if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0)
1223 td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP);
1224
1225 frame->fixreg[reg] = td->td_pcb->pcb_dscr;
1226 frame->srr0 += 4;
1227 return (0);
1228 } else
1229 return (SIGILL);
1230 }
1231
1232 static int
1233 emulate_mtspr(int spr, int reg, struct trapframe *frame){
1234 struct thread *td;
1235
1236 td = curthread;
1237
1238 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1239 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1240 return (SIGILL);
1241 td->td_pcb->pcb_flags |= PCB_CDSCR;
1242 td->td_pcb->pcb_dscr = frame->fixreg[reg];
1243 mtspr(SPR_DSCRP, frame->fixreg[reg]);
1244 frame->srr0 += 4;
1245 return (0);
1246 } else
1247 return (SIGILL);
1248 }
1249
/* Mask selecting the primary and extended opcode fields of an XFX-form insn. */
#define XFX 0xFC0007FF
/*
 * Last-chance emulation of a faulting user instruction at frame->srr0.
 * Directly handles mfpvr, mfspr/mtspr of the DSCR, and the sync family;
 * otherwise defers to the soft-FPU emulator (if FPU_EMU) and finally
 * grants one silent retry per address to paper over icache incoherency.
 * Returns 0 if handled, else a signal number (SIGILL/SIGFPE) to deliver.
 */
int
ppc_instr_emulate(struct trapframe *frame, struct thread *td)
{
	struct pcb *pcb;
	uint32_t instr;
	int reg, sig;
	int rs, spr;

	/* On fetch failure fuword32 yields -1, which matches no pattern below. */
	instr = fuword32((void *)frame->srr0);
	sig = SIGILL;

	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
		reg = (instr & ~0xfc1fffff) >> 21;	/* RT field (bits 21-25) */
		frame->fixreg[reg] = mfpvr();
		frame->srr0 += 4;
		return (0);
	} else if ((instr & XFX) == 0x7c0002a6) {	/* mfspr */
		rs = (instr & 0x3e00000) >> 21;
		/*
		 * NOTE(review): this extracts only half of the split SPR
		 * field; enough to distinguish DSCR (3) and DSCRP (17),
		 * but it would alias larger SPR numbers -- confirm intent.
		 */
		spr = (instr & 0x1ff800) >> 16;
		return emulate_mfspr(spr, rs, frame);
	} else if ((instr & XFX) == 0x7c0003a6) {	/* mtspr */
		rs = (instr & 0x3e00000) >> 21;
		spr = (instr & 0x1ff800) >> 16;
		return emulate_mtspr(spr, rs, frame);
	} else if ((instr & 0xfc000ffe) == 0x7c0004ac) {	/* various sync */
		powerpc_sync();	/* Do a heavy-weight sync */
		frame->srr0 += 4;
		return (0);
	}

	pcb = td->td_pcb;
#ifdef FPU_EMU
	/* Lazily initialize soft-FP state; save live FP registers first. */
	if (!(pcb->pcb_flags & PCB_FPREGS)) {
		bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu));
		pcb->pcb_flags |= PCB_FPREGS;
	} else if (pcb->pcb_flags & PCB_FPU)
		save_fpu(td);
	sig = fpu_emulate(frame, &pcb->pcb_fpu);
	/* Re-enable the hardware FPU unless emulation raised SIGILL. */
	if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU)
		enable_fpu(td);
#endif
	if (sig == SIGILL) {
		if (pcb->pcb_lastill != frame->srr0) {
			/* Allow a second chance, in case of cache sync issues. */
			sig = 0;
			pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4);
			pcb->pcb_lastill = frame->srr0;
		}
	}

	return (sig);
}
Cache object: 591cd1f18facc6a189b7ab2b3a99a90c
|