1 /*-
2 * SPDX-License-Identifier: BSD-4-Clause AND BSD-2-Clause-FreeBSD
3 *
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33 /*-
34 * Copyright (C) 2001 Benno Rice
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
51 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
52 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
53 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
54 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
55 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
57 */
58
59 #include <sys/cdefs.h>
60 __FBSDID("$FreeBSD$");
61
62 #include "opt_fpu_emu.h"
63
64 #include <sys/param.h>
65 #include <sys/proc.h>
66 #include <sys/systm.h>
67 #include <sys/bio.h>
68 #include <sys/buf.h>
69 #include <sys/bus.h>
70 #include <sys/cons.h>
71 #include <sys/cpu.h>
72 #include <sys/exec.h>
73 #include <sys/imgact.h>
74 #include <sys/kernel.h>
75 #include <sys/ktr.h>
76 #include <sys/lock.h>
77 #include <sys/malloc.h>
78 #include <sys/mutex.h>
79 #include <sys/signalvar.h>
80 #include <sys/syscallsubr.h>
81 #include <sys/syscall.h>
82 #include <sys/sysent.h>
83 #include <sys/sysproto.h>
84 #include <sys/ucontext.h>
85 #include <sys/uio.h>
86
87 #include <machine/altivec.h>
88 #include <machine/cpu.h>
89 #include <machine/elf.h>
90 #include <machine/fpu.h>
91 #include <machine/pcb.h>
92 #include <machine/reg.h>
93 #include <machine/sigframe.h>
94 #include <machine/trap.h>
95 #include <machine/vmparam.h>
96
97 #include <vm/pmap.h>
98
99 #ifdef FPU_EMU
100 #include <powerpc/fpu/fpu_extern.h>
101 #endif
102
103 #ifdef COMPAT_FREEBSD32
104 #include <compat/freebsd32/freebsd32_signal.h>
105 #include <compat/freebsd32/freebsd32_util.h>
106 #include <compat/freebsd32/freebsd32_proto.h>
107
/*
 * 32-bit view of ucontext_t used for COMPAT_FREEBSD32 processes.
 * Field order mirrors the native ucontext_t, with user pointers
 * (uc_link, ss_sp inside sigaltstack32) narrowed to 32 bits.
 */
typedef struct __ucontext32 {
	sigset_t	uc_sigmask;
	mcontext32_t	uc_mcontext;
	uint32_t	uc_link;	/* 32-bit user pointer to linked context */
	struct sigaltstack32    uc_stack;
	uint32_t	uc_flags;
	uint32_t	__spare__[4];
} ucontext32_t;

/* Signal frame pushed onto the 32-bit user stack by sendsig(). */
struct sigframe32 {
	ucontext32_t	sf_uc;
	struct siginfo32 sf_si;
};
121
122 static int grab_mcontext32(struct thread *td, mcontext32_t *, int flags);
123 #endif
124
125 static int grab_mcontext(struct thread *, mcontext_t *, int);
126
127 static void cleanup_power_extras(struct thread *);
128
129 #ifdef __powerpc64__
130 extern struct sysentvec elf64_freebsd_sysvec_v2;
131 #endif
132
133 void
134 sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
135 {
136 struct trapframe *tf;
137 struct sigacts *psp;
138 struct sigframe sf;
139 struct thread *td;
140 struct proc *p;
141 #ifdef COMPAT_FREEBSD32
142 struct siginfo32 siginfo32;
143 struct sigframe32 sf32;
144 #endif
145 size_t sfpsize;
146 caddr_t sfp, usfp;
147 register_t sp;
148 int oonstack, rndfsize;
149 int sig;
150 int code;
151
152 td = curthread;
153 p = td->td_proc;
154 PROC_LOCK_ASSERT(p, MA_OWNED);
155
156 psp = p->p_sigacts;
157 mtx_assert(&psp->ps_mtx, MA_OWNED);
158 tf = td->td_frame;
159
160 /*
161 * Fill siginfo structure.
162 */
163 ksi->ksi_info.si_signo = ksi->ksi_signo;
164 ksi->ksi_info.si_addr =
165 (void *)((tf->exc == EXC_DSI || tf->exc == EXC_DSE) ?
166 tf->dar : tf->srr0);
167
168 #ifdef COMPAT_FREEBSD32
169 if (SV_PROC_FLAG(p, SV_ILP32)) {
170 siginfo_to_siginfo32(&ksi->ksi_info, &siginfo32);
171 sig = siginfo32.si_signo;
172 code = siginfo32.si_code;
173 sfp = (caddr_t)&sf32;
174 sfpsize = sizeof(sf32);
175 rndfsize = roundup(sizeof(sf32), 16);
176 sp = (uint32_t)tf->fixreg[1];
177 oonstack = sigonstack(sp);
178
179 /*
180 * Save user context
181 */
182
183 memset(&sf32, 0, sizeof(sf32));
184 grab_mcontext32(td, &sf32.sf_uc.uc_mcontext, 0);
185
186 sf32.sf_uc.uc_sigmask = *mask;
187 sf32.sf_uc.uc_stack.ss_sp = (uintptr_t)td->td_sigstk.ss_sp;
188 sf32.sf_uc.uc_stack.ss_size = (uint32_t)td->td_sigstk.ss_size;
189 sf32.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
190 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
191
192 sf32.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
193 } else {
194 #endif
195 sig = ksi->ksi_signo;
196 code = ksi->ksi_code;
197 sfp = (caddr_t)&sf;
198 sfpsize = sizeof(sf);
199 #ifdef __powerpc64__
200 /*
201 * 64-bit PPC defines a 288 byte scratch region
202 * below the stack.
203 */
204 rndfsize = 288 + roundup(sizeof(sf), 48);
205 #else
206 rndfsize = roundup(sizeof(sf), 16);
207 #endif
208 sp = tf->fixreg[1];
209 oonstack = sigonstack(sp);
210
211 /*
212 * Save user context
213 */
214
215 memset(&sf, 0, sizeof(sf));
216 grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
217
218 sf.sf_uc.uc_sigmask = *mask;
219 sf.sf_uc.uc_stack = td->td_sigstk;
220 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
221 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
222
223 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
224 #ifdef COMPAT_FREEBSD32
225 }
226 #endif
227
228 CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
229 catcher, sig);
230
231 /*
232 * Allocate and validate space for the signal handler context.
233 */
234 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
235 SIGISMEMBER(psp->ps_sigonstack, sig)) {
236 usfp = (void *)(((uintptr_t)td->td_sigstk.ss_sp +
237 td->td_sigstk.ss_size - rndfsize) & ~0xFul);
238 } else {
239 usfp = (void *)((sp - rndfsize) & ~0xFul);
240 }
241
242 /*
243 * Set Floating Point facility to "Ignore Exceptions Mode" so signal
244 * handler can run.
245 */
246 if (td->td_pcb->pcb_flags & PCB_FPU)
247 tf->srr1 = tf->srr1 & ~(PSL_FE0 | PSL_FE1);
248
249 /*
250 * Set up the registers to return to sigcode.
251 *
252 * r1/sp - sigframe ptr
253 * lr - sig function, dispatched to by blrl in trampoline
254 * r3 - sig number
255 * r4 - SIGINFO ? &siginfo : exception code
256 * r5 - user context
257 * srr0 - trampoline function addr
258 */
259 tf->lr = (register_t)catcher;
260 tf->fixreg[1] = (register_t)usfp;
261 tf->fixreg[FIRSTARG] = sig;
262 #ifdef COMPAT_FREEBSD32
263 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
264 ((SV_PROC_FLAG(p, SV_ILP32)) ?
265 offsetof(struct sigframe32, sf_uc) :
266 offsetof(struct sigframe, sf_uc));
267 #else
268 tf->fixreg[FIRSTARG+2] = (register_t)usfp +
269 offsetof(struct sigframe, sf_uc);
270 #endif
271 if (SIGISMEMBER(psp->ps_siginfo, sig)) {
272 /*
273 * Signal handler installed with SA_SIGINFO.
274 */
275 #ifdef COMPAT_FREEBSD32
276 if (SV_PROC_FLAG(p, SV_ILP32)) {
277 sf32.sf_si = siginfo32;
278 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
279 offsetof(struct sigframe32, sf_si);
280 sf32.sf_si = siginfo32;
281 } else {
282 #endif
283 tf->fixreg[FIRSTARG+1] = (register_t)usfp +
284 offsetof(struct sigframe, sf_si);
285 sf.sf_si = ksi->ksi_info;
286 #ifdef COMPAT_FREEBSD32
287 }
288 #endif
289 } else {
290 /* Old FreeBSD-style arguments. */
291 tf->fixreg[FIRSTARG+1] = code;
292 tf->fixreg[FIRSTARG+3] = (tf->exc == EXC_DSI) ?
293 tf->dar : tf->srr0;
294 }
295 mtx_unlock(&psp->ps_mtx);
296 PROC_UNLOCK(p);
297
298 tf->srr0 = (register_t)p->p_sysent->sv_sigcode_base;
299
300 /*
301 * copy the frame out to userland.
302 */
303 if (copyout(sfp, usfp, sfpsize) != 0) {
304 /*
305 * Process has trashed its stack. Kill it.
306 */
307 CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
308 PROC_LOCK(p);
309 sigexit(td, SIGILL);
310 }
311
312 CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
313 tf->srr0, tf->fixreg[1]);
314
315 PROC_LOCK(p);
316 mtx_lock(&psp->ps_mtx);
317 }
318
/*
 * sigreturn(2): restore the machine context that sendsig() saved on
 * the user stack, unblocking the signal mask that was in effect.
 * Returns EJUSTRETURN on success so the syscall layer does not
 * overwrite the restored r3/r4.
 */
int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	/* Pull the saved context in from the user stack. */
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	/* Validate and install the machine context. */
	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/*
	 * Save FPU state if needed. User may have changed it on
	 * signal handler
	 */
	if (uc.uc_mcontext.mc_srr1 & PSL_FP)
		save_fpu(td);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	return (EJUSTRETURN);
}
350
351 #ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/* The FreeBSD 4.x sigreturn ABI matches the current one here. */
	return (sys_sigreturn(td, (struct sigreturn_args *)uap));
}
358 #endif
359
360 /*
361 * Construct a PCB from a trapframe. This is called from kdb_trap() where
362 * we want to start a backtrace from the function that caused us to enter
363 * the debugger. We have the context in the trapframe, but base the trace
364 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
365 * enough for a backtrace.
366 */
367 void
368 makectx(struct trapframe *tf, struct pcb *pcb)
369 {
370
371 pcb->pcb_lr = tf->srr0;
372 pcb->pcb_sp = tf->fixreg[1];
373 }
374
375 /*
376 * get_mcontext/sendsig helper routine that doesn't touch the
377 * proc lock
378 */
/*
 * get_mcontext()/sendsig() helper: capture td's machine context into
 * *mcp without touching the proc lock.  GET_MC_CLEAR_RET in flags
 * zeroes the syscall return registers (r3/r4) in the copied frame.
 * Always returns 0.
 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	int i;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPREGS) {
		if (pcb->pcb_flags & PCB_FPU) {
			/* Live FP state; flush it to the pcb first. */
			KASSERT(td == curthread,
				("get_mcontext: fp save not curthread"));
			critical_enter();
			save_fpu(td);
			critical_exit();
		}
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
			    sizeof(double));
	}

	if (pcb->pcb_flags & PCB_VSX) {
		/*
		 * Copy the other double of each VSR (vsr[2] -- presumably
		 * the half not covered by mc_fpreg above; verify against
		 * the pcb_fpu layout).
		 */
		for (i = 0; i < 32; i++)
			memcpy(&mcp->mc_vsxfpreg[i],
			    &pcb->pcb_fpu.fpr[i].vsr[2], sizeof(double));
	}

	/*
	 * Repeat for Altivec context
	 */

	if (pcb->pcb_flags & PCB_VEC) {
		KASSERT(td == curthread,
			("get_mcontext: fp save not curthread"));
		critical_enter();
		save_vec(td);
		critical_exit();
		mcp->mc_flags |= _MC_AV_VALID;
		mcp->mc_vscr  = pcb->pcb_vec.vscr;
		mcp->mc_vrsave =  pcb->pcb_vec.vrsave;
		memcpy(mcp->mc_avec, pcb->pcb_vec.vr, sizeof(mcp->mc_avec));
	}

	mcp->mc_len = sizeof(*mcp);

	return (0);
}
444
445 int
446 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
447 {
448 int error;
449
450 error = grab_mcontext(td, mcp, flags);
451 if (error == 0) {
452 PROC_LOCK(curthread->td_proc);
453 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
454 PROC_UNLOCK(curthread->td_proc);
455 }
456
457 return (error);
458 }
459
/*
 * Install machine context *mcp into thread td.  Returns EINVAL if the
 * context version/length is wrong or if it attempts to change any
 * privileged MSR bit; 0 on success.  Used by sigreturn(2) and
 * setcontext(2).
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;
	register_t tls;
	int i;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION || mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user change privileged MSR bits.
	 *
	 * psl_userstatic is used here to mask off any bits that can
	 * legitimately vary between user contexts (Floating point
	 * exception control and any facilities that we are using the
	 * "enable on first use" pattern with.)
	 *
	 * All other bits are required to match psl_userset(32).
	 *
	 * Remember to update the platform cpu_init code when implementing
	 * support for a new conditional facility!
	 */
	if ((mcp->mc_srr1 & psl_userstatic) != (tf->srr1 & psl_userstatic)) {
		return (EINVAL);
	}

	/*
	 * Copy trapframe, preserving TLS pointer across context change
	 * (r13 in the 64-bit ABI, r2 in the 32-bit ABI).
	 */
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tls = tf->fixreg[13];
	else
		tls = tf->fixreg[2];
	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));
	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		tf->fixreg[13] = tls;
	else
		tf->fixreg[2] = tls;

	/*
	 * Force the FPU back off to ensure the new context will not bypass
	 * the enable_fpu() setup code accidentally.
	 *
	 * This prevents an issue where a process that uses floating point
	 * inside a signal handler could end up in a state where the MSR
	 * did not match pcb_flags.
	 *
	 * Additionally, ensure VSX is disabled as well, as it is illegal
	 * to leave it turned on when FP or VEC are off.
	 */
	tf->srr1 &= ~(PSL_FP | PSL_VSX);
	pcb->pcb_flags &= ~(PCB_FPU | PCB_VSX);

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* enable_fpu() will happen lazily on a fault */
		pcb->pcb_flags |= PCB_FPREGS;
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		bzero(pcb->pcb_fpu.fpr, sizeof(pcb->pcb_fpu.fpr));
		/* Restore both the FP half and the VSX half of each VSR. */
		for (i = 0; i < 32; i++) {
			memcpy(&pcb->pcb_fpu.fpr[i].fpr, &mcp->mc_fpreg[i],
			    sizeof(double));
			memcpy(&pcb->pcb_fpu.fpr[i].vsr[2],
			    &mcp->mc_vsxfpreg[i], sizeof(double));
		}
	}

	if (mcp->mc_flags & _MC_AV_VALID) {
		/* Altivec state is restored eagerly, unlike FP. */
		if ((pcb->pcb_flags & PCB_VEC) != PCB_VEC) {
			critical_enter();
			enable_vec(td);
			critical_exit();
		}
		pcb->pcb_vec.vscr = mcp->mc_vscr;
		pcb->pcb_vec.vrsave = mcp->mc_vrsave;
		memcpy(pcb->pcb_vec.vr, mcp->mc_avec, sizeof(mcp->mc_avec));
	} else {
		tf->srr1 &= ~PSL_VEC;
		pcb->pcb_flags &= ~PCB_VEC;
	}

	return (0);
}
545
546 /*
547 * Clean up extra POWER state. Some per-process registers and states are not
548 * managed by the MSR, so must be cleaned up explicitly on thread exit.
549 *
550 * Currently this includes:
551 * DSCR -- Data stream control register (PowerISA 2.06+)
552 * FSCR -- Facility Status and Control Register (PowerISA 2.07+)
553 */
554 static void
555 cleanup_power_extras(struct thread *td)
556 {
557 uint32_t pcb_flags;
558
559 if (td != curthread)
560 return;
561
562 pcb_flags = td->td_pcb->pcb_flags;
563 /* Clean up registers not managed by MSR. */
564 if (pcb_flags & PCB_CFSCR)
565 mtspr(SPR_FSCR, 0);
566 if (pcb_flags & PCB_CDSCR)
567 mtspr(SPR_DSCRP, 0);
568
569 cleanup_fpscr();
570 }
571
/*
 * Set up registers on exec.
 */
void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	register_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
#ifdef __powerpc64__
	/* Reserve the 48-byte ABI minimum stack frame, 16-byte aligned. */
	tf->fixreg[1] = -roundup(-stack + 48, 16);
#else
	/* Reserve the 8-byte ABI minimum stack frame, 16-byte aligned. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);
#endif

	/*
	 * Set up arguments for _start():
	 * _start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 * - obj and cleanup are the auxiliary and termination
	 *   vectors.  They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extension, and will be
	 *   ignored by executables which are strictly
	 *   compliant with the SVR4 ABI.
	 */

	/*
	 * Collect argc from the user stack.
	 * NOTE(review): fuword() failure (-1) is not checked here --
	 * presumably the stack was just built by exec and cannot fault;
	 * confirm.
	 */
	argc = fuword((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(register_t);		/* argv */
	tf->fixreg[5] = stack + (2 + argc)*sizeof(register_t);	/* envp */
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
#ifdef __powerpc64__
	/* ELFv2 ABI: global entry point expects its own address in r12. */
	tf->fixreg[12] = imgp->entry_addr;
#endif
	tf->srr1 = psl_userset | PSL_FE_DFLT;
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
}
619
620 #ifdef COMPAT_FREEBSD32
/*
 * 32-bit flavor of exec_setregs() for COMPAT_FREEBSD32 images: same
 * _start() argument layout, but with 32-bit stack slots.
 */
void
ppc32_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	uint32_t argc;

	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* Reserve the 8-byte ABI minimum stack frame, 16-byte aligned. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/* Collect argc from the user stack (see note in exec_setregs()). */
	argc = fuword32((void *)stack);

	tf->fixreg[3] = argc;
	tf->fixreg[4] = stack + sizeof(uint32_t);		/* argv */
	tf->fixreg[5] = stack + (2 + argc)*sizeof(uint32_t);	/* envp */
	tf->fixreg[6] = 0;				/* auxiliary vector */
	tf->fixreg[7] = 0;				/* termination vector */
	tf->fixreg[8] = (register_t)imgp->ps_strings;	/* NetBSD extension */

	tf->srr0 = imgp->entry_addr;
	tf->srr1 = psl_userset32 | PSL_FE_DFLT;
	cleanup_power_extras(td);
	td->td_pcb->pcb_flags = 0;
}
645 #endif
646
647 int
648 fill_regs(struct thread *td, struct reg *regs)
649 {
650 struct trapframe *tf;
651
652 tf = td->td_frame;
653 memcpy(regs, tf, sizeof(struct reg));
654
655 return (0);
656 }
657
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; PT_GETDBREGS is unsupported. */
	return (ENOSYS);
}
664
665 int
666 fill_fpregs(struct thread *td, struct fpreg *fpregs)
667 {
668 struct pcb *pcb;
669 int i;
670
671 pcb = td->td_pcb;
672
673 if ((pcb->pcb_flags & PCB_FPREGS) == 0)
674 memset(fpregs, 0, sizeof(struct fpreg));
675 else {
676 memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
677 for (i = 0; i < 32; i++)
678 memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
679 sizeof(double));
680 }
681
682 return (0);
683 }
684
685 int
686 set_regs(struct thread *td, struct reg *regs)
687 {
688 struct trapframe *tf;
689
690 tf = td->td_frame;
691 memcpy(tf, regs, sizeof(struct reg));
692
693 return (0);
694 }
695
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC; PT_SETDBREGS is unsupported. */
	return (ENOSYS);
}
702
703 int
704 set_fpregs(struct thread *td, struct fpreg *fpregs)
705 {
706 struct pcb *pcb;
707 int i;
708
709 pcb = td->td_pcb;
710 pcb->pcb_flags |= PCB_FPREGS;
711 memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double));
712 for (i = 0; i < 32; i++) {
713 memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i],
714 sizeof(double));
715 }
716
717 return (0);
718 }
719
720 #ifdef COMPAT_FREEBSD32
721 int
722 set_regs32(struct thread *td, struct reg32 *regs)
723 {
724 struct trapframe *tf;
725 int i;
726
727 tf = td->td_frame;
728 for (i = 0; i < 32; i++)
729 tf->fixreg[i] = regs->fixreg[i];
730 tf->lr = regs->lr;
731 tf->cr = regs->cr;
732 tf->xer = regs->xer;
733 tf->ctr = regs->ctr;
734 tf->srr0 = regs->pc;
735
736 return (0);
737 }
738
739 int
740 fill_regs32(struct thread *td, struct reg32 *regs)
741 {
742 struct trapframe *tf;
743 int i;
744
745 tf = td->td_frame;
746 for (i = 0; i < 32; i++)
747 regs->fixreg[i] = tf->fixreg[i];
748 regs->lr = tf->lr;
749 regs->cr = tf->cr;
750 regs->xer = tf->xer;
751 regs->ctr = tf->ctr;
752 regs->pc = tf->srr0;
753
754 return (0);
755 }
756
/*
 * 32-bit counterpart of grab_mcontext(): capture the native context
 * and narrow it field-by-field into the mcontext32_t layout.
 */
static int
grab_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
{
	mcontext_t mcp64;
	int i, error;

	error = grab_mcontext(td, &mcp64, flags);
	if (error != 0)
		return (error);

	mcp->mc_vers = mcp64.mc_vers;
	mcp->mc_flags = mcp64.mc_flags;
	mcp->mc_onstack = mcp64.mc_onstack;
	/*
	 * NOTE(review): mc_len is copied from the 64-bit context, i.e.
	 * sizeof(mcontext_t), not sizeof(mcontext32_t).  set_mcontext32()
	 * round-trips this value, so they must stay consistent -- confirm
	 * this is intentional before changing either side.
	 */
	mcp->mc_len = mcp64.mc_len;
	memcpy(mcp->mc_avec,mcp64.mc_avec,sizeof(mcp64.mc_avec));
	memcpy(mcp->mc_av,mcp64.mc_av,sizeof(mcp64.mc_av));
	/* Truncate each of the 42 trapframe slots to 32 bits. */
	for (i = 0; i < 42; i++)
		mcp->mc_frame[i] = mcp64.mc_frame[i];
	memcpy(mcp->mc_fpreg,mcp64.mc_fpreg,sizeof(mcp64.mc_fpreg));
	memcpy(mcp->mc_vsxfpreg,mcp64.mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));

	return (0);
}
780
781 static int
782 get_mcontext32(struct thread *td, mcontext32_t *mcp, int flags)
783 {
784 int error;
785
786 error = grab_mcontext32(td, mcp, flags);
787 if (error == 0) {
788 PROC_LOCK(curthread->td_proc);
789 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
790 PROC_UNLOCK(curthread->td_proc);
791 }
792
793 return (error);
794 }
795
/*
 * 32-bit counterpart of set_mcontext(): widen the mcontext32_t into a
 * native mcontext_t and install it.
 */
static int
set_mcontext32(struct thread *td, mcontext32_t *mcp)
{
	mcontext_t mcp64;
	int i, error;

	mcp64.mc_vers = mcp->mc_vers;
	mcp64.mc_flags = mcp->mc_flags;
	mcp64.mc_onstack = mcp->mc_onstack;
	mcp64.mc_len = mcp->mc_len;
	memcpy(mcp64.mc_avec,mcp->mc_avec,sizeof(mcp64.mc_avec));
	memcpy(mcp64.mc_av,mcp->mc_av,sizeof(mcp64.mc_av));
	/* Zero-extend each of the 42 trapframe slots to 64 bits. */
	for (i = 0; i < 42; i++)
		mcp64.mc_frame[i] = mcp->mc_frame[i];
	/*
	 * The 32-bit context only carries the low word of SRR1; carry the
	 * upper (privileged/mode) bits over from the live trapframe so the
	 * psl_userstatic check in set_mcontext() passes.
	 */
	mcp64.mc_srr1 |= (td->td_frame->srr1 & 0xFFFFFFFF00000000ULL);
	memcpy(mcp64.mc_fpreg,mcp->mc_fpreg,sizeof(mcp64.mc_fpreg));
	memcpy(mcp64.mc_vsxfpreg,mcp->mc_vsxfpreg,sizeof(mcp64.mc_vsxfpreg));

	error = set_mcontext(td, &mcp64);

	return (error);
}
818 #endif
819
820 #ifdef COMPAT_FREEBSD32
/*
 * sigreturn(2) for 32-bit processes: same flow as sys_sigreturn(),
 * operating on the ucontext32_t layout.
 */
int
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
{
	ucontext32_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	/* Pull the saved 32-bit context in from the user stack. */
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	/* Validate and install the machine context. */
	error = set_mcontext32(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/*
	 * Save FPU state if needed. User may have changed it on
	 * signal handler
	 */
	if (uc.uc_mcontext.mc_srr1 & PSL_FP)
		save_fpu(td);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);

	/* Return values were restored from the context; don't clobber them. */
	return (EJUSTRETURN);
}
852
853 /*
854 * The first two fields of a ucontext_t are the signal mask and the machine
855 * context. The next field is uc_link; we want to avoid destroying the link
856 * when copying out contexts.
857 */
858 #define UC32_COPY_SIZE offsetof(ucontext32_t, uc_link)
859
860 int
861 freebsd32_getcontext(struct thread *td, struct freebsd32_getcontext_args *uap)
862 {
863 ucontext32_t uc;
864 int ret;
865
866 if (uap->ucp == NULL)
867 ret = EINVAL;
868 else {
869 bzero(&uc, sizeof(uc));
870 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
871 PROC_LOCK(td->td_proc);
872 uc.uc_sigmask = td->td_sigmask;
873 PROC_UNLOCK(td->td_proc);
874 ret = copyout(&uc, uap->ucp, UC32_COPY_SIZE);
875 }
876 return (ret);
877 }
878
879 int
880 freebsd32_setcontext(struct thread *td, struct freebsd32_setcontext_args *uap)
881 {
882 ucontext32_t uc;
883 int ret;
884
885 if (uap->ucp == NULL)
886 ret = EINVAL;
887 else {
888 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
889 if (ret == 0) {
890 ret = set_mcontext32(td, &uc.uc_mcontext);
891 if (ret == 0) {
892 kern_sigprocmask(td, SIG_SETMASK,
893 &uc.uc_sigmask, NULL, 0);
894 }
895 }
896 }
897 return (ret == 0 ? EJUSTRETURN : ret);
898 }
899
900 int
901 freebsd32_swapcontext(struct thread *td, struct freebsd32_swapcontext_args *uap)
902 {
903 ucontext32_t uc;
904 int ret;
905
906 if (uap->oucp == NULL || uap->ucp == NULL)
907 ret = EINVAL;
908 else {
909 bzero(&uc, sizeof(uc));
910 get_mcontext32(td, &uc.uc_mcontext, GET_MC_CLEAR_RET);
911 PROC_LOCK(td->td_proc);
912 uc.uc_sigmask = td->td_sigmask;
913 PROC_UNLOCK(td->td_proc);
914 ret = copyout(&uc, uap->oucp, UC32_COPY_SIZE);
915 if (ret == 0) {
916 ret = copyin(uap->ucp, &uc, UC32_COPY_SIZE);
917 if (ret == 0) {
918 ret = set_mcontext32(td, &uc.uc_mcontext);
919 if (ret == 0) {
920 kern_sigprocmask(td, SIG_SETMASK,
921 &uc.uc_sigmask, NULL, 0);
922 }
923 }
924 }
925 }
926 return (ret == 0 ? EJUSTRETURN : ret);
927 }
928
929 #endif
930
/*
 * Store the syscall result (or error) into the user trapframe.
 * EJUSTRETURN means the frame already holds the desired values
 * (sigreturn & friends) and must not be touched.
 */
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct proc *p;
	struct trapframe *tf;
	int fixup;

	if (error == EJUSTRETURN)
		return;

	p = td->td_proc;
	tf = td->td_frame;

	/*
	 * __syscall(2) from a 32-bit process returning a 64-bit result
	 * (e.g. off_t) needs the value split across r3/r4 -- except for
	 * lseek, whose return is already handled as 64-bit.
	 */
	if (tf->fixreg[0] == SYS___syscall &&
	    (SV_PROC_FLAG(p, SV_ILP32))) {
		int code = tf->fixreg[FIRSTARG + 1];
		fixup = (
#if defined(COMPAT_FREEBSD6) && defined(SYS_freebsd6_lseek)
		    code != SYS_freebsd6_lseek &&
#endif
		    code != SYS_lseek) ? 1 : 0;
	} else
		fixup = 0;

	switch (error) {
	case 0:
		if (fixup) {
			/*
			 * 64-bit return, 32-bit syscall: the value goes in
			 * the low word (r4); the high word (r3) is zero.
			 */
			tf->fixreg[FIRSTARG] = 0;
			tf->fixreg[FIRSTARG + 1] = td->td_retval[0];
		} else {
			tf->fixreg[FIRSTARG] = td->td_retval[0];
			tf->fixreg[FIRSTARG + 1] = td->td_retval[1];
		}
		tf->cr &= ~0x10000000;		/* Unset summary overflow */
		break;
	case ERESTART:
		/*
		 * Set user's pc back to redo the system call.
		 */
		tf->srr0 -= 4;
		break;
	default:
		/* Error: errno in r3, summary-overflow bit set in CR0. */
		tf->fixreg[FIRSTARG] = error;
		tf->cr |= 0x10000000;		/* Set summary overflow */
		break;
	}
}
981
982 /*
983 * Threading functions
984 */
/* Release per-thread POWER facility SPR state on thread exit. */
void
cpu_thread_exit(struct thread *td)
{
	cleanup_power_extras(td);
}
990
/* Nothing machine-dependent to clean when a thread is recycled. */
void
cpu_thread_clean(struct thread *td)
{
}
995
/*
 * Carve the pcb out of the top of the thread's kernel stack and place
 * the initial trapframe immediately below it.
 */
void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;

	/*
	 * NOTE(review): the ~0x2fUL mask clears a non-contiguous set of
	 * low bits (0x2f = 0b101111), which is not a conventional
	 * power-of-two alignment mask -- confirm this is intentional.
	 */
	pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x2fUL);
	td->td_pcb = pcb;
	td->td_frame = (struct trapframe *)pcb - 1;
}
1006
/* No machine-dependent thread resources to free. */
void
cpu_thread_free(struct thread *td)
{
}
1011
/*
 * Point the thread's TLS register at tls_base.  The PowerPC TLS ABI
 * biases the thread pointer past the TCB: r13 + 0x7010 (64-bit) and
 * r2 + 0x7008 (32-bit) -- presumably 0x7000 plus two pointer-sized
 * words; verify against the psABI TLS supplement.
 */
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	if (SV_PROC_FLAG(td->td_proc, SV_LP64))
		td->td_frame->fixreg[13] = (register_t)tls_base + 0x7010;
	else
		td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
	return (0);
}
1022
/*
 * Initialize the machine state of new thread td from existing thread
 * td0, arranging for it to enter userland via fork_trampoline() ->
 * fork_return() with a zeroed (success, child) syscall return.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct callframe *cf;

	pcb2 = td->td_pcb;

	/* Copy the upcall pcb */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/* Create a stack for the new thread */
	tf = td->td_frame;
	bcopy(td0->td_frame, tf, sizeof(struct trapframe));
	/* Child returns 0 with no error (CR0 summary-overflow clear). */
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	/* Set registers for trampoline to user mode. */
	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td;
	cf->cf_arg1 = (register_t)tf;

	pcb2->pcb_sp = (register_t)cf;
#if defined(__powerpc64__) && (!defined(_CALL_ELF) || _CALL_ELF == 1)
	/* ELFv1: fork_trampoline is a function descriptor {entry, TOC}. */
	pcb2->pcb_lr = ((register_t *)fork_trampoline)[0];
	pcb2->pcb_toc = ((register_t *)fork_trampoline)[1];
#else
	pcb2->pcb_lr = (register_t)fork_trampoline;
	pcb2->pcb_context[0] = pcb2->pcb_lr;
#endif
	pcb2->pcb_cpu.aim.usr_vsid = 0;
#ifdef __SPE__
	/* Default SPE floating-point exception enables. */
	pcb2->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE |
	    SPEFSCR_FUNFE | SPEFSCR_FOVFE;
#endif

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_msr = psl_kernset;
}
1067
/*
 * Prepare thread td to begin execution at entry(arg) on the given user
 * stack (thr_new()/upcall path).  Handles the three user ABIs: 32-bit,
 * 64-bit ELFv2 (entry in r12), and 64-bit ELFv1 (function descriptor).
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;
	uintptr_t sp;

	tf = td->td_frame;
	/* align stack and alloc space for frame ptr and saved LR */
#ifdef __powerpc64__
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 48) &
	    ~0x1f;
#else
	sp = ((uintptr_t)stack->ss_sp + stack->ss_size - 8) &
	    ~0x1f;
#endif
	bzero(tf, sizeof(struct trapframe));

	tf->fixreg[1] = (register_t)sp;
	tf->fixreg[3] = (register_t)arg;
	if (SV_PROC_FLAG(td->td_proc, SV_ILP32)) {
		tf->srr0 = (register_t)entry;
#ifdef __powerpc64__
		tf->srr1 = psl_userset32 | PSL_FE_DFLT;
#else
		tf->srr1 = psl_userset | PSL_FE_DFLT;
#endif
	} else {
#ifdef __powerpc64__
		if (td->td_proc->p_sysent == &elf64_freebsd_sysvec_v2) {
			tf->srr0 = (register_t)entry;
			/* ELFv2 ABI requires that the global entry point be in r12. */
			tf->fixreg[12] = (register_t)entry;
		}
		else {
			/*
			 * ELFv1: "entry" is a function descriptor; load the
			 * real entry point, TOC (r2), and environment (r11)
			 * from it.  NOTE(review): copyin failure is ignored,
			 * leaving entry_desc possibly uninitialized --
			 * confirm whether that is acceptable here.
			 */
			register_t entry_desc[3];
			(void)copyin((void *)entry, entry_desc, sizeof(entry_desc));
			tf->srr0 = entry_desc[0];
			tf->fixreg[2] = entry_desc[1];
			tf->fixreg[11] = entry_desc[2];
		}
		tf->srr1 = psl_userset | PSL_FE_DFLT;
#endif
	}

	td->td_pcb->pcb_flags = 0;
#ifdef __SPE__
	/* Default SPE floating-point exception enables. */
	td->td_pcb->pcb_vec.vscr = SPEFSCR_FINVE | SPEFSCR_FDBZE |
	    SPEFSCR_FUNFE | SPEFSCR_FOVFE;
#endif

	td->td_retval[0] = (register_t)entry;
	td->td_retval[1] = 0;
}
1122
1123 static int
1124 emulate_mfspr(int spr, int reg, struct trapframe *frame){
1125 struct thread *td;
1126
1127 td = curthread;
1128
1129 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1130 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1131 return (SIGILL);
1132 // If DSCR was never set, get the default DSCR
1133 if ((td->td_pcb->pcb_flags & PCB_CDSCR) == 0)
1134 td->td_pcb->pcb_dscr = mfspr(SPR_DSCRP);
1135
1136 frame->fixreg[reg] = td->td_pcb->pcb_dscr;
1137 frame->srr0 += 4;
1138 return (0);
1139 } else
1140 return (SIGILL);
1141 }
1142
1143 static int
1144 emulate_mtspr(int spr, int reg, struct trapframe *frame){
1145 struct thread *td;
1146
1147 td = curthread;
1148
1149 if (spr == SPR_DSCR || spr == SPR_DSCRP) {
1150 if (!(cpu_features2 & PPC_FEATURE2_DSCR))
1151 return (SIGILL);
1152 td->td_pcb->pcb_flags |= PCB_CDSCR;
1153 td->td_pcb->pcb_dscr = frame->fixreg[reg];
1154 mtspr(SPR_DSCRP, frame->fixreg[reg]);
1155 frame->srr0 += 4;
1156 return (0);
1157 } else
1158 return (SIGILL);
1159 }
1160
1161 #define XFX 0xFC0007FF
/*
 * Program-exception handler hook: attempt to emulate the faulting
 * instruction at srr0 (mfpvr, mfspr/mtspr of virtualized SPRs, sync
 * variants, and -- with FPU_EMU -- floating point).  Returns 0 if the
 * instruction was handled, otherwise the signal to deliver.
 */
int
ppc_instr_emulate(struct trapframe *frame, struct thread *td)
{
	struct pcb *pcb;
	uint32_t instr;
	int reg, sig;
	int rs, spr;

	instr = fuword32((void *)frame->srr0);
	sig = SIGILL;

	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
		/* Destination register is in bits 21-25 (RT field). */
		reg = (instr & ~0xfc1fffff) >> 21;
		frame->fixreg[reg] = mfpvr();
		frame->srr0 += 4;
		return (0);
	} else if ((instr & XFX) == 0x7c0002a6) {	/* mfspr */
		rs = (instr & 0x3e00000) >> 21;		/* RT field */
		spr = (instr & 0x1ff800) >> 16;		/* SPR field */
		return emulate_mfspr(spr, rs, frame);
	} else if ((instr & XFX) == 0x7c0003a6) {	/* mtspr */
		rs = (instr & 0x3e00000) >> 21;		/* RS field */
		spr = (instr & 0x1ff800) >> 16;		/* SPR field */
		return emulate_mtspr(spr, rs, frame);
	} else if ((instr & 0xfc000ffe) == 0x7c0004ac) {	/* various sync */
		powerpc_sync(); /* Do a heavy-weight sync */
		frame->srr0 += 4;
		return (0);
	}

	pcb = td->td_pcb;
#ifdef FPU_EMU
	if (!(pcb->pcb_flags & PCB_FPREGS)) {
		/* First FP use on this thread: start from a clean state. */
		bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu));
		pcb->pcb_flags |= PCB_FPREGS;
	} else if (pcb->pcb_flags & PCB_FPU)
		save_fpu(td);
	sig = fpu_emulate(frame, &pcb->pcb_fpu);
	if ((sig == 0 || sig == SIGFPE) && pcb->pcb_flags & PCB_FPU)
		enable_fpu(td);
#endif
	if (sig == SIGILL) {
		if (pcb->pcb_lastill != frame->srr0) {
			/* Allow a second chance, in case of cache sync issues. */
			sig = 0;
			pmap_sync_icache(PCPU_GET(curpmap), frame->srr0, 4);
			pcb->pcb_lastill = frame->srr0;
		}
	}

	return (sig);
}
Cache object: 3b10de00741f1d2fcaef36750a33fcf8
|