1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
35 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40
41 #include "opt_compat.h"
42 #include "opt_ddb.h"
43 #include "opt_kstack_pages.h"
44 #include "opt_msgbuf.h"
45
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/proc.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/bus.h>
53 #include <sys/cpu.h>
54 #include <sys/cons.h>
55 #include <sys/eventhandler.h>
56 #include <sys/exec.h>
57 #include <sys/imgact.h>
58 #include <sys/interrupt.h>
59 #include <sys/kdb.h>
60 #include <sys/kernel.h>
61 #include <sys/ktr.h>
62 #include <sys/linker.h>
63 #include <sys/lock.h>
64 #include <sys/msgbuf.h>
65 #include <sys/mutex.h>
66 #include <sys/pcpu.h>
67 #include <sys/ptrace.h>
68 #include <sys/reboot.h>
69 #include <sys/signalvar.h>
70 #include <sys/smp.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timetc.h>
74 #include <sys/ucontext.h>
75
76 #include <dev/ofw/openfirm.h>
77
78 #include <vm/vm.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_param.h>
86
87 #include <ddb/ddb.h>
88
89 #include <machine/bus.h>
90 #include <machine/cache.h>
91 #include <machine/clock.h>
92 #include <machine/cpu.h>
93 #include <machine/fp.h>
94 #include <machine/fsr.h>
95 #include <machine/intr_machdep.h>
96 #include <machine/md_var.h>
97 #include <machine/metadata.h>
98 #include <machine/ofw_machdep.h>
99 #include <machine/ofw_mem.h>
100 #include <machine/pcb.h>
101 #include <machine/pmap.h>
102 #include <machine/pstate.h>
103 #include <machine/reg.h>
104 #include <machine/sigframe.h>
105 #include <machine/smp.h>
106 #include <machine/tick.h>
107 #include <machine/tlb.h>
108 #include <machine/tstate.h>
109 #include <machine/upa.h>
110 #include <machine/ver.h>
111
112 typedef int ofw_vec_t(void *);
113
114 #ifdef DDB
115 extern vm_offset_t ksym_start, ksym_end;
116 #endif
117
118 int dtlb_slots;
119 int itlb_slots;
120 struct tlb_entry *kernel_tlbs;
121 int kernel_tlb_slots;
122
123 int cold = 1;
124 long Maxmem;
125 long realmem;
126
127 char pcpu0[PCPU_PAGES * PAGE_SIZE];
128 struct trapframe frame0;
129
130 vm_offset_t kstack0;
131 vm_paddr_t kstack0_phys;
132
133 struct kva_md_info kmi;
134
135 u_long ofw_vec;
136 u_long ofw_tba;
137
138 char sparc64_model[32];
139
140 static int cpu_use_vis = 1;
141
142 cpu_block_copy_t *cpu_block_copy;
143 cpu_block_zero_t *cpu_block_zero;
144
145 void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
146 ofw_vec_t *vec);
147 void sparc64_shutdown_final(void *dummy, int howto);
148
149 static void cpu_startup(void *);
150 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
151
152 CTASSERT((1 << INT_SHIFT) == sizeof(int));
153 CTASSERT((1 << PTR_SHIFT) == sizeof(char *));
154
155 CTASSERT(sizeof(struct reg) == 256);
156 CTASSERT(sizeof(struct fpreg) == 272);
157 CTASSERT(sizeof(struct __mcontext) == 512);
158
159 CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
160 CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
161 CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
162 CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));
163
164 CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));
165
/*
 * Late CPU startup (SI_SUB_CPU SYSINIT): report physical and available
 * memory, initialize the kernel VM submaps and the buffer cache, and
 * register the machine-dependent final shutdown handler.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	/* Sum the firmware-provided physical memory regions. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	/* realmem is kept in pages, not bytes. */
	realmem = (long)physsz / PAGE_SIZE;

	vm_ksubmap_init(&kmi);

	/* Buffer cache and pager buffer initialization. */
	bufinit();
	vm_pager_bufferinit();

	/* Run sparc64_shutdown_final() last on shutdown (firmware exit). */
	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}
195
196 void
197 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
198 {
199 struct intr_request *ir;
200 int i;
201
202 pcpu->pc_irtail = &pcpu->pc_irhead;
203 for (i = 0; i < IR_FREE; i++) {
204 ir = &pcpu->pc_irpool[i];
205 ir->ir_next = pcpu->pc_irfree;
206 pcpu->pc_irfree = ir;
207 }
208 }
209
/*
 * Acquire-side of the MD spinlock protocol: on the first (outermost)
 * spinlock held by this thread, raise the processor interrupt level so
 * we cannot be preempted while holding a spin mutex.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/*
		 * Save the current PIL and raise it to PIL_TICK; the saved
		 * level is restored by the matching spinlock_exit().
		 */
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_saved_pil = pil;
	}
	td->td_md.md_spinlock_count++;
	critical_enter();
}
225
/*
 * Release-side of the MD spinlock protocol: when the outermost spinlock
 * is dropped, restore the interrupt level saved by spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		wrpr(pil, td->td_md.md_saved_pil, 0);
}
237
/*
 * Machine-dependent kernel bootstrap, called from the locore entry code
 * with the loader(8) metadata pointer and the Open Firmware client
 * interface vector.  Ordering in here is significant: the console needs
 * DELAY(), DELAY() needs the BSP clock from the firmware device tree,
 * and the trap table can only be taken over via the PROM.
 */
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	char type[8];
	char *env;
	struct pcpu *pc;
	vm_offset_t end;
	caddr_t kmdp;
	phandle_t child;
	phandle_t root;
	uint32_t portid;

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of CPU we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Do CPU-specific initialization.
	 */
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init();

	/*
	 * Clear (S)TICK timer (including NPT).
	 */
	tick_clear();

	/*
	 * UltraSparc II[e,i] based systems come up with the tick interrupt
	 * enabled and a handler that resets the tick counter, causing DELAY()
	 * to not work properly when used early in boot.
	 * UltraSPARC III based systems come up with the system tick interrupt
	 * enabled, causing an interrupt storm on startup since they are not
	 * handled.
	 */
	tick_stop();

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_init(vec);

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before the console is inited so cninit() gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/*
	 * Prime our per-CPU data page for use.  Note, we are using it for
	 * our stack, so don't pass the real size (PAGE_SIZE) to pcpu_init
	 * or it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_mid = UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG));
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Determine the OFW node and frequency of the BSP (and ensure the
	 * BSP is in the device tree in the first place).  The BSP is
	 * matched by comparing the node's portid/upa-portid property with
	 * the module ID read from the UPA config register above.
	 */
	pc->pc_node = 0;
	root = OF_peer(0);
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (OF_getprop(child, "device_type", type, sizeof(type)) <= 0)
			continue;
		if (strcmp(type, "cpu") != 0)
			continue;
		if (OF_getprop(child, cpu_impl < CPU_IMPL_ULTRASPARCIII ?
		    "upa-portid" : "portid", &portid, sizeof(portid)) <= 0)
			continue;
		if (portid == pc->pc_mid) {
			pc->pc_node = child;
			break;
		}
	}
	/* Without a matching CPU node we cannot continue; back to firmware. */
	if (pc->pc_node == 0)
		OF_exit();
	/* NB: `child' still refers to the BSP node found by the loop above. */
	if (OF_getprop(child, "clock-frequency", &pc->pc_clock,
	    sizeof(pc->pc_clock)) <= 0)
		OF_exit();

	/*
	 * Provide a DELAY() that works before PCPU_REG is set.  We can't
	 * set PCPU_REG without also taking over the trap table or the
	 * firmware will overwrite it.  Unfortunately, it's way too early
	 * to also take over the trap table at this point.
	 */
	clock_boot = pc->pc_clock;
	delay_func = delay_boot;

	/*
	 * Initialize the console before printing anything.
	 * NB: the low-level console drivers require a working DELAY() at
	 * this point.
	 */
	cninit();

	/*
	 * Panic if there is no metadata.  Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("sparc64_init: no loader metadata.\n"
		    "This probably means you are not using loader(8).\n");
		panic("sparc64_init");
	}

	/*
	 * Sanity check the kernel end, which is important.
	 */
	if (end == 0) {
		printf("sparc64_init: warning, kernel end not specified.\n"
		    "Attempting to continue anyway.\n");
		end = (vm_offset_t)_end;
	}

	/*
	 * Determine the TLB slot maxima, which are expected to be
	 * equal across all CPUs.
	 * NB: for Cheetah-class CPUs, these properties only refer
	 * to the t16s.
	 */
	if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
	    sizeof(dtlb_slots)) == -1)
		panic("sparc64_init: cannot determine number of dTLB slots");
	if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
	    sizeof(itlb_slots)) == -1)
		panic("sparc64_init: cannot determine number of iTLB slots");

	/* Probe and enable the caches; align UMA to the D$ line size. */
	cache_init(pc);
	cache_enable();
	uma_set_align(pc->pc_cache.dc_linesize - 1);

	/*
	 * Select the block copy/zero implementations: plain bcopy/bzero by
	 * default, VIS block transfers on CPUs that support them unless
	 * disabled via the machdep.use_vis tunable.
	 */
	cpu_block_copy = bcopy;
	cpu_block_zero = bzero;
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		switch (cpu_impl) {
		case CPU_IMPL_SPARC64:
		case CPU_IMPL_ULTRASPARCI:
		case CPU_IMPL_ULTRASPARCII:
		case CPU_IMPL_ULTRASPARCIIi:
		case CPU_IMPL_ULTRASPARCIIe:
		case CPU_IMPL_ULTRASPARCIII:	/* NB: we've disabled P$. */
		case CPU_IMPL_ULTRASPARCIIIp:
		case CPU_IMPL_ULTRASPARCIIIi:
		case CPU_IMPL_ULTRASPARCIV:
		case CPU_IMPL_ULTRASPARCIVp:
		case CPU_IMPL_ULTRASPARCIIIip:
			cpu_block_copy = spitfire_block_copy;
			cpu_block_zero = spitfire_block_zero;
			break;
		}
	}

#ifdef SMP
	mp_init();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap(end);

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0, set kstack0, frame0, curthread and curpcb.
	 */
	proc_linkup0(&proc0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	/* The PCB lives at the top of the kernel stack. */
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Take over the trap table via the PROM.  Using the PROM for this
	 * is necessary in order to set obp-control-relinquished to true
	 * within the PROM so obtaining /virtual-memory/translations doesn't
	 * trigger a fatal reset error or worse things further down the road.
	 * XXX it should be possible to use this solely instead of writing
	 * %tba in cpu_setregs().  Doing so causes a hang however.
	 */
	sun4u_set_traptable(tl0_base);

	/*
	 * It's now safe to use the real DELAY().
	 */
	delay_func = delay_tick;

	/*
	 * Initialize the message buffer (after setting the trap table).
	 */
	msgbufinit(msgbufp, MSGBUF_SIZE);

	mutex_init();
	intr_init2();

	/*
	 * Finish pmap initialization now that we're ready for mutexes.
	 */
	PMAP_LOCK_INIT(kernel_pmap);

	/* Record the machine model name for cpu_startup(). */
	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter_why(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif
}
498
499 void
500 set_openfirm_callback(ofw_vec_t *vec)
501 {
502
503 ofw_tba = rdpr(tba);
504 ofw_vec = (u_long)vec;
505 }
506
/*
 * Deliver a signal to the current thread: build a signal frame on the
 * user stack (or the alternate signal stack) and redirect the trapframe
 * to the process's signal trampoline.  Called with the proc lock and
 * the sigacts mutex held; both are dropped around the copyout and
 * reacquired before returning.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	u_long sp;
	int oonstack;
	int sig;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	/* The user sees %sp biased by SPOFF. */
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Place the frame at the top of the alternate stack. */
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	/* Drop the locks around the user memory access below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* A register window frame for the handler sits below the sigframe. */
	fp = (struct frame *)sfp - 1;

	/* Translate the signal if appropriate (e.g. for emulation ABIs). */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	tf->tf_out[4] = (register_t)catcher;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		tf->tf_out[1] = (register_t)&sfp->sf_si;

		/* Fill in POSIX parts. */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		tf->tf_out[1] = ksi->ksi_code;
		tf->tf_out[3] = (register_t)ksi->ksi_addr;
	}

	/* Copy the sigframe out to the user's stack. */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/* Resume execution in the signal trampoline. */
	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
610
611 #ifndef _SYS_SYSPROTO_H_
612 struct sigreturn_args {
613 ucontext_t *ucp;
614 };
615 #endif
616
617 /*
618 * MPSAFE
619 */
/*
 * sigreturn(2): restore the machine context saved by sendsig() and
 * reinstate the thread's signal mask.  Returns EJUSTRETURN on success
 * so the syscall path leaves the restored registers untouched.
 */
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	mcontext_t *mc;
	ucontext_t uc;
	int error;

	p = td->td_proc;
	/* Spill the user register windows before touching the frame. */
	if (rwindow_save(td)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	/* set_mcontext() validates the context (tstate, version). */
	mc = &uc.uc_mcontext;
	error = set_mcontext(td, mc);
	if (error != 0)
		return (error);

	PROC_LOCK(p);
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
	    td, mc->mc_tpc, mc->mc_sp, mc->mc_tstate);
	return (EJUSTRETURN);
}
655
656 #ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4 compatibility sigreturn(2); the context layout is handled
 * by the current implementation, so simply forward the call.
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/* Parenthesized return value per style(9), as elsewhere in the file. */
	return (sigreturn(td, (struct sigreturn_args *)uap));
}
663 #endif
664
665 /*
666 * Construct a PCB from a trapframe. This is called from kdb_trap() where
667 * we want to start a backtrace from the function that caused us to enter
668 * the debugger. We have the context in the trapframe, but base the trace
669 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
670 * enough for a backtrace.
671 */
672 void
673 makectx(struct trapframe *tf, struct pcb *pcb)
674 {
675
676 pcb->pcb_pc = tf->tf_tpc;
677 pcb->pcb_sp = tf->tf_sp;
678 }
679
/*
 * Snapshot the thread's machine context into *mc.  The context starts
 * with an image of the trapframe; the FP state, if in use, is saved
 * into the PCB and copied from there.  Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	bcopy(tf, mc, sizeof(*tf));
	if (flags & GET_MC_CLEAR_RET) {
		/* Pre-load a zero syscall return value (%o0/%o1). */
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	}
	mc->mc_flags = _MC_VERSION;
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		/*
		 * The FPU is live: save its registers into the PCB, clear
		 * FPRS_FEF so it is lazily reloaded, and note the saved
		 * state with PCB_FEF.
		 */
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		/* Export the saved FP registers and mark them valid. */
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}
707
/*
 * Install the machine context *mc into the thread.  Rejects contexts
 * with an insecure tstate or a mismatched version with EINVAL; on
 * success the trapframe is overwritten (preserving the kernel-owned
 * wstate) and any supplied FP state is staged in the PCB for lazy
 * reload.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct trapframe *tf;
	struct pcb *pcb;
	uint64_t wstate;

	if (!TSTATE_SECURE(mc->mc_tstate) ||
	    (mc->mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
		return (EINVAL);
	tf = td->td_frame;
	pcb = td->td_pcb;
	/* Make sure the windows are spilled first. */
	flushw();
	/* wstate belongs to the kernel; carry it across the overwrite. */
	wstate = tf->tf_wstate;
	bcopy(mc, tf, sizeof(*tf));
	tf->tf_wstate = wstate;
	if ((mc->mc_fprs & FPRS_FEF) != 0) {
		/* Stage the FP registers in the PCB for lazy reload. */
		tf->tf_fprs = 0;
		bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
		pcb->pcb_flags |= PCB_FEF;
	}
	return (0);
}
732
733 /*
734 * Exit the kernel and execute a firmware call that will not return, as
735 * specified by the arguments.
736 */
/*
 * Exit the kernel and execute a firmware call that will not return, as
 * specified by the arguments (an Open Firmware client interface call
 * frame).  On SMP, the application processors are shut down first.
 */
void
cpu_shutdown(void *args)
{

#ifdef SMP
	cpu_mp_shutdown();
#endif
	openfirmware_exit(args);
}
746
747 /* Get current clock frequency for the given CPU ID. */
748 int
749 cpu_est_clockrate(int cpu_id, uint64_t *rate)
750 {
751 struct pcpu *pc;
752
753 pc = pcpu_find(cpu_id);
754 if (pc == NULL || rate == NULL)
755 return (EINVAL);
756 *rate = pc->pc_clock;
757 return (0);
758 }
759
760 /*
761 * Duplicate OF_exit() with a different firmware call function that restores
762 * the trap table, otherwise a RED state exception is triggered in at least
763 * some firmware versions.
764 */
765 void
766 cpu_halt(void)
767 {
768 static struct {
769 cell_t name;
770 cell_t nargs;
771 cell_t nreturns;
772 } args = {
773 (cell_t)"exit",
774 0,
775 0
776 };
777
778 cpu_shutdown(&args);
779 }
780
781 void
782 sparc64_shutdown_final(void *dummy, int howto)
783 {
784 static struct {
785 cell_t name;
786 cell_t nargs;
787 cell_t nreturns;
788 } args = {
789 (cell_t)"SUNW,power-off",
790 0,
791 0
792 };
793
794 /* Turn the power off? */
795 if ((howto & RB_POWEROFF) != 0)
796 cpu_shutdown(&args);
797 /* In case of halt, return to the firmware. */
798 if ((howto & RB_HALT) != 0)
799 cpu_halt();
800 }
801
/*
 * Idle loop hook.  Intentionally a busy no-op on this platform; no
 * halt-until-interrupt instruction is issued here yet.
 */
void
cpu_idle(void)
{

	/* Insert code to halt (until next interrupt) for the idle loop. */
}
808
809 int
810 ptrace_set_pc(struct thread *td, u_long addr)
811 {
812
813 td->td_frame->tf_tpc = addr;
814 td->td_frame->tf_tnpc = addr + 4;
815 return (0);
816 }
817
/*
 * ptrace(2) single-step support.  Not implemented on this platform;
 * returns success so callers proceed without stepping.
 */
int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
825
/*
 * Undo ptrace_single_step().  Nothing to do since single-stepping is
 * not implemented; returns success unconditionally.
 */
int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
833
/*
 * Reset the thread's machine state for execve(2): discard any signal
 * trampoline and user trap table from the previous image, zero the PCB
 * and trapframe, and arrange for execution to start at `entry' with a
 * 16-byte aligned stack.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct pcb *pcb;
	struct proc *p;
	u_long sp;

	/* XXX no cpu_exec */
	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if (p->p_md.md_utrap != NULL) {
		/* Drop our reference to the old image's user trap table. */
		utrap_free(p->p_md.md_utrap);
		p->p_md.md_utrap = NULL;
	}

	pcb = td->td_pcb;
	tf = td->td_frame;
	/* The ABI requires a 16-byte aligned stack pointer. */
	sp = rounddown(stack, 16);
	bzero(pcb, sizeof(*pcb));
	bzero(tf, sizeof(*tf));
	tf->tf_out[0] = stack;
	tf->tf_out[3] = p->p_sysent->sv_psstrings;
	/* %sp is biased by SPOFF and leaves room for an initial frame. */
	tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
	tf->tf_tnpc = entry + 4;
	tf->tf_tpc = entry;
	tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;

	td->td_retval[0] = tf->tf_out[0];
	td->td_retval[1] = tf->tf_out[1];
}
865
866 int
867 fill_regs(struct thread *td, struct reg *regs)
868 {
869
870 bcopy(td->td_frame, regs, sizeof(*regs));
871 return (0);
872 }
873
/*
 * ptrace(2) support: install a general-purpose register set.  Rejects
 * an insecure tstate with EINVAL.  Note that the caller's *regs is
 * modified: its r_wstate is overwritten with the kernel-owned wstate
 * before the whole set is copied into the trapframe, so that field
 * cannot be changed from userland.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}
886
/*
 * Hardware debug registers are not supported on this platform.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
893
/*
 * Hardware debug registers are not supported on this platform.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
900
901 int
902 fill_fpregs(struct thread *td, struct fpreg *fpregs)
903 {
904 struct trapframe *tf;
905 struct pcb *pcb;
906
907 pcb = td->td_pcb;
908 tf = td->td_frame;
909 bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
910 fpregs->fr_fsr = tf->tf_fsr;
911 fpregs->fr_gsr = tf->tf_gsr;
912 return (0);
913 }
914
915 int
916 set_fpregs(struct thread *td, struct fpreg *fpregs)
917 {
918 struct trapframe *tf;
919 struct pcb *pcb;
920
921 pcb = td->td_pcb;
922 tf = td->td_frame;
923 tf->tf_fprs &= ~FPRS_FEF;
924 bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
925 tf->tf_fsr = fpregs->fr_fsr;
926 tf->tf_gsr = fpregs->fr_gsr;
927 return (0);
928 }
929
930 struct md_utrap *
931 utrap_alloc(void)
932 {
933 struct md_utrap *ut;
934
935 ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
936 ut->ut_refcnt = 1;
937 return (ut);
938 }
939
940 void
941 utrap_free(struct md_utrap *ut)
942 {
943 int refcnt;
944
945 if (ut == NULL)
946 return;
947 mtx_pool_lock(mtxpool_sleep, ut);
948 ut->ut_refcnt--;
949 refcnt = ut->ut_refcnt;
950 mtx_pool_unlock(mtxpool_sleep, ut);
951 if (refcnt == 0)
952 free(ut, M_SUBPROC);
953 }
954
955 struct md_utrap *
956 utrap_hold(struct md_utrap *ut)
957 {
958
959 if (ut == NULL)
960 return (NULL);
961 mtx_pool_lock(mtxpool_sleep, ut);
962 ut->ut_refcnt++;
963 mtx_pool_unlock(mtxpool_sleep, ut);
964 return (ut);
965 }
Cache object: ed9a3d4c4eb780f7812ada5c9d9893ec
|