1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
35 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/8.0/sys/sparc64/sparc64/machdep.c 195149 2009-06-28 22:42:51Z marius $");
40
41 #include "opt_compat.h"
42 #include "opt_ddb.h"
43 #include "opt_kstack_pages.h"
44 #include "opt_msgbuf.h"
45
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/proc.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/bus.h>
53 #include <sys/cpu.h>
54 #include <sys/cons.h>
55 #include <sys/eventhandler.h>
56 #include <sys/exec.h>
57 #include <sys/imgact.h>
58 #include <sys/interrupt.h>
59 #include <sys/kdb.h>
60 #include <sys/kernel.h>
61 #include <sys/ktr.h>
62 #include <sys/linker.h>
63 #include <sys/lock.h>
64 #include <sys/msgbuf.h>
65 #include <sys/mutex.h>
66 #include <sys/pcpu.h>
67 #include <sys/ptrace.h>
68 #include <sys/reboot.h>
69 #include <sys/signalvar.h>
70 #include <sys/smp.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timetc.h>
74 #include <sys/ucontext.h>
75
76 #include <dev/ofw/openfirm.h>
77
78 #include <vm/vm.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_param.h>
86
87 #include <ddb/ddb.h>
88
89 #include <machine/bus.h>
90 #include <machine/cache.h>
91 #include <machine/clock.h>
92 #include <machine/cpu.h>
93 #include <machine/fp.h>
94 #include <machine/fsr.h>
95 #include <machine/intr_machdep.h>
96 #include <machine/md_var.h>
97 #include <machine/metadata.h>
98 #include <machine/ofw_machdep.h>
99 #include <machine/ofw_mem.h>
100 #include <machine/pcb.h>
101 #include <machine/pmap.h>
102 #include <machine/pstate.h>
103 #include <machine/reg.h>
104 #include <machine/sigframe.h>
105 #include <machine/smp.h>
106 #include <machine/tick.h>
107 #include <machine/tlb.h>
108 #include <machine/tstate.h>
109 #include <machine/upa.h>
110 #include <machine/ver.h>
111
/* Signature of the Open Firmware client interface entry point. */
typedef int ofw_vec_t(void *);

#ifdef DDB
/* Kernel symbol table bounds for DDB (presumably set up by the loader). */
extern vm_offset_t ksym_start, ksym_end;
#endif

/* TLB slot counts, read from the firmware CPU node in sparc64_init(). */
int dtlb_slots;
int itlb_slots;
/* Locked kernel TLB entries, fetched from the loader(8) metadata. */
struct tlb_entry *kernel_tlbs;
int kernel_tlb_slots;

int cold = 1;
long Maxmem;
long realmem;		/* set in cpu_startup(), in units of pages */

void *dpcpu0;		/* dynamic per-CPU area of the BSP */
char pcpu0[PCPU_PAGES * PAGE_SIZE];	/* static per-CPU pages of the BSP */
struct trapframe frame0;		/* initial trapframe of thread0 */

vm_offset_t kstack0;			/* kernel stack of thread0 */
vm_paddr_t kstack0_phys;

struct kva_md_info kmi;

/* Firmware entry vector and trap table base, saved in sparc64_init(). */
u_long ofw_vec;
u_long ofw_tba;

/* Machine name, copied from the "name" property of the OFW root node. */
char sparc64_model[32];

/* Tunable machdep.use_vis: use the VIS-based block copy/zero routines. */
static int cpu_use_vis = 1;

/* Function pointers selected in sparc64_init() based on cpu_impl. */
cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;

void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
    ofw_vec_t *vec);
void sparc64_shutdown_final(void *dummy, int howto);

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Compile-time sanity checks on type sizes and layout assumptions. */
CTASSERT((1 << INT_SHIFT) == sizeof(int));
CTASSERT((1 << PTR_SHIFT) == sizeof(char *));

CTASSERT(sizeof(struct reg) == 256);
CTASSERT(sizeof(struct fpreg) == 272);
CTASSERT(sizeof(struct __mcontext) == 512);

/* The PCB and its FP save areas must be 64-byte aligned. */
CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));

CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));
166
/*
 * Late (SI_SUB_CPU) machine-dependent startup: tally physical memory as
 * reported by the firmware, initialize the kernel VM submaps and the
 * buffer cache, register the final shutdown handler and announce the
 * machine and CPU.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	/* Sum the sizes of all firmware-reported physical memory regions. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	realmem = (long)physsz / PAGE_SIZE;	/* pages */

	vm_ksubmap_init(&kmi);

	bufinit();
	vm_pager_bufferinit();

	/* sparc64_shutdown_final() may power off; run it last. */
	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}
196
197 void
198 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
199 {
200 struct intr_request *ir;
201 int i;
202
203 pcpu->pc_irtail = &pcpu->pc_irhead;
204 for (i = 0; i < IR_FREE; i++) {
205 ir = &pcpu->pc_irpool[i];
206 ir->ir_next = pcpu->pc_irfree;
207 pcpu->pc_irfree = ir;
208 }
209 }
210
/*
 * Enter a spin-lock section: on the outermost entry, raise the
 * processor interrupt level (%pil) to PIL_TICK and remember the
 * previous level for spinlock_exit().  Nesting is tracked per-thread.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Read the old %pil before raising it. */
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_saved_pil = pil;
	}
	td->td_md.md_spinlock_count++;
	critical_enter();
}
226
/*
 * Leave a spin-lock section; the %pil saved by spinlock_enter() is
 * restored only when the outermost section is exited.  The steps are
 * the exact mirror of spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		wrpr(pil, td->td_md.md_saved_pil, 0);
}
238
/*
 * Early machine-dependent kernel initialization, run once on the boot
 * CPU.  mdp is the loader(8) metadata pointer and vec the Open Firmware
 * client interface entry point; o1..o3 are not used here.  The ordering
 * of the steps below is significant (the console needs OFW and a
 * working DELAY(); the real DELAY() needs the trap table; etc.).
 */
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	char type[8];
	char *env;
	struct pcpu *pc;
	vm_offset_t end;
	vm_offset_t va;
	caddr_t kmdp;
	phandle_t child;
	phandle_t root;
	uint32_t portid;

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of CPU we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Do CPU-specific initialization.
	 */
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init();

	/*
	 * Clear (S)TICK timer (including NPT).
	 */
	tick_clear();

	/*
	 * UltraSparc II[e,i] based systems come up with the tick interrupt
	 * enabled and a handler that resets the tick counter, causing DELAY()
	 * to not work properly when used early in boot.
	 * UltraSPARC III based systems come up with the system tick interrupt
	 * enabled, causing an interrupt storm on startup since they are not
	 * handled.
	 */
	tick_stop();

	/*
	 * Set up Open Firmware entry points.
	 */
	ofw_tba = rdpr(tba);
	ofw_vec = (u_long)vec;

	/*
	 * Parse metadata if present and fetch parameters.  Must be before the
	 * console is inited so cninit gets the right value of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_install(OFW_STD_DIRECT, 0);
	OF_init(ofw_entry);

	/*
	 * Prime our per-CPU data page for use.  Note, we are using it for
	 * our stack, so don't pass the real size (PAGE_SIZE) to pcpu_init
	 * or it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_mid = UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG));
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Determine the OFW node and frequency of the BSP (and ensure the
	 * BSP is in the device tree in the first place).
	 * Pre-Cheetah CPUs carry their module ID in "upa-portid", later
	 * ones in "portid".
	 */
	pc->pc_node = 0;
	root = OF_peer(0);
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (OF_getprop(child, "device_type", type, sizeof(type)) <= 0)
			continue;
		if (strcmp(type, "cpu") != 0)
			continue;
		if (OF_getprop(child, cpu_impl < CPU_IMPL_ULTRASPARCIII ?
		    "upa-portid" : "portid", &portid, sizeof(portid)) <= 0)
			continue;
		if (portid == pc->pc_mid) {
			pc->pc_node = child;
			break;
		}
	}
	/* Without a device tree node for the BSP we cannot continue. */
	if (pc->pc_node == 0)
		OF_exit();
	if (OF_getprop(child, "clock-frequency", &pc->pc_clock,
	    sizeof(pc->pc_clock)) <= 0)
		OF_exit();

	/*
	 * Provide a DELAY() that works before PCPU_REG is set.  We can't
	 * set PCPU_REG without also taking over the trap table or the
	 * firmware will overwrite it.  Unfortunately, it's way too early
	 * to also take over the trap table at this point.
	 */
	clock_boot = pc->pc_clock;
	delay_func = delay_boot;

	/*
	 * Initialize the console before printing anything.
	 * NB: the low-level console drivers require a working DELAY() at
	 * this point.
	 */
	cninit();

	/*
	 * Panic if there is no metadata.  Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL || end == 0 ||
	    kernel_tlb_slots == 0 || kernel_tlbs == NULL) {
		printf("sparc64_init: missing loader metadata.\n"
		    "This probably means you are not using loader(8).\n");
		panic("sparc64_init");
	}

	/*
	 * Work around the broken loader behavior of not demapping no
	 * longer used kernel TLB slots when unloading the kernel or
	 * modules.
	 * Walk from the last locked 4MB slot downwards to the first slot
	 * past the kernel image, demapping each from both MMUs.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		printf("demapping unused kernel TLB slot (va %#lx - %#lx)\n",
		    va, va + PAGE_SIZE_4M - 1);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		kernel_tlb_slots--;
	}

	/*
	 * Determine the TLB slot maxima, which are expected to be
	 * equal across all CPUs.
	 * NB: for Cheetah-class CPUs, these properties only refer
	 * to the t16s.
	 */
	if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
	    sizeof(dtlb_slots)) == -1)
		panic("sparc64_init: cannot determine number of dTLB slots");
	if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
	    sizeof(itlb_slots)) == -1)
		panic("sparc64_init: cannot determine number of iTLB slots");

	cache_init(pc);
	cache_enable();
	/* Align UMA allocations to the D-cache line size. */
	uma_set_align(pc->pc_cache.dc_linesize - 1);

	/*
	 * Select the block copy/zero implementations; fall back to plain
	 * bcopy/bzero unless VIS is enabled and the CPU is known to
	 * support the spitfire variants.
	 */
	cpu_block_copy = bcopy;
	cpu_block_zero = bzero;
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		switch (cpu_impl) {
		case CPU_IMPL_SPARC64:
		case CPU_IMPL_ULTRASPARCI:
		case CPU_IMPL_ULTRASPARCII:
		case CPU_IMPL_ULTRASPARCIIi:
		case CPU_IMPL_ULTRASPARCIIe:
		case CPU_IMPL_ULTRASPARCIII:	/* NB: we've disabled P$. */
		case CPU_IMPL_ULTRASPARCIIIp:
		case CPU_IMPL_ULTRASPARCIIIi:
		case CPU_IMPL_ULTRASPARCIV:
		case CPU_IMPL_ULTRASPARCIVp:
		case CPU_IMPL_ULTRASPARCIIIip:
			cpu_block_copy = spitfire_block_copy;
			cpu_block_zero = spitfire_block_zero;
			break;
		}
	}

#ifdef SMP
	mp_init();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap();

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0, set kstack0, frame0, curthread and curpcb.
	 * The PCB lives at the top of kstack0.
	 */
	proc_linkup0(&proc0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Take over the trap table via the PROM.  Using the PROM for this
	 * is necessary in order to set obp-control-relinquished to true
	 * within the PROM so obtaining /virtual-memory/translations doesn't
	 * trigger a fatal reset error or worse things further down the road.
	 * XXX it should be possible to use this solely instead of writing
	 * %tba in cpu_setregs().  Doing so causes a hang however.
	 */
	sun4u_set_traptable(tl0_base);

	/*
	 * It's now safe to use the real DELAY().
	 */
	delay_func = delay_tick;

	/*
	 * Initialize the dynamic per-CPU area for the BSP and the message
	 * buffer (after setting the trap table).
	 */
	dpcpu_init(dpcpu0, 0);
	msgbufinit(msgbufp, MSGBUF_SIZE);

	mutex_init();
	intr_init2();

	/*
	 * Finish pmap initialization now that we're ready for mutexes.
	 */
	PMAP_LOCK_INIT(kernel_pmap);

	/* Record the machine name for cpu_startup(). */
	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
517
/*
 * Deliver a signal to the current thread: save the current user context
 * and signal information into a struct sigframe, copy that out to the
 * user's (possibly alternate) stack, and rewrite the trapframe so the
 * thread resumes in the userland signal trampoline with the handler
 * arguments loaded into the out registers.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	u_long sp;
	int oonstack;
	int sig;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	/* The SPOFF adjustment is applied to all user stack pointers here. */
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		/* Place the frame at the top of the alternate stack. */
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* A struct frame below the sigframe; fr_in[6] is filled in below. */
	fp = (struct frame *)sfp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	tf->tf_out[4] = (register_t)catcher;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		tf->tf_out[1] = (register_t)&sfp->sf_si;

		/* Fill in POSIX parts. */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		tf->tf_out[1] = ksi->ksi_code;
		tf->tf_out[3] = (register_t)ksi->ksi_addr;
	}

	/*
	 * Copy the sigframe out to the user's stack, after spilling the
	 * register windows and linking the old frame pointer into fp.
	 */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/* Resume at the trampoline, stack pointed at the new frame. */
	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
621
#ifndef _SYS_SYSPROTO_H_
/*
 * Fallback declaration of the sigreturn(2) argument structure for
 * builds that do not pull in the generated <sys/sysproto.h>.  The
 * member must be named sigcntxp to match its use in sigreturn() below;
 * the previous name (ucp) would not compile when this fallback was in
 * effect.
 */
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif
627
/*
 * MPSAFE
 *
 * sigreturn(2): restore the user context saved by sendsig() once the
 * signal handler has returned through the trampoline.
 */
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	mcontext_t *mc;
	ucontext_t uc;
	int error;

	p = td->td_proc;
	/* Spill the register windows first; failure is fatal. */
	if (rwindow_save(td)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	/* set_mcontext() rejects insecure tstates and bad versions. */
	mc = &uc.uc_mcontext;
	error = set_mcontext(td, mc);
	if (error != 0)
		return (error);

	/* Restore the signal mask, keeping unmaskable signals unblocked. */
	PROC_LOCK(p);
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
	    td, mc->mc_tpc, mc->mc_sp, mc->mc_tstate);
	return (EJUSTRETURN);
}
666
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4 compatibility entry point; simply forwards to sigreturn(),
 * casting the (cast-compatible) argument structure.
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	/* Parenthesized return per style(9) and the rest of this file. */
	return (sigreturn(td, (struct sigreturn_args *)uap));
}
#endif
675
676 /*
677 * Construct a PCB from a trapframe. This is called from kdb_trap() where
678 * we want to start a backtrace from the function that caused us to enter
679 * the debugger. We have the context in the trapframe, but base the trace
680 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
681 * enough for a backtrace.
682 */
683 void
684 makectx(struct trapframe *tf, struct pcb *pcb)
685 {
686
687 pcb->pcb_pc = tf->tf_tpc;
688 pcb->pcb_sp = tf->tf_sp;
689 }
690
/*
 * Capture the machine context of a thread into *mc for sendsig() or
 * getcontext(2).  The trapframe is copied verbatim; if the FPU is live
 * its registers are saved to the PCB inside a critical section and
 * included as well.  GET_MC_CLEAR_RET zeroes the syscall return
 * registers (%o0/%o1) in the saved copy.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	/* The mcontext starts with an image of the trapframe. */
	bcopy(tf, mc, sizeof(*tf));
	if (flags & GET_MC_CLEAR_RET) {
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	}
	mc->mc_flags = _MC_VERSION;
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		/* FPU is live: save it to the PCB and mark it disabled. */
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		/* Include the saved FP state in the context. */
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}
718
/*
 * Install a user-supplied machine context (sigreturn(2)/setcontext(2)).
 * Contexts failing the TSTATE_SECURE() check or carrying the wrong
 * _MC_VERSION are rejected with EINVAL.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct trapframe *tf;
	struct pcb *pcb;
	uint64_t wstate;

	if (!TSTATE_SECURE(mc->mc_tstate) ||
	    (mc->mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
		return (EINVAL);
	tf = td->td_frame;
	pcb = td->td_pcb;
	/* Make sure the windows are spilled first. */
	flushw();
	/* Preserve the kernel-maintained window state word. */
	wstate = tf->tf_wstate;
	bcopy(mc, tf, sizeof(*tf));
	tf->tf_wstate = wstate;
	if ((mc->mc_fprs & FPRS_FEF) != 0) {
		/*
		 * Stage the FP state in the PCB with FPRS_FEF cleared in
		 * the trapframe; presumably reloaded on the next FP use
		 * (mirrors get_mcontext()) — confirm against the FP trap
		 * handling.
		 */
		tf->tf_fprs = 0;
		bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
		pcb->pcb_flags |= PCB_FEF;
	}
	return (0);
}
743
/*
 * Exit the kernel and execute a firmware call that will not return, as
 * specified by the arguments.
 */
void
cpu_shutdown(void *args)
{

#ifdef SMP
	/* Park the application processors before calling the firmware. */
	cpu_mp_shutdown();
#endif
	ofw_exit(args);
}
757
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 * Not implemented on sparc64 yet (intentional no-op).
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}
768
769 /* Get current clock frequency for the given CPU ID. */
770 int
771 cpu_est_clockrate(int cpu_id, uint64_t *rate)
772 {
773 struct pcpu *pc;
774
775 pc = pcpu_find(cpu_id);
776 if (pc == NULL || rate == NULL)
777 return (EINVAL);
778 *rate = pc->pc_clock;
779 return (0);
780 }
781
/*
 * Duplicate OF_exit() with a different firmware call function that restores
 * the trap table, otherwise a RED state exception is triggered in at least
 * some firmware versions.
 */
void
cpu_halt(void)
{
	/* OFW client interface call: the "exit" service, no args/returns. */
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"exit",
		0,
		0
	};

	cpu_shutdown(&args);
}
802
/*
 * Final shutdown hook, registered in cpu_startup(): power the machine
 * off via the firmware on RB_POWEROFF, or drop back into the firmware
 * on RB_HALT.  Returns (falls through) for a plain reboot.
 */
void
sparc64_shutdown_final(void *dummy, int howto)
{
	/* OFW call for the "SUNW,power-off" service, no args/returns. */
	static struct {
		cell_t name;
		cell_t nargs;
		cell_t nreturns;
	} args = {
		(cell_t)"SUNW,power-off",
		0,
		0
	};

	/* Turn the power off? */
	if ((howto & RB_POWEROFF) != 0)
		cpu_shutdown(&args);
	/* In case of halt, return to the firmware. */
	if ((howto & RB_HALT) != 0)
		cpu_halt();
}
823
/*
 * Machine-dependent idle hook; currently a busy no-op on sparc64.
 */
void
cpu_idle(int busy)
{

	/* Insert code to halt (until next interrupt) for the idle loop. */
}
830
/*
 * Wake an idle CPU; nothing to do since cpu_idle() never halts, so
 * report that no wakeup action was taken (0).
 */
int
cpu_idle_wakeup(int cpu)
{

	return (0);
}
837
838 int
839 ptrace_set_pc(struct thread *td, u_long addr)
840 {
841
842 td->td_frame->tf_tpc = addr;
843 td->td_frame->tf_tnpc = addr + 4;
844 return (0);
845 }
846
/*
 * Enable single stepping for a traced thread.  Not implemented on
 * sparc64; NOTE(review): returns 0 (success) despite doing nothing.
 */
int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
854
/*
 * Disable single stepping for a traced thread.  Not implemented on
 * sparc64; NOTE(review): returns 0 (success) despite doing nothing.
 */
int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
862
/*
 * Set up the registers and stack for a thread that is about to start
 * running a freshly exec'ed image: discard the old signal trampoline
 * and user trap table, zero the PCB and trapframe, and load the entry
 * point, stack pointer and initial tstate.
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct pcb *pcb;
	struct proc *p;
	u_long sp;

	/* XXX no cpu_exec */
	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if (p->p_md.md_utrap != NULL) {
		utrap_free(p->p_md.md_utrap);
		p->p_md.md_utrap = NULL;
	}

	pcb = td->td_pcb;
	tf = td->td_frame;
	/* Align the initial stack pointer down to a 16-byte boundary. */
	sp = rounddown(stack, 16);
	bzero(pcb, sizeof(*pcb));
	bzero(tf, sizeof(*tf));
	tf->tf_out[0] = stack;
	tf->tf_out[3] = p->p_sysent->sv_psstrings;
	/*
	 * Leave room for a struct frame and apply the SPOFF adjustment
	 * used for all user stack pointers in this file.
	 */
	tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
	tf->tf_tnpc = entry + 4;
	tf->tf_tpc = entry;
	tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;

	td->td_retval[0] = tf->tf_out[0];
	td->td_retval[1] = tf->tf_out[1];
}
894
895 int
896 fill_regs(struct thread *td, struct reg *regs)
897 {
898
899 bcopy(td->td_frame, regs, sizeof(*regs));
900 return (0);
901 }
902
/*
 * Overwrite a thread's general registers from a struct reg (ptrace).
 * The supplied tstate must pass TSTATE_SECURE(); the kernel-maintained
 * window state is preserved by deliberately writing it into the
 * caller's copy before the bulk copy.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	/* NB: scribbles on the caller's regs copy on purpose. */
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}
915
/* Hardware debug registers are not supported on sparc64. */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
922
/* Hardware debug registers are not supported on sparc64. */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
929
930 int
931 fill_fpregs(struct thread *td, struct fpreg *fpregs)
932 {
933 struct trapframe *tf;
934 struct pcb *pcb;
935
936 pcb = td->td_pcb;
937 tf = td->td_frame;
938 bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
939 fpregs->fr_fsr = tf->tf_fsr;
940 fpregs->fr_gsr = tf->tf_gsr;
941 return (0);
942 }
943
/*
 * Install a thread's floating point state from ptrace.  FPRS_FEF is
 * cleared in the trapframe and the registers are staged into the PCB
 * save area; %fsr and %gsr go straight into the trapframe.
 * NOTE(review): unlike set_mcontext(), PCB_FEF is not set here —
 * confirm this is intentional.
 */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
	struct trapframe *tf;
	struct pcb *pcb;

	pcb = td->td_pcb;
	tf = td->td_frame;
	tf->tf_fprs &= ~FPRS_FEF;
	bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
	tf->tf_fsr = fpregs->fr_fsr;
	tf->tf_gsr = fpregs->fr_gsr;
	return (0);
}
958
959 struct md_utrap *
960 utrap_alloc(void)
961 {
962 struct md_utrap *ut;
963
964 ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
965 ut->ut_refcnt = 1;
966 return (ut);
967 }
968
/*
 * Drop a reference on a user trap table descriptor, freeing it when
 * the last reference goes away.  The reference count is manipulated
 * under a pool mutex; the count is sampled inside the lock and the
 * free happens outside it.  NULL is tolerated.
 */
void
utrap_free(struct md_utrap *ut)
{
	int refcnt;

	if (ut == NULL)
		return;
	mtx_pool_lock(mtxpool_sleep, ut);
	ut->ut_refcnt--;
	refcnt = ut->ut_refcnt;
	mtx_pool_unlock(mtxpool_sleep, ut);
	if (refcnt == 0)
		free(ut, M_SUBPROC);
}
983
984 struct md_utrap *
985 utrap_hold(struct md_utrap *ut)
986 {
987
988 if (ut == NULL)
989 return (NULL);
990 mtx_pool_lock(mtxpool_sleep, ut);
991 ut->ut_refcnt++;
992 mtx_pool_unlock(mtxpool_sleep, ut);
993 return (ut);
994 }
Cache object: 798dbb501225f525bbe7899f4187d81a
|