1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
35 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/6.1/sys/sparc64/sparc64/machdep.c 158179 2006-04-30 16:44:43Z cvs2svn $");
40
41 #include "opt_compat.h"
42 #include "opt_ddb.h"
43 #include "opt_kstack_pages.h"
44 #include "opt_msgbuf.h"
45
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/proc.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/bus.h>
53 #include <sys/cpu.h>
54 #include <sys/cons.h>
55 #include <sys/eventhandler.h>
56 #include <sys/exec.h>
57 #include <sys/imgact.h>
58 #include <sys/interrupt.h>
59 #include <sys/kdb.h>
60 #include <sys/kernel.h>
61 #include <sys/ktr.h>
62 #include <sys/linker.h>
63 #include <sys/lock.h>
64 #include <sys/msgbuf.h>
65 #include <sys/mutex.h>
66 #include <sys/pcpu.h>
67 #include <sys/ptrace.h>
68 #include <sys/reboot.h>
69 #include <sys/signalvar.h>
70 #include <sys/smp.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timetc.h>
74 #include <sys/ucontext.h>
75
76 #include <dev/ofw/openfirm.h>
77
78 #include <vm/vm.h>
79 #include <vm/vm_extern.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_page.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_object.h>
84 #include <vm/vm_pager.h>
85 #include <vm/vm_param.h>
86
87 #include <ddb/ddb.h>
88
89 #include <machine/bus.h>
90 #include <machine/cache.h>
91 #include <machine/clock.h>
92 #include <machine/cpu.h>
93 #include <machine/fp.h>
94 #include <machine/fsr.h>
95 #include <machine/intr_machdep.h>
96 #include <machine/md_var.h>
97 #include <machine/metadata.h>
98 #include <machine/ofw_machdep.h>
99 #include <machine/ofw_mem.h>
100 #include <machine/pcb.h>
101 #include <machine/pmap.h>
102 #include <machine/pstate.h>
103 #include <machine/reg.h>
104 #include <machine/sigframe.h>
105 #include <machine/smp.h>
106 #include <machine/tick.h>
107 #include <machine/tlb.h>
108 #include <machine/tstate.h>
109 #include <machine/upa.h>
110 #include <machine/ver.h>
111
/* Signature of the Open Firmware client interface entry point. */
typedef int ofw_vec_t(void *);

#ifdef DDB
/* Kernel symbol table bounds, consumed by the DDB debugger. */
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Locked TLB entries covering the kernel, handed over by loader(8). */
struct tlb_entry *kernel_tlbs;
int kernel_tlb_slots;

/* Memory accounting globals; cold is cleared once the system is up. */
int cold = 1;
long Maxmem;
long realmem;		/* Set in cpu_startup(), in units of pages. */

/* Per-CPU data pages and the initial trapframe for the boot processor. */
char pcpu0[PCPU_PAGES * PAGE_SIZE];
struct trapframe frame0;

/* Kernel stack for thread0 (virtual and physical addresses). */
vm_offset_t kstack0;
vm_paddr_t kstack0_phys;

struct kva_md_info kmi;

/* Saved Open Firmware entry vector and trap table base. */
u_long ofw_vec;
u_long ofw_tba;

/*
 * Note: timer quality for CPU's is set low to try and prevent them from
 * being chosen as the primary timecounter. The CPU counters are not
 * synchronized among the CPU's so in MP machines this causes problems
 * when calculating the time. With this value the CPU's should only be
 * chosen as the primary timecounter as a last resort.
 */

#define UP_TICK_QUALITY 1000
#define MP_TICK_QUALITY -100
static struct timecounter tick_tc;

/* Model name, filled from the firmware root node's "name" property. */
char sparc64_model[32];

/* Tunable machdep.use_vis: use the VIS block copy/zero when non-zero. */
static int cpu_use_vis = 1;

cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;

static timecounter_get_t tick_get_timecount;
void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
    ofw_vec_t *vec);
void sparc64_shutdown_final(void *dummy, int howto);

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Compile-time checks on the layout of exported register structures. */
CTASSERT((1 << INT_SHIFT) == sizeof(int));
CTASSERT((1 << PTR_SHIFT) == sizeof(char *));

CTASSERT(sizeof(struct reg) == 256);
CTASSERT(sizeof(struct fpreg) == 272);
CTASSERT(sizeof(struct __mcontext) == 512);

/* The PCB and its FP save areas must be 64-byte aligned and must fit. */
CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));

CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));
176
/*
 * Machine-dependent startup, run from SYSINIT at SI_SUB_CPU: register
 * the %tick timecounter, report physical memory, set up the kernel VM
 * submaps and the buffer cache, and hook the final shutdown handler.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	tick_tc.tc_get_timecount = tick_get_timecount;
	tick_tc.tc_poll_pps = NULL;
	tick_tc.tc_counter_mask = ~0u;
	tick_tc.tc_frequency = tick_freq;
	tick_tc.tc_name = "tick";
	tick_tc.tc_quality = UP_TICK_QUALITY;
#ifdef SMP
	/*
	 * We do not know if each CPU's tick counter is synchronized.
	 */
	if (cpu_mp_probe())
		tick_tc.tc_quality = MP_TICK_QUALITY;
#endif

	tc_init(&tick_tc);

	/* Sum the firmware memory regions to get the real memory size. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	realmem = (long)physsz / PAGE_SIZE;	/* realmem is in pages */

	vm_ksubmap_init(&kmi);

	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n", cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	cpu_identify(rdpr(ver), tick_freq, PCPU_GET(cpuid));
}
222
223 void
224 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
225 {
226 struct intr_request *ir;
227 int i;
228
229 pcpu->pc_irtail = &pcpu->pc_irhead;
230 for (i = 0; i < IR_FREE; i++) {
231 ir = &pcpu->pc_irpool[i];
232 ir->ir_next = pcpu->pc_irfree;
233 pcpu->pc_irfree = ir;
234 }
235 }
236
/*
 * Enter a spin-mutex section: on the outermost entry, save the current
 * processor interrupt level (%pil) and raise it to PIL_TICK before any
 * other state is touched.  Nesting is tracked per-thread.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t pil;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Save the current %pil, then mask up to PIL_TICK. */
		pil = rdpr(pil);
		wrpr(pil, 0, PIL_TICK);
		td->td_md.md_saved_pil = pil;
	}
	td->td_md.md_spinlock_count++;
	critical_enter();
}
252
/*
 * Leave a spin-mutex section, undoing spinlock_enter() in reverse
 * order; the saved %pil is restored only when the outermost nesting
 * level exits.
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		wrpr(pil, td->td_md.md_saved_pil, 0);
}
264
265 unsigned
266 tick_get_timecount(struct timecounter *tc)
267 {
268 return ((unsigned)rd(tick));
269 }
270
/*
 * Early machine-dependent initialization, called before main().  Sets
 * up Open Firmware, the tick timer, the console, pmap, tunables, the
 * interrupt tables, proc0/thread0, and the boot CPU's per-CPU data.
 *
 * mdp is the loader(8) metadata pointer (NULL when booted directly);
 * o1-o3 are firmware pass-through values (unused here); vec is the
 * Open Firmware client interface entry point.
 */
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	phandle_t child;
	phandle_t root;
	struct pcpu *pc;
	vm_offset_t end;
	caddr_t kmdp;
	u_int clock;
	char *env;
	char type[8];

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of cpu we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_init(vec);

	/*
	 * Parse metadata if present and fetch parameters. Must be before the
	 * console is inited so cninit gets the right value of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/* Locate the first node of device_type "cpu" in the OFW tree. */
	root = OF_peer(0);
	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		OF_getprop(child, "device_type", type, sizeof(type));
		if (strcmp(type, "cpu") == 0)
			break;
	}

	/*
	 * Initialize the tick counter. Must be before the console is inited
	 * in order to provide the low-level console drivers with a working
	 * DELAY().
	 */
	OF_getprop(child, "clock-frequency", &clock, sizeof(clock));
	tick_init(clock);

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Panic if there is no metadata. Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("sparc64_init: no loader metadata.\n"
		    "This probably means you are not using loader(8).\n");
		panic("sparc64_init");
	}

	/*
	 * Sanity check the kernel end, which is important.
	 */
	if (end == 0) {
		printf("sparc64_init: warning, kernel end not specified.\n"
		    "Attempting to continue anyway.\n");
		end = (vm_offset_t)_end;
	}

	cache_init(child);

	/* Select block copy/zero: VIS-based routines unless disabled. */
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		cpu_block_copy = spitfire_block_copy;
		cpu_block_zero = spitfire_block_zero;
	} else {
		cpu_block_copy = bcopy;
		cpu_block_zero = bzero;
	}

#ifdef SMP
	mp_tramp = mp_tramp_alloc();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap(end);

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0 stuff (p_contested needs to be done early).
	 */
	proc_linkup(&proc0, &ksegrp0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	/* The PCB lives at the very top of thread0's kernel stack. */
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;

	/*
	 * Prime our per-cpu data page for use. Note, we are using it for our
	 * stack, so don't pass the real size (PAGE_SIZE) to pcpu_init or
	 * it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;
	pc->pc_mid = UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_node = child;
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Initialize the message buffer (after setting trap table).
	 */
	msgbufinit(msgbufp, MSGBUF_SIZE);

	mutex_init();
	intr_init2();

	/*
	 * Finish pmap initialization now that we're ready for mutexes.
	 */
	PMAP_LOCK_INIT(kernel_pmap);

	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter("Boot flags requested debugger");
#endif
}
446
447 void
448 set_openfirm_callback(ofw_vec_t *vec)
449 {
450 ofw_tba = rdpr(tba);
451 ofw_vec = (u_long)vec;
452 }
453
/*
 * Deliver signal sig to the current process: build a struct sigframe
 * (saved user context plus siginfo) on the appropriate user stack,
 * copy it out, and redirect the thread to the process's signal
 * trampoline which will invoke catcher.  Called with the proc lock
 * and ps_mtx held; both are dropped around the copyout and retaken.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	int oonstack;
	u_long sp;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* The handler gets a fresh frame immediately below the sigframe. */
	fp = (struct frame *)sfp - 1;

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[1] = (register_t)&sfp->sf_si;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	/* NOTE(review): handler pointer passed in %o4 for the trampoline. */
	tf->tf_out[4] = (register_t)catcher;
	/* Fill siginfo structure. */
	sf.sf_si.si_signo = sig;
	sf.sf_si.si_code = code;
	sf.sf_si.si_addr = (void *)tf->tf_sfar;

	/* Copy the sigframe out to the user's stack. */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/* Resume the thread at the signal trampoline, new SP below fp. */
	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	/* Re-acquire the locks that were dropped around the copyout. */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
548
549 /*
550 * Build siginfo_t for SA thread
551 */
552 void
553 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
554 {
555 struct proc *p;
556 struct thread *td;
557
558 td = curthread;
559 p = td->td_proc;
560 PROC_LOCK_ASSERT(p, MA_OWNED);
561
562 bzero(si, sizeof(*si));
563 si->si_signo = sig;
564 si->si_code = code;
565 /* XXXKSE fill other fields */
566 }
567
#ifndef _SYS_SYSPROTO_H_
/*
 * Fallback declaration of the sigreturn(2) argument structure; the
 * generated <sys/sysproto.h> normally provides it.  The member must be
 * named sigcntxp to match its use in sigreturn() (uap->sigcntxp).
 */
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif
573
/*
 * MPSAFE
 */
/*
 * sigreturn(2): restore the machine context saved by sendsig() and
 * re-install the saved signal mask.  Returns EJUSTRETURN on success so
 * the syscall return path leaves the restored registers untouched.
 */
int
sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	struct proc *p;
	mcontext_t *mc;
	ucontext_t uc;
	int error;

	p = td->td_proc;
	/* Spill the register windows to the user stack first. */
	if (rwindow_save(td)) {
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	mc = &uc.uc_mcontext;
	error = set_mcontext(td, mc);
	if (error != 0)
		return (error);

	/* Restore the signal mask, stripping unblockable signals. */
	PROC_LOCK(p);
	td->td_sigmask = uc.uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);

	CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
	    td, mc->mc_tpc, mc->mc_sp, mc->mc_tstate);
	return (EJUSTRETURN);
}
612
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4 compatibility entry point: forwards directly to the
 * native sigreturn().
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return (sigreturn(td, (struct sigreturn_args *)uap));
}
#endif
621
622 /*
623 * Construct a PCB from a trapframe. This is called from kdb_trap() where
624 * we want to start a backtrace from the function that caused us to enter
625 * the debugger. We have the context in the trapframe, but base the trace
626 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
627 * enough for a backtrace.
628 */
629 void
630 makectx(struct trapframe *tf, struct pcb *pcb)
631 {
632
633 pcb->pcb_pc = tf->tf_tpc;
634 pcb->pcb_sp = tf->tf_sp;
635 }
636
/*
 * Export the thread's machine context into mc.  The trapframe is
 * copied wholesale (mcontext_t is laid out to overlap it — see the
 * CTASSERTs above); GET_MC_CLEAR_RET zeroes the syscall return
 * registers %o0/%o1.  Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	bcopy(tf, mc, sizeof(*tf));
	if (flags & GET_MC_CLEAR_RET) {
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	}
	mc->mc_flags = _MC_VERSION;
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		/* FPU live: save its state into the PCB and mark it there. */
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		/* Export the saved FP state along with the context. */
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}
664
/*
 * Install machine context mc into the thread.  Rejects contexts with
 * privileged tstate bits (TSTATE_SECURE) or a version mismatch with
 * EINVAL; otherwise copies the context over the trapframe, preserving
 * the kernel-owned window state, and stages any FP state in the PCB.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mc)
{
	struct trapframe *tf;
	struct pcb *pcb;
	uint64_t wstate;

	if (!TSTATE_SECURE(mc->mc_tstate) ||
	    (mc->mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
		return (EINVAL);
	tf = td->td_frame;
	pcb = td->td_pcb;
	/* Make sure the windows are spilled first. */
	flushw();
	/* Preserve tf_wstate across the wholesale context copy. */
	wstate = tf->tf_wstate;
	bcopy(mc, tf, sizeof(*tf));
	tf->tf_wstate = wstate;
	if ((mc->mc_fprs & FPRS_FEF) != 0) {
		/* Stage FP state in the PCB; reloaded via PCB_FEF. */
		tf->tf_fprs = 0;
		bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
		pcb->pcb_flags |= PCB_FEF;
	}
	return (0);
}
689
/*
 * Exit the kernel and execute a firmware call that will not return, as
 * specified by the arguments.
 */
void
cpu_shutdown(void *args)
{

#ifdef SMP
	/* Park the other CPUs before leaving the kernel. */
	cpu_mp_shutdown();
#endif
	openfirmware_exit(args);
}
703
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	/* Per-CPU clock rate estimation is not implemented on sparc64. */
	return (ENXIO);
}
711
712 /*
713 * Duplicate OF_exit() with a different firmware call function that restores
714 * the trap table, otherwise a RED state exception is triggered in at least
715 * some firmware versions.
716 */
717 void
718 cpu_halt(void)
719 {
720 static struct {
721 cell_t name;
722 cell_t nargs;
723 cell_t nreturns;
724 } args = {
725 (cell_t)"exit",
726 0,
727 0
728 };
729
730 cpu_shutdown(&args);
731 }
732
733 void
734 sparc64_shutdown_final(void *dummy, int howto)
735 {
736 static struct {
737 cell_t name;
738 cell_t nargs;
739 cell_t nreturns;
740 } args = {
741 (cell_t)"SUNW,power-off",
742 0,
743 0
744 };
745
746 /* Turn the power off? */
747 if ((howto & RB_POWEROFF) != 0)
748 cpu_shutdown(&args);
749 /* In case of halt, return to the firmware */
750 if ((howto & RB_HALT) != 0)
751 cpu_halt();
752 }
753
/*
 * Idle-loop body; currently a no-op (busy idle) on sparc64.
 */
void
cpu_idle(void)
{
	/* Insert code to halt (until next interrupt) for the idle loop */
}
759
760 int
761 ptrace_set_pc(struct thread *td, u_long addr)
762 {
763
764 td->td_frame->tf_tpc = addr;
765 td->td_frame->tf_tnpc = addr + 4;
766 return (0);
767 }
768
/*
 * Single-instruction stepping: not implemented on sparc64; reports
 * success regardless.
 */
int
ptrace_single_step(struct thread *td)
{
	/* TODO; */
	return (0);
}
775
/*
 * Clear single-step state: not implemented on sparc64; reports success
 * regardless.
 */
int
ptrace_clear_single_step(struct thread *td)
{
	/* TODO; */
	return (0);
}
782
/*
 * Set up the registers and stack of a thread about to start executing
 * a freshly exec'd image: entry is the image entry point and stack the
 * initial user stack pointer (ps_strings is unused here; the value
 * passed to the program in %o3 comes from the sysentvec instead).
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct pcb *pcb;
	struct proc *p;
	u_long sp;

	/* XXX no cpu_exec */
	/* Reset per-process MD state: signal trampoline and user traps. */
	p = td->td_proc;
	p->p_md.md_sigtramp = NULL;
	if (p->p_md.md_utrap != NULL) {
		utrap_free(p->p_md.md_utrap);
		p->p_md.md_utrap = NULL;
	}

	pcb = td->td_pcb;
	tf = td->td_frame;
	/* Align the stack pointer down to a 16-byte boundary. */
	sp = rounddown(stack, 16);
	bzero(pcb, sizeof(*pcb));
	bzero(tf, sizeof(*tf));
	tf->tf_out[0] = stack;
	tf->tf_out[3] = p->p_sysent->sv_psstrings;
	/* Leave room below sp for a register window save area (frame). */
	tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
	tf->tf_tnpc = entry + 4;
	tf->tf_tpc = entry;
	tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;

	td->td_retval[0] = tf->tf_out[0];
	td->td_retval[1] = tf->tf_out[1];
}
814
815 int
816 fill_regs(struct thread *td, struct reg *regs)
817 {
818
819 bcopy(td->td_frame, regs, sizeof(*regs));
820 return (0);
821 }
822
/*
 * Install a struct reg into the thread's trapframe.  Rejects register
 * sets with privileged tstate bits (TSTATE_SECURE) with EINVAL.  Note
 * this deliberately writes the current window state back into *regs
 * before the copy so the caller cannot override tf_wstate.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}
835
/*
 * Hardware debug registers are not supported on sparc64.
 */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
842
/*
 * Hardware debug registers are not supported on sparc64.
 */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
849
850 int
851 fill_fpregs(struct thread *td, struct fpreg *fpregs)
852 {
853 struct trapframe *tf;
854 struct pcb *pcb;
855
856 pcb = td->td_pcb;
857 tf = td->td_frame;
858 bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
859 fpregs->fr_fsr = tf->tf_fsr;
860 fpregs->fr_gsr = tf->tf_gsr;
861 return (0);
862 }
863
864 int
865 set_fpregs(struct thread *td, struct fpreg *fpregs)
866 {
867 struct trapframe *tf;
868 struct pcb *pcb;
869
870 pcb = td->td_pcb;
871 tf = td->td_frame;
872 tf->tf_fprs &= ~FPRS_FEF;
873 bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
874 tf->tf_fsr = fpregs->fr_fsr;
875 tf->tf_gsr = fpregs->fr_gsr;
876 return (0);
877 }
878
879 struct md_utrap *
880 utrap_alloc(void)
881 {
882 struct md_utrap *ut;
883
884 ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
885 ut->ut_refcnt = 1;
886 return (ut);
887 }
888
889 void
890 utrap_free(struct md_utrap *ut)
891 {
892 int refcnt;
893
894 if (ut == NULL)
895 return;
896 mtx_pool_lock(mtxpool_sleep, ut);
897 ut->ut_refcnt--;
898 refcnt = ut->ut_refcnt;
899 mtx_pool_unlock(mtxpool_sleep, ut);
900 if (refcnt == 0)
901 free(ut, M_SUBPROC);
902 }
903
904 struct md_utrap *
905 utrap_hold(struct md_utrap *ut)
906 {
907
908 if (ut == NULL)
909 return (NULL);
910 mtx_pool_lock(mtxpool_sleep, ut);
911 ut->ut_refcnt++;
912 mtx_pool_unlock(mtxpool_sleep, ut);
913 return (ut);
914 }
/* Cache object: 12e40d8e360405816a54cd2809b508bf */