1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * Copyright (c) 1992 Terrence R. Lambert.
4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * William Jolitz.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
35 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/11.0/sys/sparc64/sparc64/machdep.c 294930 2016-01-27 17:55:01Z jhb $");
40
41 #include "opt_compat.h"
42 #include "opt_ddb.h"
43 #include "opt_kstack_pages.h"
44
45 #include <sys/param.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/bus.h>
52 #include <sys/cpu.h>
53 #include <sys/cons.h>
54 #include <sys/eventhandler.h>
55 #include <sys/exec.h>
56 #include <sys/imgact.h>
57 #include <sys/interrupt.h>
58 #include <sys/kdb.h>
59 #include <sys/kernel.h>
60 #include <sys/ktr.h>
61 #include <sys/linker.h>
62 #include <sys/lock.h>
63 #include <sys/msgbuf.h>
64 #include <sys/mutex.h>
65 #include <sys/pcpu.h>
66 #include <sys/ptrace.h>
67 #include <sys/reboot.h>
68 #include <sys/rwlock.h>
69 #include <sys/signalvar.h>
70 #include <sys/smp.h>
71 #include <sys/syscallsubr.h>
72 #include <sys/sysent.h>
73 #include <sys/sysproto.h>
74 #include <sys/timetc.h>
75 #include <sys/ucontext.h>
76
77 #include <dev/ofw/openfirm.h>
78
79 #include <vm/vm.h>
80 #include <vm/vm_extern.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_page.h>
83 #include <vm/vm_map.h>
84 #include <vm/vm_object.h>
85 #include <vm/vm_pager.h>
86 #include <vm/vm_param.h>
87
88 #include <ddb/ddb.h>
89
90 #include <machine/bus.h>
91 #include <machine/cache.h>
92 #include <machine/cmt.h>
93 #include <machine/cpu.h>
94 #include <machine/fireplane.h>
95 #include <machine/fp.h>
96 #include <machine/fsr.h>
97 #include <machine/intr_machdep.h>
98 #include <machine/jbus.h>
99 #include <machine/md_var.h>
100 #include <machine/metadata.h>
101 #include <machine/ofw_machdep.h>
102 #include <machine/ofw_mem.h>
103 #include <machine/pcb.h>
104 #include <machine/pmap.h>
105 #include <machine/pstate.h>
106 #include <machine/reg.h>
107 #include <machine/sigframe.h>
108 #include <machine/smp.h>
109 #include <machine/tick.h>
110 #include <machine/tlb.h>
111 #include <machine/tstate.h>
112 #include <machine/upa.h>
113 #include <machine/ver.h>
114
/* Signature of the Open Firmware client interface entry point. */
typedef int ofw_vec_t(void *);

int dtlb_slots;			/* number of data TLB slots (see sparc64_init()) */
int itlb_slots;			/* number of instruction TLB slots */
struct tlb_entry *kernel_tlbs;	/* locked kernel TLB entries from loader(8) */
int kernel_tlb_slots;		/* number of entries in kernel_tlbs */

int cold = 1;			/* nonzero during early boot */
long Maxmem;
long realmem;			/* real memory in pages (set in cpu_startup()) */

void *dpcpu0;			/* dynamic per-CPU area of the BSP */
char pcpu0[PCPU_PAGES * PAGE_SIZE];	/* static per-CPU page(s) of the BSP */
struct trapframe frame0;	/* initial trapframe of thread0 */

vm_offset_t kstack0;		/* kernel stack of thread0 */
vm_paddr_t kstack0_phys;	/* physical address of kstack0 */

struct kva_md_info kmi;

u_long ofw_vec;			/* Open Firmware client interface entry vector */
u_long ofw_tba;			/* firmware trap table base (%tba at entry) */
u_int tba_taken_over;

/* Machine name from the OFW root node (see sparc64_init()). */
char sparc64_model[32];

/* Tunable machdep.use_vis: use VIS-based block copy/zero when nonzero. */
static int cpu_use_vis = 1;

/* Hooks for the block copy/zero implementations selected at boot. */
cpu_block_copy_t *cpu_block_copy;
cpu_block_zero_t *cpu_block_zero;

static phandle_t find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl);
void sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3,
    ofw_vec_t *vec);
static void sparc64_shutdown_final(void *dummy, int howto);

static void cpu_startup(void *arg);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Compile-time sanity checks on ABI-visible sizes and layout. */
CTASSERT((1 << INT_SHIFT) == sizeof(int));
CTASSERT((1 << PTR_SHIFT) == sizeof(char *));

CTASSERT(sizeof(struct reg) == 256);
CTASSERT(sizeof(struct fpreg) == 272);
CTASSERT(sizeof(struct __mcontext) == 512);

/* The PCB and its FP save areas must be cache-line (64-byte) aligned. */
CTASSERT((sizeof(struct pcb) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_kfp) & (64 - 1)) == 0);
CTASSERT((offsetof(struct pcb, pcb_ufp) & (64 - 1)) == 0);
CTASSERT(sizeof(struct pcb) <= ((KSTACK_PAGES * PAGE_SIZE) / 8));

CTASSERT(sizeof(struct pcpu) <= ((PCPU_PAGES * PAGE_SIZE) / 2));
167
168 static void
169 cpu_startup(void *arg)
170 {
171 vm_paddr_t physsz;
172 int i;
173
174 physsz = 0;
175 for (i = 0; i < sparc64_nmemreg; i++)
176 physsz += sparc64_memreg[i].mr_size;
177 printf("real memory = %lu (%lu MB)\n", physsz,
178 physsz / (1024 * 1024));
179 realmem = (long)physsz / PAGE_SIZE;
180
181 vm_ksubmap_init(&kmi);
182
183 bufinit();
184 vm_pager_bufferinit();
185
186 EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
187 SHUTDOWN_PRI_LAST);
188
189 printf("avail memory = %lu (%lu MB)\n", vm_cnt.v_free_count * PAGE_SIZE,
190 vm_cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));
191
192 if (bootverbose)
193 printf("machine: %s\n", sparc64_model);
194
195 cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
196 }
197
198 void
199 cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
200 {
201 struct intr_request *ir;
202 int i;
203
204 pcpu->pc_irtail = &pcpu->pc_irhead;
205 for (i = 0; i < IR_FREE; i++) {
206 ir = &pcpu->pc_irpool[i];
207 ir->ir_next = pcpu->pc_irfree;
208 pcpu->pc_irfree = ir;
209 }
210 }
211
212 void
213 spinlock_enter(void)
214 {
215 struct thread *td;
216 register_t pil;
217
218 td = curthread;
219 if (td->td_md.md_spinlock_count == 0) {
220 pil = rdpr(pil);
221 wrpr(pil, 0, PIL_TICK);
222 td->td_md.md_spinlock_count = 1;
223 td->td_md.md_saved_pil = pil;
224 } else
225 td->td_md.md_spinlock_count++;
226 critical_enter();
227 }
228
229 void
230 spinlock_exit(void)
231 {
232 struct thread *td;
233 register_t pil;
234
235 td = curthread;
236 critical_exit();
237 pil = td->td_md.md_saved_pil;
238 td->td_md.md_spinlock_count--;
239 if (td->td_md.md_spinlock_count == 0)
240 wrpr(pil, pil, 0);
241 }
242
243 static phandle_t
244 find_bsp(phandle_t node, uint32_t bspid, u_int cpu_impl)
245 {
246 char type[sizeof("cpu")];
247 phandle_t child;
248 uint32_t portid;
249
250 for (; node != 0; node = OF_peer(node)) {
251 child = OF_child(node);
252 if (child > 0) {
253 child = find_bsp(child, bspid, cpu_impl);
254 if (child > 0)
255 return (child);
256 } else {
257 if (OF_getprop(node, "device_type", type,
258 sizeof(type)) <= 0)
259 continue;
260 if (strcmp(type, "cpu") != 0)
261 continue;
262 if (OF_getprop(node, cpu_portid_prop(cpu_impl),
263 &portid, sizeof(portid)) <= 0)
264 continue;
265 if (portid == bspid)
266 return (node);
267 }
268 }
269 return (0);
270 }
271
272 const char *
273 cpu_portid_prop(u_int cpu_impl)
274 {
275
276 switch (cpu_impl) {
277 case CPU_IMPL_SPARC64:
278 case CPU_IMPL_SPARC64V:
279 case CPU_IMPL_ULTRASPARCI:
280 case CPU_IMPL_ULTRASPARCII:
281 case CPU_IMPL_ULTRASPARCIIi:
282 case CPU_IMPL_ULTRASPARCIIe:
283 return ("upa-portid");
284 case CPU_IMPL_ULTRASPARCIII:
285 case CPU_IMPL_ULTRASPARCIIIp:
286 case CPU_IMPL_ULTRASPARCIIIi:
287 case CPU_IMPL_ULTRASPARCIIIip:
288 return ("portid");
289 case CPU_IMPL_ULTRASPARCIV:
290 case CPU_IMPL_ULTRASPARCIVp:
291 return ("cpuid");
292 default:
293 return ("");
294 }
295 }
296
/*
 * Return the interconnect module/agent ID of the calling CPU, read from
 * the bus configuration register appropriate for the given CPU
 * implementation; 0 for unknown implementations.  Used (among others)
 * to identify the BSP node in the OFW device tree via find_bsp().
 */
uint32_t
cpu_get_mid(u_int cpu_impl)
{

	switch (cpu_impl) {
	/* UPA-based parts: module ID from the UPA config register. */
	case CPU_IMPL_SPARC64:
	case CPU_IMPL_SPARC64V:
	case CPU_IMPL_ULTRASPARCI:
	case CPU_IMPL_ULTRASPARCII:
	case CPU_IMPL_ULTRASPARCIIi:
	case CPU_IMPL_ULTRASPARCIIe:
		return (UPA_CR_GET_MID(ldxa(0, ASI_UPA_CONFIG_REG)));
	/* USIII/USIII+: agent ID from the Fireplane config register. */
	case CPU_IMPL_ULTRASPARCIII:
	case CPU_IMPL_ULTRASPARCIIIp:
		return (FIREPLANE_CR_GET_AID(ldxa(AA_FIREPLANE_CONFIG,
		    ASI_FIREPLANE_CONFIG_REG)));
	/* USIIIi/USIIIi+: JBus ID from the JBus config register. */
	case CPU_IMPL_ULTRASPARCIIIi:
	case CPU_IMPL_ULTRASPARCIIIip:
		return (JBUS_CR_GET_JID(ldxa(0, ASI_JBUS_CONFIG_REG)));
	/* USIV/USIV+: interrupt ID register. */
	case CPU_IMPL_ULTRASPARCIV:
	case CPU_IMPL_ULTRASPARCIVp:
		return (INTR_ID_GET_ID(ldxa(AA_INTR_ID, ASI_INTR_ID)));
	default:
		return (0);
	}
}
323
/*
 * Machine-dependent early initialization of the boot CPU, called from
 * the assembler entry code with the loader(8) metadata pointer and the
 * Open Firmware client interface entry vector.  Runs before
 * mi_startup(): identifies the CPU, sets up Open Firmware, the BSP's
 * per-CPU data, the TLBs and caches, pmap, proc0/thread0, the trap
 * table and finally the console and interrupts.  The statement order
 * below is load-bearing; see the inline comments.
 */
void
sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
{
	char *env;
	struct pcpu *pc;
	vm_offset_t end;
	vm_offset_t va;
	caddr_t kmdp;
	phandle_t root;
	u_int cpu_impl;

	end = 0;
	kmdp = NULL;

	/*
	 * Find out what kind of CPU we have first, for anything that changes
	 * behaviour.
	 */
	cpu_impl = VER_IMPL(rdpr(ver));

	/*
	 * Do CPU-specific initialization.
	 */
	if (cpu_impl >= CPU_IMPL_ULTRASPARCIII)
		cheetah_init(cpu_impl);
	else if (cpu_impl == CPU_IMPL_SPARC64V)
		zeus_init(cpu_impl);

	/*
	 * Clear (S)TICK timer (including NPT).
	 */
	tick_clear(cpu_impl);

	/*
	 * UltraSparc II[e,i] based systems come up with the tick interrupt
	 * enabled and a handler that resets the tick counter, causing DELAY()
	 * to not work properly when used early in boot.
	 * UltraSPARC III based systems come up with the system tick interrupt
	 * enabled, causing an interrupt storm on startup since they are not
	 * handled.
	 */
	tick_stop(cpu_impl);

	/*
	 * Set up Open Firmware entry points.
	 */
	ofw_tba = rdpr(tba);
	ofw_vec = (u_long)vec;

	/*
	 * Parse metadata if present and fetch parameters.  Must be before the
	 * console is inited so cninit() gets the right value of boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *),
			    0);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
			kernel_tlb_slots = MD_FETCH(kmdp, MODINFOMD_DTLB_SLOTS,
			    int);
			kernel_tlbs = (void *)preload_search_info(kmdp,
			    MODINFO_METADATA | MODINFOMD_DTLB);
		}
	}

	init_param1();

	/*
	 * Initialize Open Firmware (needed for console).
	 */
	OF_install(OFW_STD_DIRECT, 0);
	OF_init(ofw_entry);

	/*
	 * Prime our per-CPU data page for use.  Note, we are using it for
	 * our stack, so don't pass the real size (PAGE_SIZE) to pcpu_init
	 * or it'll zero it out from under us.
	 */
	pc = (struct pcpu *)(pcpu0 + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_addr = (vm_offset_t)pcpu0;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = cpu_get_mid(cpu_impl);
	pc->pc_tlb_ctx = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_min = TLB_CTX_USER_MIN;
	pc->pc_tlb_ctx_max = TLB_CTX_USER_MAX;

	/*
	 * Determine the OFW node and frequency of the BSP (and ensure the
	 * BSP is in the device tree in the first place).
	 */
	root = OF_peer(0);
	pc->pc_node = find_bsp(root, pc->pc_mid, cpu_impl);
	if (pc->pc_node == 0)
		OF_panic("%s: cannot find boot CPU node", __func__);
	if (OF_getprop(pc->pc_node, "clock-frequency", &pc->pc_clock,
	    sizeof(pc->pc_clock)) <= 0)
		OF_panic("%s: cannot determine boot CPU clock", __func__);

	/*
	 * Panic if there is no metadata.  Most likely the kernel was booted
	 * directly, instead of through loader(8).
	 */
	if (mdp == NULL || kmdp == NULL || end == 0 ||
	    kernel_tlb_slots == 0 || kernel_tlbs == NULL)
		OF_panic("%s: missing loader metadata.\nThis probably means "
		    "you are not using loader(8).", __func__);

	/*
	 * Work around the broken loader behavior of not demapping no
	 * longer used kernel TLB slots when unloading the kernel or
	 * modules.  Walk backwards from the last locked 4M slot down to
	 * the first one past the kernel end, demapping each.
	 */
	for (va = KERNBASE + (kernel_tlb_slots - 1) * PAGE_SIZE_4M;
	    va >= roundup2(end, PAGE_SIZE_4M); va -= PAGE_SIZE_4M) {
		if (bootverbose)
			OF_printf("demapping unused kernel TLB slot "
			    "(va %#lx - %#lx)\n", va, va + PAGE_SIZE_4M - 1);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		kernel_tlb_slots--;
	}

	/*
	 * Determine the TLB slot maxima, which are expected to be
	 * equal across all CPUs.
	 * NB: for cheetah-class CPUs, these properties only refer
	 * to the t16s.
	 */
	if (OF_getprop(pc->pc_node, "#dtlb-entries", &dtlb_slots,
	    sizeof(dtlb_slots)) == -1)
		OF_panic("%s: cannot determine number of dTLB slots",
		    __func__);
	if (OF_getprop(pc->pc_node, "#itlb-entries", &itlb_slots,
	    sizeof(itlb_slots)) == -1)
		OF_panic("%s: cannot determine number of iTLB slots",
		    __func__);

	/*
	 * Initialize and enable the caches.  Note that this may include
	 * applying workarounds.
	 */
	cache_init(pc);
	cache_enable(cpu_impl);
	uma_set_align(pc->pc_cache.dc_linesize - 1);

	/* Select block copy/zero implementations (VIS if usable/enabled). */
	cpu_block_copy = bcopy;
	cpu_block_zero = bzero;
	getenv_int("machdep.use_vis", &cpu_use_vis);
	if (cpu_use_vis) {
		switch (cpu_impl) {
		case CPU_IMPL_SPARC64:
		case CPU_IMPL_ULTRASPARCI:
		case CPU_IMPL_ULTRASPARCII:
		case CPU_IMPL_ULTRASPARCIIi:
		case CPU_IMPL_ULTRASPARCIIe:
		case CPU_IMPL_ULTRASPARCIII:	/* NB: we've disabled P$. */
		case CPU_IMPL_ULTRASPARCIIIp:
		case CPU_IMPL_ULTRASPARCIIIi:
		case CPU_IMPL_ULTRASPARCIV:
		case CPU_IMPL_ULTRASPARCIVp:
		case CPU_IMPL_ULTRASPARCIIIip:
			cpu_block_copy = spitfire_block_copy;
			cpu_block_zero = spitfire_block_zero;
			break;
		case CPU_IMPL_SPARC64V:
			cpu_block_copy = zeus_block_copy;
			cpu_block_zero = zeus_block_zero;
			break;
		}
	}

#ifdef SMP
	mp_init();
#endif

	/*
	 * Initialize virtual memory and calculate physmem.
	 */
	pmap_bootstrap(cpu_impl);

	/*
	 * Initialize tunables.
	 */
	init_param2(physmem);
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Initialize the interrupt tables.
	 */
	intr_init1();

	/*
	 * Initialize proc0, set kstack0, frame0, curthread and curpcb.
	 */
	proc_linkup0(&proc0, &thread0);
	proc0.p_md.md_sigtramp = NULL;
	proc0.p_md.md_utrap = NULL;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;
	thread0.td_frame = &frame0;
	pc->pc_curthread = &thread0;
	pc->pc_curpcb = thread0.td_pcb;

	/*
	 * Initialize global registers.
	 */
	cpu_setregs(pc);

	/*
	 * Take over the trap table via the PROM.  Using the PROM for this
	 * is necessary in order to set obp-control-relinquished to true
	 * within the PROM so obtaining /virtual-memory/translations doesn't
	 * trigger a fatal reset error or worse things further down the road.
	 * XXX it should be possible to use this solely instead of writing
	 * %tba in cpu_setregs().  Doing so causes a hang however.
	 *
	 * NB: the low-level console drivers require a working DELAY() and
	 * some compiler optimizations may cause the curthread accesses of
	 * mutex(9) to be factored out even if the latter aren't actually
	 * called.  Both of these require PCPU_REG to be set.  However, we
	 * can't set PCPU_REG without also taking over the trap table or the
	 * firmware will overwrite it.
	 */
	sun4u_set_traptable(tl0_base);

	/*
	 * Initialize the dynamic per-CPU area for the BSP and the message
	 * buffer (after setting the trap table).
	 */
	dpcpu_init(dpcpu0, 0);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Initialize mutexes.
	 */
	mutex_init();

	/*
	 * Initialize console now that we have a reasonable set of system
	 * services.
	 */
	cninit();

	/*
	 * Finish the interrupt initialization now that mutexes work and
	 * enable them.
	 */
	intr_init2();
	wrpr(pil, 0, 0);
	wrpr(pstate, 0, PSTATE_KERNEL);

	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
598
/*
 * Deliver a signal to the current thread: save the user context and
 * signal info into a struct sigframe, copy it out to the user stack
 * (or the alternate signal stack, if enabled and requested for this
 * signal), build the handler's register arguments in the trapframe and
 * point the return-to-userland PC at the process' signal trampoline.
 * Entered with the proc lock and ps_mtx held; both are dropped around
 * the copyout and reacquired before returning.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct frame *fp;
	struct proc *p;
	u_long sp;
	int oonstack;
	int sig;

	oonstack = 0;
	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	/* %sp is kept biased by SPOFF on sparc64; un-bias for checks. */
	sp = tf->tf_sp + SPOFF;
	oonstack = sigonstack(sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Make sure we have a signal trampoline to return to. */
	if (p->p_md.md_sigtramp == NULL) {
		/*
		 * No signal trampoline... kill the process.
		 */
		CTR0(KTR_SIG, "sendsig: no sigtramp");
		printf("sendsig: %s is too old, rebuild it\n", p->p_comm);
		sigexit(td, sig);
		/* NOTREACHED */
	}

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	get_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
	    ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe));
	} else
		sfp = (struct sigframe *)sp - 1;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* Reserve a register window frame below the sigframe. */
	fp = (struct frame *)sfp - 1;

	/* Build the argument list for the signal handler. */
	tf->tf_out[0] = sig;
	tf->tf_out[2] = (register_t)&sfp->sf_uc;
	tf->tf_out[4] = (register_t)catcher;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		tf->tf_out[1] = (register_t)&sfp->sf_si;

		/* Fill in POSIX parts. */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
	} else {
		/* Old FreeBSD-style arguments. */
		tf->tf_out[1] = ksi->ksi_code;
		tf->tf_out[3] = (register_t)ksi->ksi_addr;
	}

	/* Copy the sigframe out to the user's stack. */
	if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 ||
	    suword(&fp->fr_in[6], tf->tf_out[6]) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/* Resume userland in the signal trampoline, on the new frame. */
	tf->tf_tpc = (u_long)p->p_md.md_sigtramp;
	tf->tf_tnpc = tf->tf_tpc + 4;
	tf->tf_sp = (u_long)fp - SPOFF;

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
698
#ifndef _SYS_SYSPROTO_H_
/*
 * Fallback declaration when the generated sys/sysproto.h is not used.
 * The member name must be "sigcntxp" to match both the uap->sigcntxp
 * accesses in sys_sigreturn() below and the field generated from
 * syscalls.master; the previous "ucp" would not compile if this
 * fallback were ever active.
 */
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif
704
705 /*
706 * MPSAFE
707 */
708 int
709 sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
710 {
711 struct proc *p;
712 mcontext_t *mc;
713 ucontext_t uc;
714 int error;
715
716 p = td->td_proc;
717 if (rwindow_save(td)) {
718 PROC_LOCK(p);
719 sigexit(td, SIGILL);
720 }
721
722 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
723 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
724 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
725 return (EFAULT);
726 }
727
728 mc = &uc.uc_mcontext;
729 error = set_mcontext(td, mc);
730 if (error != 0)
731 return (error);
732
733 kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
734
735 CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx",
736 td, mc->_mc_tpc, mc->_mc_sp, mc->_mc_tstate);
737 return (EJUSTRETURN);
738 }
739
740 /*
741 * Construct a PCB from a trapframe. This is called from kdb_trap() where
742 * we want to start a backtrace from the function that caused us to enter
743 * the debugger. We have the context in the trapframe, but base the trace
744 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
745 * enough for a backtrace.
746 */
747 void
748 makectx(struct trapframe *tf, struct pcb *pcb)
749 {
750
751 pcb->pcb_pc = tf->tf_tpc;
752 pcb->pcb_sp = tf->tf_sp;
753 }
754
/*
 * Export the thread's user register state into the given mcontext.
 * With GET_MC_CLEAR_RET the syscall return registers (%o0/%o1) are
 * reported as zero.  If the thread currently owns live FPU state it is
 * first flushed into the PCB so a consistent copy can be included.
 * Always returns 0.
 */
int
get_mcontext(struct thread *td, mcontext_t *mc, int flags)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;
	/*
	 * Copy the registers which will be restored by tl0_ret() from the
	 * trapframe.
	 * Note that we skip %g7 which is used as the userland TLS register
	 * and %wstate.
	 */
	mc->_mc_flags = _MC_VERSION;
	mc->mc_global[1] = tf->tf_global[1];
	mc->mc_global[2] = tf->tf_global[2];
	mc->mc_global[3] = tf->tf_global[3];
	mc->mc_global[4] = tf->tf_global[4];
	mc->mc_global[5] = tf->tf_global[5];
	mc->mc_global[6] = tf->tf_global[6];
	if (flags & GET_MC_CLEAR_RET) {
		mc->mc_out[0] = 0;
		mc->mc_out[1] = 0;
	} else {
		mc->mc_out[0] = tf->tf_out[0];
		mc->mc_out[1] = tf->tf_out[1];
	}
	mc->mc_out[2] = tf->tf_out[2];
	mc->mc_out[3] = tf->tf_out[3];
	mc->mc_out[4] = tf->tf_out[4];
	mc->mc_out[5] = tf->tf_out[5];
	mc->mc_out[6] = tf->tf_out[6];
	mc->mc_out[7] = tf->tf_out[7];
	mc->_mc_fprs = tf->tf_fprs;
	mc->_mc_fsr = tf->tf_fsr;
	mc->_mc_gsr = tf->tf_gsr;
	mc->_mc_tnpc = tf->tf_tnpc;
	mc->_mc_tpc = tf->tf_tpc;
	mc->_mc_tstate = tf->tf_tstate;
	mc->_mc_y = tf->tf_y;
	/*
	 * Block preemption while juggling the FPU state so the trapframe,
	 * PCB and FPU contents cannot go out of sync.
	 */
	critical_enter();
	if ((tf->tf_fprs & FPRS_FEF) != 0) {
		/* Live FP state: save it into the PCB and mark it saved. */
		savefpctx(pcb->pcb_ufp);
		tf->tf_fprs &= ~FPRS_FEF;
		pcb->pcb_flags |= PCB_FEF;
	}
	if ((pcb->pcb_flags & PCB_FEF) != 0) {
		bcopy(pcb->pcb_ufp, mc->mc_fp, sizeof(mc->mc_fp));
		mc->_mc_fprs |= FPRS_FEF;
	}
	critical_exit();
	return (0);
}
809
810 int
811 set_mcontext(struct thread *td, mcontext_t *mc)
812 {
813 struct trapframe *tf;
814 struct pcb *pcb;
815
816 if (!TSTATE_SECURE(mc->_mc_tstate) ||
817 (mc->_mc_flags & ((1L << _MC_VERSION_BITS) - 1)) != _MC_VERSION)
818 return (EINVAL);
819 tf = td->td_frame;
820 pcb = td->td_pcb;
821 /* Make sure the windows are spilled first. */
822 flushw();
823 /*
824 * Copy the registers which will be restored by tl0_ret() to the
825 * trapframe.
826 * Note that we skip %g7 which is used as the userland TLS register
827 * and %wstate.
828 */
829 tf->tf_global[1] = mc->mc_global[1];
830 tf->tf_global[2] = mc->mc_global[2];
831 tf->tf_global[3] = mc->mc_global[3];
832 tf->tf_global[4] = mc->mc_global[4];
833 tf->tf_global[5] = mc->mc_global[5];
834 tf->tf_global[6] = mc->mc_global[6];
835 tf->tf_out[0] = mc->mc_out[0];
836 tf->tf_out[1] = mc->mc_out[1];
837 tf->tf_out[2] = mc->mc_out[2];
838 tf->tf_out[3] = mc->mc_out[3];
839 tf->tf_out[4] = mc->mc_out[4];
840 tf->tf_out[5] = mc->mc_out[5];
841 tf->tf_out[6] = mc->mc_out[6];
842 tf->tf_out[7] = mc->mc_out[7];
843 tf->tf_fprs = mc->_mc_fprs;
844 tf->tf_fsr = mc->_mc_fsr;
845 tf->tf_gsr = mc->_mc_gsr;
846 tf->tf_tnpc = mc->_mc_tnpc;
847 tf->tf_tpc = mc->_mc_tpc;
848 tf->tf_tstate = mc->_mc_tstate;
849 tf->tf_y = mc->_mc_y;
850 if ((mc->_mc_fprs & FPRS_FEF) != 0) {
851 tf->tf_fprs = 0;
852 bcopy(mc->mc_fp, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
853 pcb->pcb_flags |= PCB_FEF;
854 }
855 return (0);
856 }
857
858 /*
859 * Exit the kernel and execute a firmware call that will not return, as
860 * specified by the arguments.
861 */
void
cpu_shutdown(void *fw_args)
{

#ifdef SMP
	/* Park the application processors first. */
	cpu_mp_shutdown();
#endif
	/* Hand control to the firmware; this call does not return. */
	ofw_exit(fw_args);
}
871
872 /*
873 * Flush the D-cache for non-DMA I/O so that the I-cache can
874 * be made coherent later.
875 */
/* Currently a no-op on sparc64: no D-cache flushing is performed. */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}
882
883 /* Get current clock frequency for the given CPU ID. */
884 int
885 cpu_est_clockrate(int cpu_id, uint64_t *rate)
886 {
887 struct pcpu *pc;
888
889 pc = pcpu_find(cpu_id);
890 if (pc == NULL || rate == NULL)
891 return (EINVAL);
892 *rate = pc->pc_clock;
893 return (0);
894 }
895
896 /*
897 * Duplicate OF_exit() with a different firmware call function that restores
898 * the trap table, otherwise a RED state exception is triggered in at least
899 * some firmware versions.
900 */
901 void
902 cpu_halt(void)
903 {
904 static struct {
905 cell_t name;
906 cell_t nargs;
907 cell_t nreturns;
908 } args = {
909 (cell_t)"exit",
910 0,
911 0
912 };
913
914 cpu_shutdown(&args);
915 }
916
917 static void
918 sparc64_shutdown_final(void *dummy, int howto)
919 {
920 static struct {
921 cell_t name;
922 cell_t nargs;
923 cell_t nreturns;
924 } args = {
925 (cell_t)"SUNW,power-off",
926 0,
927 0
928 };
929
930 /* Turn the power off? */
931 if ((howto & RB_POWEROFF) != 0)
932 cpu_shutdown(&args);
933 /* In case of halt, return to the firmware. */
934 if ((howto & RB_HALT) != 0)
935 cpu_halt();
936 }
937
/* MD idle hook; currently empty, so the idle thread simply spins. */
void
cpu_idle(int busy)
{

	/* Insert code to halt (until next interrupt) for the idle loop. */
}
944
/*
 * Wake a CPU from cpu_idle().  Since cpu_idle() above does not halt
 * the CPU, there is nothing to do; report success unconditionally.
 */
int
cpu_idle_wakeup(int cpu)
{

	return (1);
}
951
952 int
953 ptrace_set_pc(struct thread *td, u_long addr)
954 {
955
956 td->td_frame->tf_tpc = addr;
957 td->td_frame->tf_tnpc = addr + 4;
958 return (0);
959 }
960
/* Single-stepping is not implemented; report success and do nothing. */
int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
968
/* Counterpart of ptrace_single_step(); likewise unimplemented. */
int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (0);
}
976
977 void
978 exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
979 {
980 struct trapframe *tf;
981 struct pcb *pcb;
982 struct proc *p;
983 u_long sp;
984
985 /* XXX no cpu_exec */
986 p = td->td_proc;
987 p->p_md.md_sigtramp = NULL;
988 if (p->p_md.md_utrap != NULL) {
989 utrap_free(p->p_md.md_utrap);
990 p->p_md.md_utrap = NULL;
991 }
992
993 pcb = td->td_pcb;
994 tf = td->td_frame;
995 sp = rounddown(stack, 16);
996 bzero(pcb, sizeof(*pcb));
997 bzero(tf, sizeof(*tf));
998 tf->tf_out[0] = stack;
999 tf->tf_out[3] = p->p_sysent->sv_psstrings;
1000 tf->tf_out[6] = sp - SPOFF - sizeof(struct frame);
1001 tf->tf_tnpc = imgp->entry_addr + 4;
1002 tf->tf_tpc = imgp->entry_addr;
1003 /*
1004 * While we could adhere to the memory model indicated in the ELF
1005 * header, it turns out that just always using TSO performs best.
1006 */
1007 tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO;
1008
1009 td->td_retval[0] = tf->tf_out[0];
1010 td->td_retval[1] = tf->tf_out[1];
1011 }
1012
/*
 * Copy the thread's register state into a struct reg for ptrace(2)
 * et al.  NOTE(review): this assumes struct reg matches the leading
 * layout of the trapframe (cf. the sizeof(struct reg) CTASSERT above)
 * -- confirm against machine/reg.h before changing either.
 */
int
fill_regs(struct thread *td, struct reg *regs)
{

	bcopy(td->td_frame, regs, sizeof(*regs));
	return (0);
}
1020
/*
 * Install a register set supplied by ptrace(2) et al.  Rejects a
 * %tstate that would grant unsafe privileges.  The trapframe's current
 * %wstate is written back into the caller's regs before the wholesale
 * copy, so the supplied %wstate can never reach the trapframe --
 * presumably to keep window-state handling consistent; note this
 * intentionally modifies the input structure.
 */
int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tf;

	if (!TSTATE_SECURE(regs->r_tstate))
		return (EINVAL);
	tf = td->td_frame;
	/* Preserve the existing %wstate across the copy below. */
	regs->r_wstate = tf->tf_wstate;
	bcopy(regs, tf, sizeof(*regs));
	return (0);
}
1033
/* Debug registers are not supported on sparc64. */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
1040
/* Debug registers are not supported on sparc64. */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{

	return (ENOSYS);
}
1047
1048 int
1049 fill_fpregs(struct thread *td, struct fpreg *fpregs)
1050 {
1051 struct trapframe *tf;
1052 struct pcb *pcb;
1053
1054 pcb = td->td_pcb;
1055 tf = td->td_frame;
1056 bcopy(pcb->pcb_ufp, fpregs->fr_regs, sizeof(fpregs->fr_regs));
1057 fpregs->fr_fsr = tf->tf_fsr;
1058 fpregs->fr_gsr = tf->tf_gsr;
1059 return (0);
1060 }
1061
1062 int
1063 set_fpregs(struct thread *td, struct fpreg *fpregs)
1064 {
1065 struct trapframe *tf;
1066 struct pcb *pcb;
1067
1068 pcb = td->td_pcb;
1069 tf = td->td_frame;
1070 tf->tf_fprs &= ~FPRS_FEF;
1071 bcopy(fpregs->fr_regs, pcb->pcb_ufp, sizeof(pcb->pcb_ufp));
1072 tf->tf_fsr = fpregs->fr_fsr;
1073 tf->tf_gsr = fpregs->fr_gsr;
1074 return (0);
1075 }
1076
1077 struct md_utrap *
1078 utrap_alloc(void)
1079 {
1080 struct md_utrap *ut;
1081
1082 ut = malloc(sizeof(struct md_utrap), M_SUBPROC, M_WAITOK | M_ZERO);
1083 ut->ut_refcnt = 1;
1084 return (ut);
1085 }
1086
1087 void
1088 utrap_free(struct md_utrap *ut)
1089 {
1090 int refcnt;
1091
1092 if (ut == NULL)
1093 return;
1094 mtx_pool_lock(mtxpool_sleep, ut);
1095 ut->ut_refcnt--;
1096 refcnt = ut->ut_refcnt;
1097 mtx_pool_unlock(mtxpool_sleep, ut);
1098 if (refcnt == 0)
1099 free(ut, M_SUBPROC);
1100 }
1101
1102 struct md_utrap *
1103 utrap_hold(struct md_utrap *ut)
1104 {
1105
1106 if (ut == NULL)
1107 return (NULL);
1108 mtx_pool_lock(mtxpool_sleep, ut);
1109 ut->ut_refcnt++;
1110 mtx_pool_unlock(mtxpool_sleep, ut);
1111 return (ut);
1112 }
Cache object: 481722466c1467b43b7f89841c6b39c7
|