1 /*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31 /*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57 #include <sys/cdefs.h>
58 __FBSDID("$FreeBSD: releng/6.0/sys/powerpc/powerpc/machdep.c 148846 2005-08-08 07:02:12Z grehan $");
59
60 #include "opt_compat.h"
61 #include "opt_ddb.h"
62 #include "opt_kstack_pages.h"
63 #include "opt_msgbuf.h"
64
65 #include <sys/param.h>
66 #include <sys/proc.h>
67 #include <sys/systm.h>
68 #include <sys/bio.h>
69 #include <sys/buf.h>
70 #include <sys/bus.h>
71 #include <sys/cons.h>
72 #include <sys/cpu.h>
73 #include <sys/eventhandler.h>
74 #include <sys/exec.h>
75 #include <sys/imgact.h>
76 #include <sys/kdb.h>
77 #include <sys/kernel.h>
78 #include <sys/ktr.h>
79 #include <sys/linker.h>
80 #include <sys/lock.h>
81 #include <sys/malloc.h>
82 #include <sys/mbuf.h>
83 #include <sys/msgbuf.h>
84 #include <sys/mutex.h>
85 #include <sys/ptrace.h>
86 #include <sys/reboot.h>
87 #include <sys/signalvar.h>
88 #include <sys/sysctl.h>
89 #include <sys/sysent.h>
90 #include <sys/sysproto.h>
91 #include <sys/ucontext.h>
92 #include <sys/uio.h>
93 #include <sys/vmmeter.h>
94 #include <sys/vnode.h>
95
96 #include <net/netisr.h>
97
98 #include <vm/vm.h>
99 #include <vm/vm_extern.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_page.h>
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_pager.h>
105
106 #include <machine/bat.h>
107 #include <machine/clock.h>
108 #include <machine/cpu.h>
109 #include <machine/elf.h>
110 #include <machine/fpu.h>
111 #include <machine/md_var.h>
112 #include <machine/metadata.h>
113 #include <machine/pcb.h>
114 #include <machine/powerpc.h>
115 #include <machine/reg.h>
116 #include <machine/sigframe.h>
117 #include <machine/trap.h>
118 #include <machine/vmparam.h>
119
120 #include <ddb/ddb.h>
121
122 #include <dev/ofw/openfirm.h>
123
124 #ifdef DDB
125 extern vm_offset_t ksym_start, ksym_end;
126 #endif
127
int cold = 1;		/* nonzero during early boot; autoconf clears it */

struct pcpu __pcpu[MAXCPU];	/* per-CPU data; slot 0 wired up in powerpc_init() */
struct trapframe frame0;	/* initial trap frame for thread0 */

/*
 * thread0's kernel stack.  NOTE(review): assigned outside this file
 * (presumably during pmap bootstrap) before powerpc_init() consumes
 * kstack0 — confirm against locore/pmap.
 */
vm_offset_t kstack0;
vm_offset_t kstack0_phys;

/* hw.machine: architecture name, read-only. */
char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

/* hw.model: filled in elsewhere (empty here), read-only. */
static char model[128];
SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, model, 0, "");

/* machdep.cacheline_size: CPU cache line size in bytes, read-only. */
static int cacheline_size = CACHELINESIZE;
SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

void powerpc_init(u_int, u_int, u_int, void *);

int save_ofw_mapping(void);
int restore_ofw_mapping(void);

void install_extint(void (*)(void));

int setfault(faultbuf);		/* defined in locore.S */

static int grab_mcontext(struct thread *, mcontext_t *, int);

void asm_panic(char *);		/* panic entry point for assembly code */

long Maxmem = 0;
long realmem = 0;		/* set from physmem in cpu_startup() */

struct pmap ofw_pmap;		/* pmap covering Open Firmware's mappings */
extern int ofmsr;

struct bat battable[16];	/* block address translation registers */

struct kva_md_info kmi;		/* kernel VA submap info, see vm_ksubmap_init() */
171
172 static void
173 powerpc_ofw_shutdown(void *junk, int howto)
174 {
175 if (howto & RB_HALT) {
176 OF_halt();
177 }
178 OF_reboot();
179 }
180
/*
 * Generic CPU startup (SI_SUB_CPU SYSINIT): start the clock, print
 * memory statistics, initialize the kernel VA submaps and buffer
 * cache, register the firmware shutdown hook, and (SMP) start the
 * secondary processors.
 */
static void
cpu_startup(void *dummy)
{

	/*
	 * Initialise the decrementer-based clock.
	 */
	decr_init();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	cpu_setup(PCPU_GET(cpuid));

	/* startrtclock(); */
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
	    ptoa(physmem) / 1048576);
	realmem = physmem;

	/*
	 * Display any holes after the first chunk of extended memory.
	 * NOTE(review): the %08x/%d formats assume 32-bit phys_avail[]
	 * entries — fine for 32-bit PowerPC, verify if widened.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	/* Let Open Firmware handle the final halt/reboot step. */
	EVENTHANDLER_REGISTER(shutdown_final, powerpc_ofw_shutdown, 0,
	    SHUTDOWN_PRI_LAST);

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
	 */
	mp_start();	/* fire up the secondaries */
	mp_announce();
#endif /* SMP */
}
241
extern char kernel_text[], _end[];	/* linker-provided kernel bounds */

/*
 * Trap handler code fragments and their sizes, defined in assembly
 * (trap_subr.S); copied into the low-memory exception vectors by
 * powerpc_init().  The "size" symbols are addresses whose value is
 * the fragment length, hence the (size_t)&... casts at the call sites.
 */
extern void *trapcode, *trapsize;
extern void *alitrap, *alisize;
extern void *dsitrap, *dsisize;
extern void *decrint, *decrsize;
extern void *extint, *extsize;
extern void *dblow, *dbsize;
extern void *vectrap, *vectrapsize;
251
/*
 * Machine-dependent kernel initialization, called from locore with
 * the kernel's start/end addresses and the loader metadata pointer.
 * Order matters throughout: metadata must be parsed before cninit()
 * (for boothowto), the pcpu pointer must be in SPRG0 before mutex
 * use, and the exception vectors are installed with translation off.
 */
void
powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
{
	struct pcpu *pc;
	vm_offset_t end, off;
	void *kmdp;
	char *env;

	end = 0;
	kmdp = NULL;

	/*
	 * Parse metadata if present and fetch parameters. Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}
	}

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup(&proc0, &ksegrp0, &thread0);
	thread0.td_frame = &frame0;

	/*
	 * Set up per-cpu data.
	 */
	pc = &__pcpu[0];
	pcpu_init(pc, 0, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	/*
	 * NOTE(review): thread0.td_pcb is not assigned until later in
	 * this function, so pc_curpcb records its current (likely NULL)
	 * value here — confirm this is intentional.
	 */
	pc->pc_curpcb = thread0.td_pcb;
	pc->pc_cpuid = 0;

	/* SPRG0 holds the per-CPU pointer used by PCPU_GET()/curthread. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));

	mutex_init();

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

	/*
	 * Complain if there is no metadata.
	 */
	if (mdp == NULL || kmdp == NULL) {
		printf("powerpc_init: no loader metadata.\n");
	}

	kdb_init();

	/*
	 * XXX: Initialize the interrupt tables.
	 * Disable translation in case the vector area
	 * hasn't been mapped (G5)
	 */
	mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
	isync();
	/* Install the generic trap handler in most exception vectors. */
	bcopy(&trapcode, (void *)EXC_RST, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_MCHK, (size_t)&trapsize);
	bcopy(&dsitrap, (void *)EXC_DSI, (size_t)&dsisize);
	bcopy(&trapcode, (void *)EXC_ISI, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_EXI, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_ALI, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_PGM, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_FPU, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_DECR, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_SC, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_TRC, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_FPA, (size_t)&trapsize);
	bcopy(&vectrap, (void *)EXC_VEC, (size_t)&vectrapsize);
	bcopy(&trapcode, (void *)EXC_VECAST, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_THRM, (size_t)&trapsize);
	bcopy(&trapcode, (void *)EXC_BPT, (size_t)&trapsize);
#ifdef KDB
	/* Debugger builds override selected vectors with the KDB entry. */
	bcopy(&dblow, (void *)EXC_RST, (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_MCHK, (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_PGM, (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_TRC, (size_t)&dbsize);
	bcopy(&dblow, (void *)EXC_BPT, (size_t)&dbsize);
#endif
	/* Flush the freshly-written vector area from the I-cache. */
	__syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD);

	/*
	 * Make sure translation has been enabled
	 */
	mtmsr(mfmsr() | PSL_IR|PSL_DR|PSL_ME|PSL_RI);
	isync();

	/*
	 * Initialise virtual memory.
	 */
	pmap_bootstrap(startkernel, endkernel);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0.  The PCB lives at the top of the
	 * kernel stack.
	 */
	thread0.td_kstack = kstack0;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * Map and initialise the message buffer.
	 */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, msgbuf_phys + off);
	msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter("Boot flags requested debugger");
#endif
}
396
/*
 * Zero len bytes starting at buf.  Aligns the pointer to a word
 * boundary first, then clears eight words per iteration, then one
 * word at a time, and finally any trailing bytes.
 */
void
bzero(void *buf, size_t len)
{
	unsigned char *dst;
	unsigned long *w;

	dst = buf;

	/* Byte stores until dst is word-aligned (or len runs out). */
	while (len > 0 && ((uintptr_t)dst & (sizeof(unsigned long) - 1)) != 0) {
		*dst++ = 0;
		len--;
	}

	/* Unrolled: eight word stores per pass. */
	while (len >= sizeof(unsigned long) * 8) {
		w = (unsigned long *)dst;
		w[0] = 0;
		w[1] = 0;
		w[2] = 0;
		w[3] = 0;
		w[4] = 0;
		w[5] = 0;
		w[6] = 0;
		w[7] = 0;
		dst += sizeof(unsigned long) * 8;
		len -= sizeof(unsigned long) * 8;
	}

	/* Remaining full words. */
	while (len >= sizeof(unsigned long)) {
		*(unsigned long *)dst = 0;
		dst += sizeof(unsigned long);
		len -= sizeof(unsigned long);
	}

	/* Trailing bytes. */
	while (len > 0) {
		*dst++ = 0;
		len--;
	}
}
433
/*
 * Deliver a signal to the current thread: capture the user context
 * into a sigframe, push the frame onto either the alternate signal
 * stack or the user stack, and rewrite the trapframe so that user
 * mode resumes in the signal trampoline with the handler's arguments
 * in place.  Called with the proc lock and ps_mtx held; both are
 * dropped around the copyout and reacquired before returning.
 */
void
sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code)
{
	struct trapframe *tf;
	struct sigframe *sfp;
	struct sigacts *psp;
	struct sigframe sf;
	struct thread *td;
	struct proc *p;
	int oonstack, rndfsize;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	tf = td->td_frame;
	oonstack = sigonstack(tf->fixreg[1]);

	/* Frame size rounded up to keep the stack 16-byte aligned. */
	rndfsize = ((sizeof(sf) + 15) / 16) * 16;

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/*
	 * Save user context
	 */
	memset(&sf, 0, sizeof(sf));
	grab_mcontext(td, &sf.sf_uc.uc_mcontext, 0);
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;

	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;

	/*
	 * Allocate and validate space for the signal handler context.
	 * Use the alternate stack only if one is configured, we are not
	 * already running on it, and this signal requested it.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((caddr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - rndfsize);
	} else {
		sfp = (struct sigframe *)(tf->fixreg[1] - rndfsize);
	}

	/*
	 * Translate the signal if appropriate (Linux emu ?)
	 */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/*
	 * Save the floating-point state, if necessary, then copy it.
	 */
	/* XXX */

	/*
	 * Set up the registers to return to sigcode.
	 *
	 * r1/sp - sigframe ptr
	 * lr - sig function, dispatched to by blrl in trampoline
	 * r3 - sig number
	 * r4 - SIGINFO ? &siginfo : exception code
	 * r5 - user context
	 * srr0 - trampoline function addr
	 */
	tf->lr = (register_t)catcher;
	tf->fixreg[1] = (register_t)sfp;
	tf->fixreg[FIRSTARG] = sig;
	tf->fixreg[FIRSTARG+2] = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 */
		tf->fixreg[FIRSTARG+1] = (register_t)&sfp->sf_si;

		/*
		 * Fill siginfo structure.
		 */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)tf->srr0;
	} else {
		/* Old FreeBSD-style arguments. */
		tf->fixreg[FIRSTARG+1] = code;
	}
	/* Drop the locks across the copyout; reacquired below. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/* The trampoline sits just below the ps_strings area. */
	tf->srr0 = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

	/*
	 * copy the frame out to userland.
	 */
	if (copyout((caddr_t)&sf, (caddr_t)sfp, sizeof(sf)) != 0) {
		/*
		 * Process has trashed its stack. Kill it.
		 */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td,
	    tf->srr0, tf->fixreg[1]);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
545
546 /*
547 * Build siginfo_t for SA thread
548 */
549 void
550 cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
551 {
552 struct proc *p;
553 struct thread *td;
554
555 td = curthread;
556 p = td->td_proc;
557 PROC_LOCK_ASSERT(p, MA_OWNED);
558
559 bzero(si, sizeof(*si));
560 si->si_signo = sig;
561 si->si_code = code;
562 /* XXXKSE fill other fields */
563 }
564
565 int
566 sigreturn(struct thread *td, struct sigreturn_args *uap)
567 {
568 struct proc *p;
569 ucontext_t uc;
570 int error;
571
572 CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);
573
574 if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
575 CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
576 return (EFAULT);
577 }
578
579 error = set_mcontext(td, &uc.uc_mcontext);
580 if (error != 0)
581 return (error);
582
583 p = td->td_proc;
584 PROC_LOCK(p);
585 td->td_sigmask = uc.uc_sigmask;
586 SIG_CANTMASK(td->td_sigmask);
587 signotify(td);
588 PROC_UNLOCK(p);
589
590 CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
591 td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
592
593 return (EJUSTRETURN);
594 }
595
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4.x compatibility sigreturn entry point.  Forwards
 * directly to the native sigreturn(); the argument structures are
 * treated as layout-compatible here (NOTE(review): confirm the 4.x
 * ucontext matches the current one on powerpc).
 */
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif
604
605 /*
606 * Construct a PCB from a trapframe. This is called from kdb_trap() where
607 * we want to start a backtrace from the function that caused us to enter
608 * the debugger. We have the context in the trapframe, but base the trace
609 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
610 * enough for a backtrace.
611 */
612 void
613 makectx(struct trapframe *tf, struct pcb *pcb)
614 {
615
616 pcb->pcb_lr = tf->srr0;
617 pcb->pcb_sp = tf->fixreg[1];
618 }
619
620 /*
621 * get_mcontext/sendsig helper routine that doesn't touch the
622 * proc lock
623 */
static int
grab_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	memset(mcp, 0, sizeof(mcontext_t));

	mcp->mc_vers = _MC_VERSION;
	mcp->mc_flags = 0;
	/* The whole trapframe is the general-register context. */
	memcpy(&mcp->mc_frame, td->td_frame, sizeof(struct trapframe));
	if (flags & GET_MC_CLEAR_RET) {
		/* Clear the syscall return registers (r3/r4). */
		mcp->mc_gpr[3] = 0;
		mcp->mc_gpr[4] = 0;
	}

	/*
	 * This assumes that floating-point context is *not* lazy,
	 * so if the thread has used FP there would have been a
	 * FP-unavailable exception that would have set things up
	 * correctly.
	 */
	if (pcb->pcb_flags & PCB_FPU) {
		KASSERT(td == curthread,
			("get_mcontext: fp save not curthread"));
		/* Flush live FP registers to the PCB before copying. */
		critical_enter();
		save_fpu(td);
		critical_exit();
		mcp->mc_flags |= _MC_FP_VALID;
		memcpy(&mcp->mc_fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
		memcpy(mcp->mc_fpreg, pcb->pcb_fpu.fpr, 32*sizeof(double));
	}

	/* XXX Altivec context ? */

	mcp->mc_len = sizeof(*mcp);

	return (0);
}
664
665 int
666 get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
667 {
668 int error;
669
670 error = grab_mcontext(td, mcp, flags);
671 if (error == 0) {
672 PROC_LOCK(curthread->td_proc);
673 mcp->mc_onstack = sigonstack(td->td_frame->fixreg[1]);
674 PROC_UNLOCK(curthread->td_proc);
675 }
676
677 return (error);
678 }
679
/*
 * Install a user-supplied machine context into a thread.  Rejects
 * contexts with a bad version or length, and ones that attempt to
 * alter privileged MSR bits.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = td->td_frame;

	if (mcp->mc_vers != _MC_VERSION ||
	    mcp->mc_len != sizeof(*mcp))
		return (EINVAL);

	/*
	 * Don't let the user set privileged MSR bits
	 */
	if ((mcp->mc_srr1 & PSL_USERSTATIC) != (tf->srr1 & PSL_USERSTATIC)) {
		return (EINVAL);
	}

	memcpy(tf, mcp->mc_frame, sizeof(mcp->mc_frame));

	if (mcp->mc_flags & _MC_FP_VALID) {
		/* Make the thread an FPU user before loading FP state. */
		if ((pcb->pcb_flags & PCB_FPU) != PCB_FPU) {
			critical_enter();
			enable_fpu(td);
			critical_exit();
		}
		memcpy(&pcb->pcb_fpu.fpscr, &mcp->mc_fpscr, sizeof(double));
		memcpy(pcb->pcb_fpu.fpr, mcp->mc_fpreg, 32*sizeof(double));
	}

	/* XXX Altivec context? */

	return (0);
}
716
/*
 * Machine-dependent reboot hook; nothing to do here — the actual
 * halt/reboot is performed by the shutdown_final event handler
 * registered in cpu_startup().
 */
void
cpu_boot(int howto)
{
}
721
/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	/* Clock-rate estimation is not implemented on PowerPC. */
	return (ENXIO);
}
729
730 /*
731 * Shutdown the CPU as much as possible.
732 */
void
cpu_halt(void)
{

	/* Hand control back to Open Firmware. */
	OF_exit();
}
739
/*
 * Per-CPU idle loop body.  Currently does no power saving; with
 * INVARIANTS it verifies that external interrupts (PSL_EE) are
 * enabled, since idling with interrupts masked would hang the CPU.
 */
void
cpu_idle(void)
{
	/* TODO: Insert code to halt (until next interrupt) */

#ifdef INVARIANTS
	if ((mfmsr() & PSL_EE) != PSL_EE) {
		struct thread *td = curthread;
		printf("td msr %x\n", td->td_md.md_saved_msr);
		panic("ints disabled in idleproc!");
	}
#endif
}
753
754 /*
755 * Set set up registers on exec.
756 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *tf;
	struct ps_strings arginfo;

	/* Start from a clean trapframe for the new image. */
	tf = trapframe(td);
	bzero(tf, sizeof *tf);
	/* Initial stack pointer: below the arg area, 16-byte aligned. */
	tf->fixreg[1] = -roundup(-stack + 8, 16);

	/*
	 * XXX Machine-independent code has already copied arguments and
	 * XXX environment to userland. Get them back here.
	 */
	(void)copyin((char *)PS_STRINGS, &arginfo, sizeof(arginfo));

	/*
	 * Set up arguments for _start():
	 * _start(argc, argv, envp, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 * - obj and cleanup are the auxilliary and termination
	 * vectors. They are fixed up by ld.elf_so.
	 * - ps_strings is a NetBSD extention, and will be
	 * ignored by executables which are strictly
	 * compliant with the SVR4 ABI.
	 *
	 * XXX We have to set both regs and retval here due to different
	 * XXX calling convention in trap.c and init_main.c.
	 */
	/*
	 * XXX PG: these get overwritten in the syscall return code.
	 * execve() should return EJUSTRETURN, like it does on NetBSD.
	 * Emulate by setting the syscall return value cells. The
	 * registers still have to be set for init's fork trampoline.
	 */
	td->td_retval[0] = arginfo.ps_nargvstr;
	td->td_retval[1] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[3] = arginfo.ps_nargvstr;
	tf->fixreg[4] = (register_t)arginfo.ps_argvstr;
	tf->fixreg[5] = (register_t)arginfo.ps_envstr;
	tf->fixreg[6] = 0; /* auxillary vector */
	tf->fixreg[7] = 0; /* termination vector */
	tf->fixreg[8] = (register_t)PS_STRINGS; /* NetBSD extension */

	/* Enter the image at its entry point in user mode. */
	tf->srr0 = entry;
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	/* Fresh image: no FPU (or other) state carried over. */
	td->td_pcb->pcb_flags = 0;
}
806
807 int
808 fill_regs(struct thread *td, struct reg *regs)
809 {
810 struct trapframe *tf;
811
812 tf = td->td_frame;
813 memcpy(regs, tf, sizeof(struct reg));
814
815 return (0);
816 }
817
/* Debug-register fetch (presumably for ptrace) — unsupported here. */
int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC */
	return (ENOSYS);
}
824
825 int
826 fill_fpregs(struct thread *td, struct fpreg *fpregs)
827 {
828 struct pcb *pcb;
829
830 pcb = td->td_pcb;
831
832 if ((pcb->pcb_flags & PCB_FPU) == 0)
833 memset(fpregs, 0, sizeof(struct fpreg));
834 else
835 memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg));
836
837 return (0);
838 }
839
840 int
841 set_regs(struct thread *td, struct reg *regs)
842 {
843 struct trapframe *tf;
844
845 tf = td->td_frame;
846 memcpy(tf, regs, sizeof(struct reg));
847
848 return (0);
849 }
850
/* Debug-register store (presumably for ptrace) — unsupported here. */
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	/* No debug registers on PowerPC */
	return (ENOSYS);
}
857
858 int
859 set_fpregs(struct thread *td, struct fpreg *fpregs)
860 {
861 struct pcb *pcb;
862
863 pcb = td->td_pcb;
864 if ((pcb->pcb_flags & PCB_FPU) == 0)
865 enable_fpu(td);
866 memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg));
867
868 return (0);
869 }
870
871 int
872 ptrace_set_pc(struct thread *td, unsigned long addr)
873 {
874 struct trapframe *tf;
875
876 tf = td->td_frame;
877 tf->srr0 = (register_t)addr;
878
879 return (0);
880 }
881
882 int
883 ptrace_single_step(struct thread *td)
884 {
885 struct trapframe *tf;
886
887 tf = td->td_frame;
888 tf->srr1 |= PSL_SE;
889
890 return (0);
891 }
892
893 int
894 ptrace_clear_single_step(struct thread *td)
895 {
896 struct trapframe *tf;
897
898 tf = td->td_frame;
899 tf->srr1 &= ~PSL_SE;
900
901 return (0);
902 }
903
904 /*
905 * Initialise a struct pcpu.
906 */
void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz)
{

	/* No machine-dependent per-CPU fields to initialize on PowerPC. */
}
912
/*
 * Enter a spinlock section: on the outermost acquisition, disable
 * interrupts and remember the previous MSR so spinlock_exit() can
 * restore it.  The count/MSR updates must precede critical_enter()
 * to mirror the reverse order in spinlock_exit().
 */
void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_msr = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}
924
/*
 * Leave a spinlock section: unwind in the opposite order of
 * spinlock_enter(), re-enabling interrupts (restoring the saved MSR)
 * only when the outermost section is exited.
 */
void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_msr);
}
936
937 /*
938 * kcopy(const void *src, void *dst, size_t len);
939 *
940 * Copy len bytes from src to dst, aborting if we encounter a fatal
941 * page fault.
942 *
943 * kcopy() _must_ save and restore the old fault handler since it is
944 * called by uiomove(), which may be in the path of servicing a non-fatal
945 * page fault.
946 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct thread *td;
	faultbuf env, *oldfault;
	int rv;

	td = PCPU_GET(curthread);
	/* Save the caller's fault handler; see the comment above. */
	oldfault = td->td_pcb->pcb_onfault;
	/* setfault() returns nonzero here a second time if a fault hits. */
	if ((rv = setfault(env)) != 0) {
		td->td_pcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	/* Restore the previous fault handler on the success path too. */
	td->td_pcb->pcb_onfault = oldfault;
	return (0);
}
966
/*
 * Panic entry point called from assembly stubs.  pstr is a plain
 * message, not a format string, so pass it as a "%s" argument: this
 * keeps any '%' characters in the message from being interpreted by
 * panic()'s printf-style formatting.
 */
void
asm_panic(char *pstr)
{

	panic("%s", pstr);
}
972
973 int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */
974
975 int
976 db_trap_glue(struct trapframe *frame)
977 {
978 if (!(frame->srr1 & PSL_PR)
979 && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC
980 || (frame->exc == EXC_PGM
981 && (frame->srr1 & 0x20000))
982 || frame->exc == EXC_BPT
983 || frame->exc == EXC_DSI)) {
984 int type = frame->exc;
985 if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
986 type = T_BREAKPOINT;
987 }
988 return (kdb_trap(type, 0, frame));
989 }
990
991 return (0);
992 }
Cache object: e87017edfd09fff02ad25268893a720e
|