1 /*-
2 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
3 * Copyright (C) 1995, 1996 TooLs GmbH.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the following acknowledgement:
16 * This product includes software developed by TooLs GmbH.
17 * 4. The name of TooLs GmbH may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
29 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 */
31 /*-
32 * Copyright (C) 2001 Benno Rice
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 *
44 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
45 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
46 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
47 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
49 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
50 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
51 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
52 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
53 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $
55 */
56
57 #include <sys/cdefs.h>
58 __FBSDID("$FreeBSD$");
59
60 #include "opt_ddb.h"
61 #include "opt_kstack_pages.h"
62 #include "opt_platform.h"
63
64 #include <sys/param.h>
65 #include <sys/proc.h>
66 #include <sys/systm.h>
67 #include <sys/bio.h>
68 #include <sys/buf.h>
69 #include <sys/bus.h>
70 #include <sys/cons.h>
71 #include <sys/cpu.h>
72 #include <sys/eventhandler.h>
73 #include <sys/exec.h>
74 #include <sys/imgact.h>
75 #include <sys/kdb.h>
76 #include <sys/kernel.h>
77 #include <sys/ktr.h>
78 #include <sys/linker.h>
79 #include <sys/lock.h>
80 #include <sys/malloc.h>
81 #include <sys/mbuf.h>
82 #include <sys/msgbuf.h>
83 #include <sys/mutex.h>
84 #include <sys/ptrace.h>
85 #include <sys/reboot.h>
86 #include <sys/rwlock.h>
87 #include <sys/signalvar.h>
88 #include <sys/syscallsubr.h>
89 #include <sys/sysctl.h>
90 #include <sys/sysent.h>
91 #include <sys/sysproto.h>
92 #include <sys/ucontext.h>
93 #include <sys/uio.h>
94 #include <sys/vmmeter.h>
95 #include <sys/vnode.h>
96
97 #include <net/netisr.h>
98
99 #include <vm/vm.h>
100 #include <vm/vm_extern.h>
101 #include <vm/vm_kern.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_phys.h>
104 #include <vm/vm_map.h>
105 #include <vm/vm_object.h>
106 #include <vm/vm_pager.h>
107
108 #include <machine/altivec.h>
109 #ifndef __powerpc64__
110 #include <machine/bat.h>
111 #endif
112 #include <machine/cpu.h>
113 #include <machine/elf.h>
114 #include <machine/fpu.h>
115 #include <machine/hid.h>
116 #include <machine/ifunc.h>
117 #include <machine/kdb.h>
118 #include <machine/md_var.h>
119 #include <machine/metadata.h>
120 #include <machine/mmuvar.h>
121 #include <machine/pcb.h>
122 #include <machine/reg.h>
123 #include <machine/sigframe.h>
124 #include <machine/spr.h>
125 #include <machine/trap.h>
126 #include <machine/vmparam.h>
127 #include <machine/ofw_machdep.h>
128
129 #include <ddb/ddb.h>
130
131 #include <dev/ofw/openfirm.h>
132 #include <dev/ofw/ofw_subr.h>
133
/* Nonzero while the kernel is still cold-booting; cleared late in startup. */
int cold = 1;
#ifdef __powerpc64__
/* Default data-cache line size; used by cpu_flush_dcache() and ddb. */
int cacheline_size = 128;
#else
int cacheline_size = 32;
#endif
/* Nonzero when physical memory is reachable through a direct map (DMAP). */
int hw_direct_map = 1;

#ifdef BOOKE
/* Physical address the kernel was loaded at (set by Book-E early code). */
extern vm_paddr_t kernload;
#endif

/* Startup pcpu pointer handed to application processors. */
extern void *ap_pcpu;

/* Statically allocated per-CPU data, one page-aligned slot per CPU. */
struct pcpu __pcpu[MAXCPU] __aligned(PAGE_SIZE);
/* Backing store for the static kernel environment (self-loaded case). */
static char init_kenv[2048];

/* Initial trap frame for thread0. */
static struct trapframe frame0;

/* hw.machine sysctl value. */
char machine[] = "powerpc";
SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");

static void cpu_startup(void *);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Export the cacheline size as machdep.cacheline_size. */
SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
	   CTLFLAG_RD, &cacheline_size, 0, "");

uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *,
    uint32_t);

static void fake_preload_metadata(void);

long Maxmem = 0;
long realmem = 0;

/* Default MSR values set in the AIM/Book-E early startup code */
register_t psl_kernset;
register_t psl_userset;
register_t psl_userstatic;
#ifdef __powerpc64__
register_t psl_userset32;
#endif

struct kva_md_info kmi;
179
180 static void
181 cpu_startup(void *dummy)
182 {
183
184 /*
185 * Initialise the decrementer-based clock.
186 */
187 decr_init();
188
189 /*
190 * Good {morning,afternoon,evening,night}.
191 */
192 cpu_setup(PCPU_GET(cpuid));
193
194 #ifdef PERFMON
195 perfmon_init();
196 #endif
197 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
198 ptoa((uintmax_t)physmem) / 1048576);
199 realmem = physmem;
200
201 if (bootverbose)
202 printf("available KVA = %zu (%zu MB)\n",
203 virtual_end - virtual_avail,
204 (virtual_end - virtual_avail) / 1048576);
205
206 /*
207 * Display any holes after the first chunk of extended memory.
208 */
209 if (bootverbose) {
210 int indx;
211
212 printf("Physical memory chunk(s):\n");
213 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
214 vm_paddr_t size1 =
215 phys_avail[indx + 1] - phys_avail[indx];
216
217 #ifdef __powerpc64__
218 printf("0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
219 #else
220 printf("0x%09jx - 0x%09jx, %ju bytes (%ju pages)\n",
221 #endif
222 (uintmax_t)phys_avail[indx],
223 (uintmax_t)phys_avail[indx + 1] - 1,
224 (uintmax_t)size1, (uintmax_t)size1 / PAGE_SIZE);
225 }
226 }
227
228 vm_ksubmap_init(&kmi);
229
230 printf("avail memory = %ju (%ju MB)\n",
231 ptoa((uintmax_t)vm_free_count()),
232 ptoa((uintmax_t)vm_free_count()) / 1048576);
233
234 /*
235 * Set up buffers, so they can be used to read disk labels.
236 */
237 bufinit();
238 vm_pager_bufferinit();
239 }
240
/* Kernel image boundaries and BSS section symbols provided by the linker. */
extern vm_offset_t __startkernel, __endkernel;
extern unsigned char __bss_start[];
extern unsigned char __sbss_start[];
extern unsigned char __sbss_end[];
extern unsigned char _end[];

/* Early bring-up hooks implemented by the AIM / Book-E specific code. */
void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp, uint32_t mdp_cookie);
void aim_cpu_init(vm_offset_t toc);
void booke_cpu_init(void);

#ifdef DDB
static void load_external_symtab(void);
#endif
255
/*
 * Machine-dependent bootstrap, called from locore.  Clears BSS (non-Book-E),
 * relocates and parses loader metadata (or fakes it for a self-loaded
 * kernel), wires up thread0 and the BSP pcpu area, brings up the console,
 * KDB, and the pmap.  Returns the initial stack pointer — 16-byte aligned,
 * just below thread0's pcb — that locore switches to before mi_startup().
 */
uintptr_t
powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp,
    uint32_t mdp_cookie)
{
	struct pcpu *pc;
	struct cpuref bsp;
	vm_offset_t startkernel, endkernel;
	char *env;
	void *kmdp = NULL;
	bool ofw_bootargs = false;
#ifdef DDB
	bool symbols_provided = false;
	vm_offset_t ksym_start;
	vm_offset_t ksym_end;
	vm_offset_t ksym_sz;
#endif

	/* First guess at start/end kernel positions */
	startkernel = __startkernel;
	endkernel = __endkernel;

	/*
	 * If the metadata pointer cookie is not set to the magic value,
	 * the number in mdp should be treated as nonsense.
	 */
	if (mdp_cookie != 0xfb5d104d)
		mdp = NULL;

#if !defined(BOOKE)
	/*
	 * On BOOKE the BSS is already cleared and some variables
	 * initialized.  Do not wipe them out.
	 */
	bzero(__sbss_start, __sbss_end - __sbss_start);
	bzero(__bss_start, _end - __bss_start);
#endif

	cpu_feature_setup();

#ifdef AIM
	aim_early_init(fdt, toc, ofentry, mdp, mdp_cookie);
#endif

	/*
	 * At this point, we are executing in our correct memory space.
	 * Book-E started there, and AIM has done an rfi and restarted
	 * execution from _start.
	 *
	 * We may still be in real mode, however. If we are running out of
	 * the direct map on 64 bit, this is possible to do.
	 */

	/*
	 * Parse metadata if present and fetch parameters.  Must be done
	 * before console is inited so cninit gets the right value of
	 * boothowto.
	 */
	if (mdp != NULL) {
		/*
		 * Starting up from loader.
		 *
		 * Full metadata has been provided, but we need to figure
		 * out the correct address to relocate it to.
		 */
		char *envp = NULL;
		uintptr_t md_offset = 0;
		vm_paddr_t kernelstartphys, kernelendphys;

#ifdef AIM
		/* Running above DMAP_BASE means metadata must be shifted. */
		if ((uintptr_t)&powerpc_init > DMAP_BASE_ADDRESS)
			md_offset = DMAP_BASE_ADDRESS;
#else /* BOOKE */
		md_offset = VM_MIN_KERNEL_ADDRESS - kernload;
#endif

		preload_metadata = mdp;
		if (md_offset > 0) {
			/* Translate phys offset into DMAP offset. */
			preload_metadata += md_offset;
			preload_bootstrap_relocate(md_offset);
		}
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			if (envp != NULL)
				envp += md_offset;
			init_static_kenv(envp, 0);
			/* Prefer a loader-supplied DTB if locore gave none. */
			if (fdt == 0) {
				fdt = MD_FETCH(kmdp, MODINFOMD_DTBP, uintptr_t);
				if (fdt != 0)
					fdt += md_offset;
			}
			kernelstartphys = MD_FETCH(kmdp, MODINFO_ADDR,
			    vm_offset_t);
			/* kernelstartphys is already relocated. */
			kernelendphys = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
			if (kernelendphys != 0)
				kernelendphys += md_offset;
			endkernel = ulmax(endkernel, kernelendphys);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
			/*
			 * NOTE(review): ksym_sz is read here but never used
			 * afterwards — looks like a candidate for removal.
			 */
			ksym_sz = *(Elf_Size*)ksym_start;

			db_fetch_ksymtab(ksym_start, ksym_end, md_offset);
			/* Symbols provided by loader. */
			symbols_provided = true;
#endif
		}
	} else {
		/*
		 * Self-loading kernel, we have to fake up metadata.
		 *
		 * Since we are creating the metadata from the final
		 * memory space, we don't need to call
		 * preload_boostrap_relocate().
		 */
		fake_preload_metadata();
		kmdp = preload_search_by_type("elf kernel");
		init_static_kenv(init_kenv, sizeof(init_kenv));
		/* Boot arguments come from Open Firmware in this case. */
		ofw_bootargs = true;
	}

	/* Store boot environment state */
	OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry);

	/*
	 * Init params/tunables that can be overridden by the loader
	 */
	init_param1();

	/*
	 * Start initializing proc0 and thread0.
	 */
	proc_linkup0(&proc0, &thread0);
	thread0.td_frame = &frame0;
#ifdef __powerpc64__
	/* Stash curthread in the ABI-reserved register (r13 / r2). */
	__asm __volatile("mr 13,%0" :: "r"(&thread0));
#else
	__asm __volatile("mr 2,%0" :: "r"(&thread0));
#endif

	/*
	 * Init mutexes, which we use heavily in PMAP
	 */
	mutex_init();

	/*
	 * Install the OF client interface
	 */
	OF_bootstrap();

#ifdef DDB
	/*
	 * If the loader did not hand us symbols, try to pull them out of
	 * an initrd copy of the kernel (needs the direct map).
	 */
	if (!symbols_provided && hw_direct_map)
		load_external_symtab();
#endif

	if (ofw_bootargs)
		ofw_parse_bootargs();

#ifdef AIM
	/*
	 * Early I/O map needs to be initialized before console, in order to
	 * map frame buffers properly, and after boot args have been parsed,
	 * to handle tunables properly.
	 */
	pmap_early_io_map_init();
#endif

	/*
	 * Initialize the console before printing anything.
	 */
	cninit();

#ifdef AIM
	aim_cpu_init(toc);
#else /* BOOKE */
	booke_cpu_init();

	/* Make sure the kernel icache is valid before we go too much further */
	__syncicache((caddr_t)startkernel, endkernel - startkernel);
#endif

	/*
	 * Choose a platform module so we can get the physical memory map.
	 */

	platform_probe_and_attach();

	/*
	 * Set up per-cpu data for the BSP now that the platform can tell
	 * us which that is.
	 */
	if (platform_smp_get_bsp(&bsp) != 0)
		bsp.cr_cpuid = 0;
	pc = &__pcpu[bsp.cr_cpuid];
	/* SPRG0 carries the pcpu pointer for the trap handlers. */
	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
	pcpu_init(pc, bsp.cr_cpuid, sizeof(struct pcpu));
	pc->pc_curthread = &thread0;
	thread0.td_oncpu = bsp.cr_cpuid;
	pc->pc_cpuid = bsp.cr_cpuid;
	pc->pc_hwref = bsp.cr_hwref;

	/*
	 * Init KDB
	 */
	kdb_init();

	/*
	 * Bring up MMU
	 */
	pmap_mmu_init();
	link_elf_ireloc(kmdp);
	pmap_bootstrap(startkernel, endkernel);
	mtmsr(psl_kernset & ~PSL_EE);

	/*
	 * Initialize params/tunables that are derived from memsize
	 */
	init_param2(physmem);

	/*
	 * Grab booted kernel's name
	 */
	env = kern_getenv("kernelname");
	if (env != NULL) {
		strlcpy(kernelname, env, sizeof(kernelname));
		freeenv(env);
	}

	/*
	 * Finish setting up thread0: place the pcb at the 16-byte aligned
	 * top of its kernel stack.
	 */
	thread0.td_pcb = (struct pcb *)
	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~15UL);
	bzero((void *)thread0.td_pcb, sizeof(struct pcb));
	pc->pc_curpcb = thread0.td_pcb;

	/* Initialise the message buffer. */
	msgbufinit(msgbufp, msgbufsize);

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	/* Initial SP: 16-byte aligned, room for a fake callframe below pcb. */
	return (((uintptr_t)thread0.td_pcb -
	    (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL);
}
509
510 #ifdef DDB
511 /*
512 * On powernv and some booke systems, we might not have symbols loaded via
513 * loader. However, if the user passed the kernel in as the initrd as well,
514 * we can manually load it via reinterpreting the initrd copy of the kernel.
515 *
516 * In the BOOKE case, we don't actually have a DMAP yet, so we have to use
517 * temporary maps to inspect the memory, but write DMAP addresses to the
518 * configuration variables.
519 */
520 static void
521 load_external_symtab(void) {
522 phandle_t chosen;
523 vm_paddr_t start, end;
524 pcell_t cell[2];
525 ssize_t size;
526 u_char *kernelimg; /* Temporary map */
527 u_char *kernelimg_final; /* Final location */
528
529 int i;
530
531 Elf_Ehdr *ehdr;
532 Elf_Phdr *phdr;
533 Elf_Shdr *shdr;
534
535 vm_offset_t ksym_start, ksym_sz, kstr_start, kstr_sz,
536 ksym_start_final, kstr_start_final;
537
538 if (!hw_direct_map)
539 return;
540
541 chosen = OF_finddevice("/chosen");
542 if (chosen <= 0)
543 return;
544
545 if (!OF_hasprop(chosen, "linux,initrd-start") ||
546 !OF_hasprop(chosen, "linux,initrd-end"))
547 return;
548
549 size = OF_getencprop(chosen, "linux,initrd-start", cell, sizeof(cell));
550 if (size == 4)
551 start = cell[0];
552 else if (size == 8)
553 start = (uint64_t)cell[0] << 32 | cell[1];
554 else
555 return;
556
557 size = OF_getencprop(chosen, "linux,initrd-end", cell, sizeof(cell));
558 if (size == 4)
559 end = cell[0];
560 else if (size == 8)
561 end = (uint64_t)cell[0] << 32 | cell[1];
562 else
563 return;
564
565 if (!(end - start > 0))
566 return;
567
568 kernelimg_final = (u_char *) PHYS_TO_DMAP(start);
569 #ifdef AIM
570 kernelimg = kernelimg_final;
571 #else /* BOOKE */
572 kernelimg = (u_char *)pmap_early_io_map(start, PAGE_SIZE);
573 #endif
574 ehdr = (Elf_Ehdr *)kernelimg;
575
576 if (!IS_ELF(*ehdr)) {
577 #ifdef BOOKE
578 pmap_early_io_unmap(start, PAGE_SIZE);
579 #endif
580 return;
581 }
582
583 #ifdef BOOKE
584 pmap_early_io_unmap(start, PAGE_SIZE);
585 kernelimg = (u_char *)pmap_early_io_map(start, (end - start));
586 #endif
587
588 phdr = (Elf_Phdr *)(kernelimg + ehdr->e_phoff);
589 shdr = (Elf_Shdr *)(kernelimg + ehdr->e_shoff);
590
591 ksym_start = 0;
592 ksym_sz = 0;
593 ksym_start_final = 0;
594 kstr_start = 0;
595 kstr_sz = 0;
596 kstr_start_final = 0;
597 for (i = 0; i < ehdr->e_shnum; i++) {
598 if (shdr[i].sh_type == SHT_SYMTAB) {
599 ksym_start = (vm_offset_t)(kernelimg +
600 shdr[i].sh_offset);
601 ksym_start_final = (vm_offset_t)
602 (kernelimg_final + shdr[i].sh_offset);
603 ksym_sz = (vm_offset_t)(shdr[i].sh_size);
604 kstr_start = (vm_offset_t)(kernelimg +
605 shdr[shdr[i].sh_link].sh_offset);
606 kstr_start_final = (vm_offset_t)
607 (kernelimg_final +
608 shdr[shdr[i].sh_link].sh_offset);
609
610 kstr_sz = (vm_offset_t)
611 (shdr[shdr[i].sh_link].sh_size);
612 }
613 }
614
615 if (ksym_start != 0 && kstr_start != 0 && ksym_sz != 0 &&
616 kstr_sz != 0 && ksym_start < kstr_start) {
617 /*
618 * We can't use db_fetch_ksymtab() here, because we need to
619 * feed in DMAP addresses that are not mapped yet on booke.
620 *
621 * Write the variables directly, where db_init() will pick
622 * them up later, after the DMAP is up.
623 */
624 ksymtab = ksym_start_final;
625 ksymtab_size = ksym_sz;
626 kstrtab = kstr_start_final;
627 ksymtab_relbase = (__startkernel - KERNBASE);
628 }
629
630 #ifdef BOOKE
631 pmap_early_io_unmap(start, (end - start));
632 #endif
633
634 };
635 #endif
636
/*
 * When not being loaded from loader, we need to create our own metadata
 * so we can interact with the kernel linker.
 */
static void
fake_preload_metadata(void) {
	/* We depend on dword alignment here. */
	static uint32_t fake_preload[36] __aligned(8);
	int i = 0;

	/* Module name record: "kernel". */
	fake_preload[i++] = MODINFO_NAME;
	fake_preload[i++] = strlen("kernel") + 1;
	strcpy((char*)&fake_preload[i], "kernel");
	/* ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
	i += 2;		/* 7-byte string occupies two 32-bit slots. */

	/* Module type record: "elf kernel". */
	fake_preload[i++] = MODINFO_TYPE;
	fake_preload[i++] = strlen("elf kernel") + 1;
	strcpy((char*)&fake_preload[i], "elf kernel");
	/* ['e' 'l' 'f' ' '] ['k' 'e' 'r' 'n'] ['e' 'l' '\0' ..] */
	i += 3;		/* 11-byte string occupies three 32-bit slots. */

#ifdef __powerpc64__
	/* Padding -- Fields start on u_long boundaries */
	fake_preload[i++] = 0;
#endif

	/* Kernel load address (already a final virtual address). */
	fake_preload[i++] = MODINFO_ADDR;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/* Kernel size in bytes. */
	fake_preload[i++] = MODINFO_SIZE;
	fake_preload[i++] = sizeof(vm_offset_t);
	*(vm_offset_t *)&fake_preload[i] =
	    (vm_offset_t)(__endkernel) - (vm_offset_t)(__startkernel);
	i += (sizeof(vm_offset_t) / 4);

	/*
	 * MODINFOMD_SSYM and MODINFOMD_ESYM cannot be provided here,
	 * as the memory comes from outside the loaded ELF sections.
	 *
	 * If the symbols are being provided by other means (MFS), the
	 * tables will be loaded into the debugger directly.
	 */

	/* Null field at end to mark end of data. */
	fake_preload[i++] = 0;
	fake_preload[i] = 0;
	preload_metadata = (void*)fake_preload;
}
689
690 /*
691 * Flush the D-cache for non-DMA I/O so that the I-cache can
692 * be made coherent later.
693 */
694 void
695 cpu_flush_dcache(void *ptr, size_t len)
696 {
697 register_t addr, off;
698
699 /*
700 * Align the address to a cacheline and adjust the length
701 * accordingly. Then round the length to a multiple of the
702 * cacheline for easy looping.
703 */
704 addr = (uintptr_t)ptr;
705 off = addr & (cacheline_size - 1);
706 addr -= off;
707 len = roundup2(len + off, cacheline_size);
708
709 while (len > 0) {
710 __asm __volatile ("dcbf 0,%0" :: "r"(addr));
711 __asm __volatile ("sync");
712 addr += cacheline_size;
713 len -= cacheline_size;
714 }
715 }
716
717 int
718 ptrace_set_pc(struct thread *td, unsigned long addr)
719 {
720 struct trapframe *tf;
721
722 tf = td->td_frame;
723 tf->srr0 = (register_t)addr;
724
725 return (0);
726 }
727
/*
 * Enter a spin-mutex section.  On the outermost entry, disable interrupts
 * and save the previous MSR so spinlock_exit() can restore it; nested
 * entries only bump the per-thread count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		/* Raise SMT thread priority hint before taking the lock. */
		nop_prio_mhigh();
		/* Interrupts must be off before entering the critical section. */
		msr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_msr = msr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}
744
/*
 * Leave a spin-mutex section.  On the outermost exit, leave the critical
 * section and restore the MSR (re-enabling interrupts) that was saved by
 * the matching spinlock_enter().
 */
void
spinlock_exit(void)
{
	struct thread *td;
	register_t msr;

	td = curthread;
	/* Read the saved MSR before dropping the count. */
	msr = td->td_md.md_saved_msr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(msr);
		/* Drop SMT thread priority hint back to medium. */
		nop_prio_medium();
	}
}
760
/*
 * Simple ddb(4) command/hack to view any SPR on the running CPU.
 * Uses a trivial asm function to perform the mfspr, and rewrites the mfspr
 * instruction each time.
 * XXX: Since it uses code modification, it won't work if the kernel code pages
 * are marked RO.
 */
extern register_t get_spr(int);

#ifdef DDB
DB_SHOW_COMMAND(spr, db_show_spr)
{
	register_t spr;
	volatile uint32_t *p;
	int sprno, saved_sprno;

	if (!have_addr)
		return;

	/*
	 * The mfspr instruction encodes the SPR number with its two 5-bit
	 * halves swapped; convert the user-supplied number accordingly.
	 */
	saved_sprno = sprno = (intptr_t) addr;
	sprno = ((sprno & 0x3e0) >> 5) | ((sprno & 0x1f) << 5);
	p = (uint32_t *)(void *)&get_spr;
#ifdef __powerpc64__
#if defined(_CALL_ELF) && _CALL_ELF == 2
	/* Account for ELFv2 function prologue. */
	p += 2;
#else
	/* ELFv1: &get_spr is a function descriptor; load the code address. */
	p = *(volatile uint32_t * volatile *)p;
#endif
#endif
	/* Patch the SPR field of the mfspr, then resync the icache. */
	*p = (*p & ~0x001ff800) | (sprno << 11);
	__syncicache(__DEVOLATILE(uint32_t *, p), cacheline_size);
	spr = get_spr(sprno);

	db_printf("SPR %d(%x): %lx\n", saved_sprno, saved_sprno,
	    (unsigned long)spr);
}
798
799 DB_SHOW_COMMAND(frame, db_show_frame)
800 {
801 struct trapframe *tf;
802 long reg;
803 int i;
804
805 tf = have_addr ? (struct trapframe *)addr : curthread->td_frame;
806
807 /*
808 * Everything casts through long to simplify the printing.
809 * 'long' is native register size anyway.
810 */
811 db_printf("trap frame %p\n", tf);
812 for (i = 0; i < nitems(tf->fixreg); i++) {
813 reg = tf->fixreg[i];
814 db_printf(" r%d:\t%#lx (%ld)\n", i, reg, reg);
815 }
816 reg = tf->lr;
817 db_printf(" lr:\t%#lx\n", reg);
818 reg = tf->cr;
819 db_printf(" cr:\t%#lx\n", reg);
820 reg = tf->xer;
821 db_printf(" xer:\t%#lx\n", reg);
822 reg = tf->ctr;
823 db_printf(" ctr:\t%#lx (%ld)\n", reg, reg);
824 reg = tf->srr0;
825 db_printf(" srr0:\t%#lx\n", reg);
826 reg = tf->srr1;
827 db_printf(" srr1:\t%#lx\n", reg);
828 reg = tf->exc;
829 db_printf(" exc:\t%#lx\n", reg);
830 reg = tf->dar;
831 db_printf(" dar:\t%#lx\n", reg);
832 #ifdef AIM
833 reg = tf->cpu.aim.dsisr;
834 db_printf(" dsisr:\t%#lx\n", reg);
835 #else
836 reg = tf->cpu.booke.esr;
837 db_printf(" esr:\t%#lx\n", reg);
838 reg = tf->cpu.booke.dbcr0;
839 db_printf(" dbcr0:\t%#lx\n", reg);
840 #endif
841 }
842 #endif
843
#undef bzero
/*
 * Kernel bzero: zero 'len' bytes starting at 'buf'.  Aligns to a word
 * boundary byte-by-byte, clears eight words per iteration while it can,
 * then finishes word-by-word and byte-by-byte.
 */
void
bzero(void *buf, size_t len)
{
	unsigned char *cp;
	unsigned long *wp;

	cp = buf;

	/* Byte-at-a-time until the pointer is word-aligned. */
	while (len > 0 && ((uintptr_t)cp & (sizeof(unsigned long) - 1)) != 0) {
		*cp++ = 0;
		len--;
	}

	/* Eight words per iteration. */
	while (len >= sizeof(unsigned long) * 8) {
		wp = (unsigned long *)(void *)cp;
		wp[0] = 0;
		wp[1] = 0;
		wp[2] = 0;
		wp[3] = 0;
		wp[4] = 0;
		wp[5] = 0;
		wp[6] = 0;
		wp[7] = 0;
		cp += sizeof(unsigned long) * 8;
		len -= sizeof(unsigned long) * 8;
	}

	/* One word at a time. */
	while (len >= sizeof(unsigned long)) {
		*(unsigned long *)(void *)cp = 0;
		cp += sizeof(unsigned long);
		len -= sizeof(unsigned long);
	}

	/* Trailing bytes. */
	while (len > 0) {
		*cp++ = 0;
		len--;
	}
}
881
/* __stack_chk_fail_local() is called in secure-plt (32-bit). */
#if !defined(__powerpc64__)
extern void __stack_chk_fail(void);
void __stack_chk_fail_local(void);

/*
 * Hidden-visibility trampoline used by -fstack-protector with secure-plt;
 * simply forwards to the common stack-smashing handler.
 */
void
__stack_chk_fail_local(void)
{

	__stack_chk_fail();
}
#endif
Cache object: a3d26e0cf27ac1a696622c19bf7866c9
|