/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/arm/mv/mv_machdep.c 219662 2011-03-15 08:20:59Z pluknet $");

#define _ARM32_BUS_DMA_PRIVATE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/cons.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/kdb.h>
#include <sys/msgbuf.h>
#include <machine/reg.h>
#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <machine/pte.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <machine/pcb.h>
#include <machine/undefined.h>
#include <machine/machdep.h>
#include <machine/metadata.h>
#include <machine/armreg.h>
#include <machine/bus.h>
#include <sys/reboot.h>
#include <machine/bootinfo.h>

#include <arm/mv/mvvar.h>	/* XXX eventually this should be eliminated */
#include <arm/mv/mvwin.h>

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

/*
 * This is the number of L2 page tables required for covering max
 * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
 * stacks, etc.), rounded up to a multiple of 4.
 */
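/*
 * Rough sizing, mirroring the l2size computation in initarm(): each L2
 * table covers 1MB of KVA, and with 4GB of RAM the vm_page array alone
 * takes on the order of 70MB (assuming sizeof(struct vm_page) of roughly
 * 70 bytes on this 32-bit platform), hence 70-odd tables plus a few
 * extra for the fixed kernel mappings.
 */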
#define KERNEL_PT_MAX	78

/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE	1
#define ABT_STACK_SIZE	1
#define UND_STACK_SIZE	1

/* Maximum number of memory regions */
#define MEM_REGIONS	8

extern unsigned char kernbase[];
extern unsigned char _etext[];
extern unsigned char _edata[];
extern unsigned char __bss_start[];
extern unsigned char _end[];

extern u_int data_abort_handler_address;
extern u_int prefetch_abort_handler_address;
extern u_int undefined_handler_address;

extern const struct pmap_devmap *pmap_devmap_bootstrap_table;
extern vm_offset_t pmap_bootstrap_lastaddr;

struct pv_addr kernel_pt_table[KERNEL_PT_MAX];

extern int *end;

struct pcpu __pcpu;
struct pcpu *pcpup = &__pcpu;

/* Physical and virtual addresses for some global pages */

vm_paddr_t phys_avail[MEM_REGIONS * 2 + 2];	/* (start, end) pairs + terminating zeroes */
vm_paddr_t dump_avail[4];
vm_offset_t physical_pages;
vm_offset_t pmap_bootstrap_lastaddr;

const struct pmap_devmap *pmap_devmap_bootstrap_table;
struct pv_addr systempage;
struct pv_addr msgbufpv;
struct pv_addr irqstack;
struct pv_addr undstack;
struct pv_addr abtstack;
struct pv_addr kernelstack;

static struct trapframe proc0_tf;

struct mem_region {
	vm_offset_t	mr_start;
	vm_size_t	mr_size;
};

static struct mem_region availmem_regions[MEM_REGIONS];
static int availmem_regions_sz;

struct bootinfo *bootinfo;

static void print_kenv(void);
static void print_kernel_section_addr(void);
static void print_bootinfo(void);

static void physmap_init(int);

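/*
 * Step to the next string in the static environment: the strings are
 * NUL-separated and the list is terminated by an empty string.
 */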
static char *
kenv_next(char *cp)
{

	if (cp != NULL) {
		while (*cp != 0)
			cp++;
		cp++;
		if (*cp == 0)
			cp = NULL;
	}
	return (cp);
}

static void
print_kenv(void)
{
	char *cp;

	debugf("loader passed (static) kenv:\n");
	if (kern_envp == NULL) {
		debugf(" no env, null ptr\n");
		return;
	}
	debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);

	for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
		debugf(" %x %s\n", (uint32_t)cp, cp);
}

static void
print_bootinfo(void)
{
	struct bi_mem_region *mr;
	struct bi_eth_addr *eth;
	int i, j;

	debugf("bootinfo:\n");
	if (bootinfo == NULL) {
		debugf(" no bootinfo, null ptr\n");
		return;
	}

	debugf(" version = 0x%08x\n", bootinfo->bi_version);
	debugf(" ccsrbar = 0x%08x\n", bootinfo->bi_bar_base);
	debugf(" cpu_clk = 0x%08x\n", bootinfo->bi_cpu_clk);
	debugf(" bus_clk = 0x%08x\n", bootinfo->bi_bus_clk);

	debugf(" mem regions:\n");
	mr = (struct bi_mem_region *)bootinfo->bi_data;
	for (i = 0; i < bootinfo->bi_mem_reg_no; i++, mr++)
		debugf(" #%d, base = 0x%08x, size = 0x%08x\n", i,
		    mr->mem_base, mr->mem_size);

	debugf(" eth addresses:\n");
	eth = (struct bi_eth_addr *)mr;
	for (i = 0; i < bootinfo->bi_eth_addr_no; i++, eth++) {
		debugf(" #%d, addr = ", i);
		for (j = 0; j < 6; j++)
			debugf("%02x ", eth->mac_addr[j]);
		debugf("\n");
	}
}

static void
print_kernel_section_addr(void)
{

	debugf("kernel image addresses:\n");
	debugf(" kernbase = 0x%08x\n", (uint32_t)kernbase);
	debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
	debugf(" _edata = 0x%08x\n", (uint32_t)_edata);
	debugf(" __bss_start = 0x%08x\n", (uint32_t)__bss_start);
	debugf(" _end = 0x%08x\n", (uint32_t)_end);
}

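/*
 * Return the memory region array, which immediately follows the fixed
 * bootinfo header in the bi_data area.
 */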
struct bi_mem_region *
bootinfo_mr(void)
{

	return ((struct bi_mem_region *)bootinfo->bi_data);
}

static void
physmap_init(int hardcoded)
{
	int i, j, cnt;
	vm_offset_t phys_kernelend, kernload;
	uint32_t s, e, sz;
	struct mem_region *mp, *mp1;

	phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
	kernload = KERNPHYSADDR;

	/*
	 * Use hardcoded physical addresses if we don't use memory regions
	 * from metadata.
	 */
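	/*
	 * This leaves two usable ranges, [0, kernload) below the kernel
	 * image and [phys_kernelend, PHYSMEM_SIZE) above it, followed by
	 * the terminating pair of zeroes; the layout assumes RAM starts
	 * at physical address 0.
	 */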
	if (hardcoded) {
		phys_avail[0] = 0;
		phys_avail[1] = kernload;

		phys_avail[2] = phys_kernelend;
		phys_avail[3] = PHYSMEM_SIZE;

		phys_avail[4] = 0;
		phys_avail[5] = 0;
		return;
	}

	/*
	 * Remove kernel physical address range from avail
	 * regions list. Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload) {
				goto empty;
			}
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
empty:
319 printf("skipping\n");
			bcopy(mp + 1, mp,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/* Fill in phys_avail table, based on availmem_regions */
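	/* phys_avail[] is a list of (start, end) pairs terminated by two zero entries. */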
343 debugf("fill in phys_avail:\n");
344 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {
345
346 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
347 availmem_regions[i].mr_start,
348 availmem_regions[i].mr_start + availmem_regions[i].mr_size,
349 availmem_regions[i].mr_size);
350
351 phys_avail[j] = availmem_regions[i].mr_start;
352 phys_avail[j + 1] = availmem_regions[i].mr_start +
353 availmem_regions[i].mr_size;
354 }
355 phys_avail[j] = 0;
356 phys_avail[j + 1] = 0;
357 }

void *
initarm(void *mdp, void *unused __unused)
{
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	struct bi_mem_region *mr;
	void *kmdp;
	u_int l1pagetable;
	int i = 0, j = 0;

	kmdp = NULL;
	lastaddr = 0;
	memsize = 0;

	set_cpufuncs();

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary. If
	 * the first argument (mdp) doesn't point to a valid address, the
	 * bootloader must have passed us something other than the metadata
	 * ptr... In that case we want to fall back to some built-in settings.
	 */
	mdp = (void *)((uint32_t)mdp & ~PAGE_MASK);

	/* Parse metadata and fetch parameters */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			bootinfo = (struct bootinfo *)preload_search_info(kmdp,
			    MODINFO_METADATA|MODINFOMD_BOOTINFO);

			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
		}

		/* Initialize memory regions table */
		mr = bootinfo_mr();
		for (i = 0; i < bootinfo->bi_mem_reg_no; i++, mr++) {
			if (i == MEM_REGIONS)
				break;
			availmem_regions[i].mr_start = mr->mem_base;
			availmem_regions[i].mr_size = mr->mem_size;
			memsize += mr->mem_size;
		}
		availmem_regions_sz = i;
	} else {
		/* Fall back to hardcoded metadata. */
		lastaddr = fake_preload_metadata();

		/*
		 * Assume a single memory region of size specified in board
		 * configuration file.
		 */
		memsize = PHYSMEM_SIZE;
	}

	/*
	 * If memsize is invalid, we can neither proceed nor panic (too
	 * early for console output).
	 */
	if (memsize == 0)
		while (1);

	/* Platform-specific initialisation */
	pmap_bootstrap_lastaddr = MV_BASE - ARM_NOCACHE_KVA_SIZE;
	pmap_devmap_bootstrap_table = &pmap_devmap[0];

	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;
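	/*
	 * E.g. with 512MB of RAM there are 128K vm_page entries, i.e. a few
	 * MB of vm_page_array, so only a handful of L2 tables (each covering
	 * 1MB of KVA) are needed for it.
	 */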

	/*
	 * Add one table for the end of the kernel map, one for the stacks,
	 * msgbuf and L1/L2 page table mappings, and one for the vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * Define macros to simplify early memory allocation: pages are
	 * carved off freemempos, and the physical address follows from the
	 * fixed kernel VA->PA offset.
	 */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;			\
	freemempos += ((np) * PAGE_SIZE);	\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

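	/*
	 * The ARM L1 translation table is L1_TABLE_SIZE (16KB) and must be
	 * 16KB-aligned, so pad freemempos out to the next such boundary
	 * before carving it off.
	 */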
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

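	/*
	 * L2 tables are L2_TABLE_SIZE_REAL (1KB) each, so four of them fit
	 * in one 4KB page: allocate a fresh page for every fourth table and
	 * carve the intervening ones out of the same page.
	 */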
	for (i = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;
		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);

	init_param1();

	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table.
	 * We begin by mapping the L2 page tables into the L1 so that we can
	 * replace L1 mappings later on if necessary.
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of the kernel text and data using
	 * 1MB section mappings, and use L2 coarse tables for the rest of
	 * the initial kernel address space.
	 *
	 * Link L2 tables for mapping the remainder of the kernel (modulo
	 * 1MB) and kernel structures.
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
	    (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

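	/*
	 * Establish the static device mappings, then switch to the freshly
	 * built L1 table: grant client access for the kernel domain, load
	 * the new translation table base and flush stale TLB entries.
	 */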
	pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));
	cninit();
	physmem = memsize / PAGE_SIZE;

	debugf("initarm: console initialized\n");
	debugf(" arg1 mdp = 0x%08x\n", (uint32_t)mdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	print_bootinfo();
	print_kernel_section_addr();
	print_kenv();


	/*
	 * Re-initialise MPP
	 */
	platform_mpp_init();

	/*
	 * Re-initialise decode windows
	 */
	if (soc_decode_win() != 0)
		printf("WARNING: could not re-initialise decode windows! "
		    "Running with existing settings...\n");
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks of the different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are bootstrapping, the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Install the exception handlers. */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

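	/*
	 * Advertise a single dump region; this assumes physical memory is
	 * contiguous and starts at address 0.
	 */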
	dump_avail[0] = 0;
	dump_avail[1] = memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Prepare map of physical memory regions available to vm subsystem.
	 * If metadata pointer doesn't point to a valid address, use hardcoded
	 * values.
	 */
	physmap_init((mdp != NULL) ? 0 : 1);

	/* Do basic tuning, hz etc */
	init_param2(physmem);
	kdb_init();
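	/*
	 * The value returned here becomes the initial kernel stack pointer
	 * in locore: the top of thread0's SVC stack, leaving room for the
	 * pcb above it.
	 */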
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}

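/*
 * This platform needs no DMA address translation windows: reporting no
 * arm32 bus_dma ranges makes the generic busdma code treat all of memory
 * uniformly.
 */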
struct arm32_dma_range *
bus_dma_get_range(void)
{

	return (NULL);
}

int
bus_dma_get_range_nb(void)
{

	return (0);
}