/*-
 * Copyright (c) 2012-2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.2/sys/arm64/arm64/locore.S 331972 2018-04-04 02:22:56Z mmel $
 */

#include "assym.s"
#include "opt_kstack_pages.h"
#include <sys/syscall.h>
#include <machine/asm.h>
#include <machine/armreg.h>
#include <machine/hypervisor.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#define	VIRT_BITS	48
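/*
 * Number of 512 GiB L0 entries needed to cover the direct map (DMAP) region;
 * one L1 table page is reserved for each of them at pagetable_dmap below.
 */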
#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)

	.globl	kernbase
	.set	kernbase, KERNBASE

#define	DEVICE_MEM	0
#define	NORMAL_UNCACHED	1
#define	NORMAL_MEM	2

/*
 * We assume:
 *  MMU      on with an identity map, or off
 *  D-Cache: off
 *  I-Cache: on or off
 *  We are loaded at a 2MiB aligned address
 */

	.text
	.globl _start
_start:
	/* Drop to EL1 */
	bl	drop_to_el1

	/*
	 * Disable the MMU. We may have entered the kernel with it on and
	 * will need to update the tables later. If this has been set up
	 * with anything other than a VA == PA map then this will fail,
	 * but in this case the code to find where we are running from
	 * would have also failed.
	 */
	dsb	sy
	mrs	x2, sctlr_el1
	bic	x2, x2, SCTLR_M
	msr	sctlr_el1, x2
	isb

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Get the virt -> phys offset */
	bl	get_virt_delta

	/*
	 * At this point:
	 * x29 = PA - VA
	 * x28 = Our physical load address
	 */

	/* Create the page tables */
	bl	create_pagetables

	/*
	 * At this point:
	 * x27 = TTBR0 table
	 * x26 = Kernel L1 table
	 * x24 = TTBR1 table
	 */

	/* Enable the mmu */
	bl	start_mmu

	/* Jump to the virtual address space */
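	/*
	 * .Lvirtdone holds the link-time (virtual) address of virtdone, so
	 * loading it and branching moves execution from the identity-mapped
	 * alias of the kernel onto the high TTBR1 mapping.
	 */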
	ldr	x15, .Lvirtdone
	br	x15

virtdone:
	/* Set up the stack */
	adr	x25, initstack_end
	mov	sp, x25
	sub	sp, sp, #PCB_SIZE

	/* Zero the BSS */
	ldr	x15, .Lbss
	ldr	x14, .Lend
1:
	str	xzr, [x15], #8
	cmp	x15, x14
	b.lo	1b

	/* Backup the module pointer */
	mov	x1, x0

	/* Make the page table base a virtual address */
	sub	x26, x26, x29
	sub	x24, x24, x29

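	/*
	 * Carve out room on the stack for the boot parameters handed to
	 * initarm() and point x0 at it; the stores below fill in each field
	 * at the offsets noted in the comments.
	 */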
	sub	sp, sp, #(64 * 4)
	mov	x0, sp

	/* Negate the delta so it is VA -> PA */
	neg	x29, x29

	str	x1, [x0]	/* modulep */
	str	x26, [x0, 8]	/* kern_l1pt */
	str	x29, [x0, 16]	/* kern_delta */
	str	x25, [x0, 24]	/* kern_stack */
	str	x24, [x0, 32]	/* kern_l0pt */

	/* trace back starts here */
	mov	fp, #0
	/* Branch to C code */
	bl	initarm
	bl	mi_startup

	/* We should not get here */
	brk	0

	.align 3
.Lvirtdone:
	.quad	virtdone
.Lbss:
	.quad	__bss_start
.Lend:
	.quad	_end

#ifdef SMP
/*
 * mpentry(unsigned long)
 *
 * Called by a core when it is being brought online.
 * The data in x0 is passed straight to init_secondary.
 */
ENTRY(mpentry)
	/* Disable interrupts */
	msr	daifset, #2

	/* Drop to EL1 */
	bl	drop_to_el1

	/* Set the context id */
	msr	contextidr_el1, xzr

	/* Load the kernel page table */
	adr	x24, pagetable_l0_ttbr1
	/* Load the identity page table */
	adr	x27, pagetable_l0_ttbr0

	/* Enable the mmu */
	bl	start_mmu

	/* Jump to the virtual address space */
	ldr	x15, =mp_virtdone
	br	x15

mp_virtdone:
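	/* Pick this core's stack in secondary_stacks using the value in x0 */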
	ldr	x4, =secondary_stacks
	mov	x5, #(PAGE_SIZE * KSTACK_PAGES)
	mul	x5, x0, x5
	add	sp, x4, x5

	b	init_secondary
END(mpentry)
#endif

/*
 * If we are started in EL2, configure the required hypervisor
 * registers and drop to EL1.
 */
drop_to_el1:
	mrs	x1, CurrentEL
	lsr	x1, x1, #2
	cmp	x1, #0x2
	b.eq	1f
	ret
1:
	/* Configure the Hypervisor: RW=1 so EL1 runs in AArch64 state */
	mov	x2, #(HCR_RW)
	msr	hcr_el2, x2

	/* Load the Virtualization Process ID Register */
	mrs	x2, midr_el1
	msr	vpidr_el2, x2

	/* Load the Virtualization Multiprocess ID Register */
	mrs	x2, mpidr_el1
	msr	vmpidr_el2, x2

	/* Set the bits that need to be 1 in sctlr_el1 */
	ldr	x2, .Lsctlr_res1
	msr	sctlr_el1, x2

	/* Don't trap to EL2 for exceptions */
	mov	x2, #CPTR_RES1
	msr	cptr_el2, x2

	/* Don't trap to EL2 for CP15 traps */
	msr	hstr_el2, xzr

	/* Enable access to the physical timers at EL1 */
	mrs	x2, cnthctl_el2
	orr	x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
	msr	cnthctl_el2, x2

	/* Set the counter offset to a known value */
	msr	cntvoff_el2, xzr

	/* Hypervisor trap functions */
	adr	x2, hyp_vectors
	msr	vbar_el2, x2

	/* Return to EL1h, with debug, SError, IRQ and FIQ masked */
	mov	x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
	msr	spsr_el2, x2

	/* Configure GICv3 CPU interface */
	mrs	x2, id_aa64pfr0_el1
	/* Extract GIC bits from the register */
	ubfx	x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
	/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
	cmp	x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
	b.ne	2f

	mrs	x2, icc_sre_el2
	orr	x2, x2, #ICC_SRE_EL2_EN		/* Enable access from non-secure EL1 */
	orr	x2, x2, #ICC_SRE_EL2_SRE	/* Enable system registers */
	msr	icc_sre_el2, x2
2:

	/* Set the address to return to our return address */
	msr	elr_el2, x30
	isb

	eret

	.align 3
.Lsctlr_res1:
	.quad	SCTLR_RES1

#define	VECT_EMPTY	\
	.align 7;	\
	1:	b	1b

	.align 11
hyp_vectors:
	VECT_EMPTY	/* Synchronous EL2t */
	VECT_EMPTY	/* IRQ EL2t */
	VECT_EMPTY	/* FIQ EL2t */
	VECT_EMPTY	/* Error EL2t */

	VECT_EMPTY	/* Synchronous EL2h */
	VECT_EMPTY	/* IRQ EL2h */
	VECT_EMPTY	/* FIQ EL2h */
	VECT_EMPTY	/* Error EL2h */

	VECT_EMPTY	/* Synchronous 64-bit EL1 */
	VECT_EMPTY	/* IRQ 64-bit EL1 */
	VECT_EMPTY	/* FIQ 64-bit EL1 */
	VECT_EMPTY	/* Error 64-bit EL1 */

	VECT_EMPTY	/* Synchronous 32-bit EL1 */
	VECT_EMPTY	/* IRQ 32-bit EL1 */
	VECT_EMPTY	/* FIQ 32-bit EL1 */
	VECT_EMPTY	/* Error 32-bit EL1 */

/*
 * Get the delta between the physical address we were loaded to and the
 * virtual address we expect to run from. This is used when building the
 * initial page table.
 */
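/*
 * adr is PC-relative, so it yields the address virt_map is actually running
 * at (physical), while the .quad stored at virt_map was resolved at link
 * time to its virtual address; subtracting the two gives the delta.
 */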
get_virt_delta:
	/* Load the physical address of virt_map */
	adr	x29, virt_map
	/* Load the virtual address of virt_map stored in virt_map */
	ldr	x28, [x29]
	/* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
	sub	x29, x29, x28
	/* Find the load address for the kernel */
	mov	x28, #(KERNBASE)
	add	x28, x28, x29
	ret

	.align 3
virt_map:
	.quad	virt_map

/*
 * This builds the page tables containing the identity map, and the kernel
 * virtual map.
 *
 * It relies on:
 *  We were loaded to an address that is on a 2MiB boundary
 *  All the memory must not cross a 1GiB boundary
 *  x28 contains the physical address we were loaded from
 *
 * TODO: This is out of date.
 * There are at least 5 pages before that address for the page tables
 *   The pages used are:
 *   - The Kernel L2 table
 *   - The Kernel L1 table
 *   - The Kernel L0 table (TTBR1)
 *   - The identity (PA = VA) L1 table
 *   - The identity (PA = VA) L0 table (TTBR0)
 *   - The DMAP L1 tables
 */
create_pagetables:
	/* Save the Link register */
	mov	x5, x30

	/* Clean the page table */
	adr	x6, pagetable
	mov	x26, x6
	adr	x27, pagetable_end
1:
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	stp	xzr, xzr, [x6], #16
	cmp	x6, x27
	b.lo	1b

	/*
	 * Build the TTBR1 maps.
	 */

	/* Find the size of the kernel */
	mov	x6, #(KERNBASE)
	ldr	x7, .Lend
	/* Find the end - begin */
	sub	x8, x7, x6
	/* Get the number of l2 pages to allocate, rounded down */
	lsr	x10, x8, #(L2_SHIFT)
	/* Add 8 MiB (4 more blocks) for any rounding above and the module data */
	add	x10, x10, #4

	/* Create the kernel space L2 table */
	mov	x6, x26
	mov	x7, #NORMAL_MEM
	mov	x8, #(KERNBASE & L2_BLOCK_MASK)
	mov	x9, x28
	bl	build_l2_block_pagetable

	/* Move to the l1 table */
	add	x26, x26, #PAGE_SIZE

	/* Link the l1 -> l2 table */
	mov	x9, x6
	mov	x6, x26
	bl	link_l1_pagetable

	/* Move to the l0 table */
	add	x24, x26, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x24
	mov	x10, #1
	bl	link_l0_pagetable

	/* Link the DMAP tables */
	ldr	x8, =DMAP_MIN_ADDRESS
	adr	x9, pagetable_dmap
	mov	x10, #DMAP_TABLES
	bl	link_l0_pagetable

	/*
	 * Build the TTBR0 maps.
	 */
	add	x27, x24, #PAGE_SIZE

	mov	x6, x27		/* The initial page table */
#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
	/* Create a table for the UART */
	mov	x7, #DEVICE_MEM
	mov	x8, #(SOCDEV_VA)	/* VA start */
	mov	x9, #(SOCDEV_PA)	/* PA start */
	mov	x10, #1
	bl	build_l1_block_pagetable
#endif

	/* Create the VA = PA map */
	mov	x7, #NORMAL_UNCACHED	/* Uncached as it's only needed early on */
	mov	x9, x27
	mov	x8, x9		/* VA start (== PA start) */
	mov	x10, #1
	bl	build_l1_block_pagetable

	/* Move to the l0 table */
	add	x27, x27, #PAGE_SIZE

	/* Link the l0 -> l1 table */
	mov	x9, x6
	mov	x6, x27
	mov	x10, #1
	bl	link_l0_pagetable

	/* Restore the Link register */
	mov	x30, x5
	ret

/*
 * Builds an L0 -> L1 table descriptor
 *
 * This is a link for a 512GiB block of memory with up to 1GiB regions mapped
 * within it by build_l1_block_pagetable.
 *
 * x6  = L0 table
 * x8  = Virtual Address
 * x9  = L1 PA (trashed)
 * x10 = Entry count
 * x11, x12 and x13 are trashed
 */
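/*
 * Each entry written below is a table descriptor: the L1 table's physical
 * address in bits [47:12] with the low two bits set to 0b11 (L0_TABLE).
 */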
link_l0_pagetable:
	/*
	 * Link an L0 -> L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L0_SHIFT
	and	x11, x11, #L0_ADDR_MASK

	/* Build the L0 block entry */
	mov	x12, #L0_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
1:	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

/*
 * Builds an L1 -> L2 table descriptor
 *
 * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
 * within it by build_l2_block_pagetable.
 *
 * x6  = L1 table
 * x8  = Virtual Address
 * x9  = L2 PA (trashed)
 * x11, x12 and x13 are trashed
 */
link_l1_pagetable:
	/*
	 * Link an L1 -> L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 block entry */
	mov	x12, #L1_TABLE

	/* Only use the output address bits */
	lsr	x9, x9, #PAGE_SHIFT
	orr	x13, x12, x9, lsl #PAGE_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	ret

/*
 * Builds "count" 1 GiB L1 block page table entries
 * x6  = L1 table
 * x7  = Memory type (DEVICE_MEM, NORMAL_UNCACHED or NORMAL_MEM)
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count
 * x11, x12 and x13 are trashed
 */
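/*
 * Each entry written below is a block descriptor: bits [1:0] = 0b01
 * (L1_BLOCK), the memory type from x7 shifted into the AttrIndx field
 * (bits [4:2]), the Access Flag and, on SMP, inner-shareable attributes.
 */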
build_l1_block_pagetable:
	/*
	 * Build the L1 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L1_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L1 block entry */
	lsl	x12, x7, #2
	orr	x12, x12, #L1_BLOCK
	orr	x12, x12, #(ATTR_AF)
#ifdef SMP
	orr	x12, x12, ATTR_SH(ATTR_SH_IS)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L1_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L1_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

/*
 * Builds "count" 2 MiB L2 block page table entries
 * x6  = L2 table
 * x7  = Memory type (DEVICE_MEM, NORMAL_UNCACHED or NORMAL_MEM)
 * x8  = VA start
 * x9  = PA start (trashed)
 * x10 = Entry count
 * x11, x12 and x13 are trashed
 */
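/* Same descriptor layout as build_l1_block_pagetable, at 2 MiB granularity. */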
build_l2_block_pagetable:
	/*
	 * Build the L2 table entry.
	 */
	/* Find the table index */
	lsr	x11, x8, #L2_SHIFT
	and	x11, x11, #Ln_ADDR_MASK

	/* Build the L2 block entry */
	lsl	x12, x7, #2
	orr	x12, x12, #L2_BLOCK
	orr	x12, x12, #(ATTR_AF)
#ifdef SMP
	orr	x12, x12, ATTR_SH(ATTR_SH_IS)
#endif

	/* Only use the output address bits */
	lsr	x9, x9, #L2_SHIFT

	/* Set the physical address for this virtual address */
1:	orr	x13, x12, x9, lsl #L2_SHIFT

	/* Store the entry */
	str	x13, [x6, x11, lsl #3]

	sub	x10, x10, #1
	add	x11, x11, #1
	add	x9, x9, #1
	cbnz	x10, 1b

	ret

start_mmu:
	dsb	sy

	/* Load the exception vectors */
	ldr	x2, =exception_vectors
	msr	vbar_el1, x2

	/* Load ttbr0 and ttbr1 */
	msr	ttbr0_el1, x27
	msr	ttbr1_el1, x24
	isb

	/* Clear the Monitor Debug System control register */
	msr	mdscr_el1, xzr

	/* Invalidate the TLB */
	tlbi	vmalle1is

	ldr	x2, mair
	msr	mair_el1, x2

	/*
	 * Setup TCR according to PARange bits from ID_AA64MMFR0_EL1.
	 */
	ldr	x2, tcr
	mrs	x3, id_aa64mmfr0_el1
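	/*
	 * Copy PARange into the IPS field (bits [34:32]) so the translation
	 * regime's intermediate physical address size matches the CPU.
	 */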
	bfi	x2, x3, #32, #3
	msr	tcr_el1, x2

	/* Setup SCTLR */
	ldr	x2, sctlr_set
	ldr	x3, sctlr_clear
	mrs	x1, sctlr_el1
	bic	x1, x1, x3	/* Clear the required bits */
	orr	x1, x1, x2	/* Set the required bits */
	msr	sctlr_el1, x1
	isb

	ret

	.align 3
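/*
 * The MAIR indices below line up with the DEVICE_MEM, NORMAL_UNCACHED and
 * NORMAL_MEM values used as the AttrIndx when the block entries were built.
 */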
mair:
	.quad	MAIR_ATTR(MAIR_DEVICE_nGnRnE, 0) |	\
		MAIR_ATTR(MAIR_NORMAL_NC, 1) |		\
		MAIR_ATTR(MAIR_NORMAL_WB, 2) |		\
		MAIR_ATTR(MAIR_NORMAL_WT, 3)
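/* TxSZ = 64 - VIRT_BITS selects 48-bit virtual address ranges for both TTBRs */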
tcr:
	.quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K | \
	    TCR_CACHE_ATTRS | TCR_SMP_ATTRS)
sctlr_set:
	/* Bits to set */
	.quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \
	    SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
	    SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | SCTLR_M)
sctlr_clear:
	/* Bits to clear */
	.quad (SCTLR_EE | SCTLR_EOE | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \
	    SCTLR_ITD | SCTLR_THEE | SCTLR_CP15BEN | SCTLR_A)

	.globl abort
abort:
	b abort

	//.section .init_pagetable
	.align 12 /* 4KiB aligned */
/*
 * The initial page tables, in the following order:
 *  L2 for kernel (high addresses, TTBR1)
 *  L1 for kernel (TTBR1)
 *  L0 for kernel (TTBR1)
 *  L1 for the identity map (low addresses, TTBR0)
 *  L0 for the identity map (TTBR0)
 */
pagetable:
	.space	PAGE_SIZE
pagetable_l1_ttbr1:
	.space	PAGE_SIZE
pagetable_l0_ttbr1:
	.space	PAGE_SIZE
pagetable_l1_ttbr0:
	.space	PAGE_SIZE
pagetable_l0_ttbr0:
	.space	PAGE_SIZE

	.globl pagetable_dmap
pagetable_dmap:
	.space	PAGE_SIZE * DMAP_TABLES
pagetable_end:

el2_pagetable:
	.space	PAGE_SIZE

	.globl init_pt_va
init_pt_va:
	.quad pagetable		/* XXX: Keep page tables VA */

	.align	4
initstack:
	.space	(PAGE_SIZE * KSTACK_PAGES)
initstack_end:


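/*
 * Signal trampoline run in userland after a signal handler returns: x0 is
 * pointed at the saved ucontext in the signal frame and sigreturn(2) is
 * invoked to restore the interrupted context; if that fails, exit(2) is
 * called.
 */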
ENTRY(sigcode)
	mov	x0, sp
	add	x0, x0, #SF_UC

1:
	mov	x8, #SYS_sigreturn
	svc	0

	/* sigreturn failed, exit */
	mov	x8, #SYS_exit
	svc	0

	b	1b
END(sigcode)
	/* This may be copied to the stack, keep it 16-byte aligned */
	.align	3
esigcode:

	.data
	.align	3
	.global	szsigcode
szsigcode:
	.quad	esigcode - sigcode