1 /* $Id: head.S,v 1.86 2001/12/05 01:02:16 davem Exp $
2 * head.S: Initial boot code for the Sparc64 port of Linux.
3 *
4 * Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 * Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
8 */
9
10 #include <linux/config.h>
11 #include <linux/version.h>
12 #include <linux/errno.h>
13 #include <asm/asm_offsets.h>
14 #include <asm/asi.h>
15 #include <asm/pstate.h>
16 #include <asm/ptrace.h>
17 #include <asm/spitfire.h>
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20 #include <asm/errno.h>
21 #include <asm/signal.h>
22 #include <asm/processor.h>
23 #include <asm/lsu.h>
24 #include <asm/dcr.h>
25 #include <asm/dcu.h>
26 #include <asm/head.h>
27 #include <asm/ttable.h>
28
29 /* This section from _start to sparc64_boot_end should fit into
30 * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
31 * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
32 * 0x0000.0000.0040.6000 and empty_bad_page, which is from
33 * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
34 */
35
36 .text
37 .globl start, _start, stext, _stext
/* All five labels alias the kernel load address (0x404000); the boot
 * loader enters here.  bootup_user_stack shares this space, as described
 * in the header comment above.
 */
38 _start:
39 start:
40 _stext:
41 stext:
42 bootup_user_stack:
43 ! 0x0000000000404000
44 b sparc64_boot
45 flushw /* Flush register file; executes in the branch delay slot. */
46
47 /* This stuff has to be in sync with SILO and other potential boot loaders
48 * Fields should be kept upward compatible and whenever any change is made,
49 * HdrS version should be incremented.
50 */
51 .global root_flags, ram_flags, root_dev
52 .global sparc_ramdisk_image, sparc_ramdisk_size
53 .globl silo_args
54
/* "HdrS" magic + kernel version + header version, located at a fixed
 * offset from _start so the boot loader can find and patch the fields
 * below.
 */
55 .ascii "HdrS"
56 .word LINUX_VERSION_CODE
57 .half 0x0203 /* HdrS version */
58 root_flags:
59 .half 1 /* default 1; presumably mount root read-only — loader may patch */
60 root_dev:
61 .half 0 /* root device number; loader may patch */
62 ram_flags:
63 .half 0
64 sparc_ramdisk_image:
65 .word 0 /* initrd address, filled in by the boot loader */
66 sparc_ramdisk_size:
67 .word 0 /* initrd size, filled in by the boot loader */
68 .xword reboot_command
69 .xword bootstr_info
70 .word _end
71
72 /* We must be careful, 32-bit OpenBOOT will get confused if it
73 * tries to save away a register window to a 64-bit kernel
74 * stack address. Flush all windows, disable interrupts,
75 * remap if necessary, jump onto kernel trap table, then kernel
76 * stack, or else we die.
77 *
78 * PROM entry point is on %o4
79 */
/* Dispatch on CPU type: Cheetah (UltraSPARC-III), Cheetah+ (and
 * followons), or else assume Spitfire-class.
 */
80 sparc64_boot:
81 BRANCH_IF_CHEETAH_BASE(g1,g5,cheetah_boot)
82 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g5,cheetah_plus_boot)
83 ba,pt %xcc, spitfire_boot
84 nop
85
86 cheetah_plus_boot:
87 /* Preserve OBP chosen DCU and DCR register settings. */
88 ba,pt %xcc, cheetah_generic_boot
89 nop
90
91 cheetah_boot:
/* Program the Dispatch Control Register (%asr18) with our preferred
 * DCR_* bits, then build the Data Cache Unit control value in %g5
 * (upper-32 bits via %uhi/%ulo, shifted up, low bits OR'ed in) and
 * store it to the DCU control register.
 */
92 mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
93 wr %g1, %asr18
94
95 sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
96 or %g5, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g5
97 sllx %g5, 32, %g5
98 or %g5, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g5
99 stxa %g5, [%g0] ASI_DCU_CONTROL_REG
100 membar #Sync
101
102 cheetah_generic_boot:
/* Zero the primary, secondary and nucleus TSB extension registers in
 * the D-MMU (and I-MMU where one exists for that context).
 */
103 mov TSB_EXTENSION_P, %g3
104 stxa %g0, [%g3] ASI_DMMU
105 stxa %g0, [%g3] ASI_IMMU
106 membar #Sync
107
108 mov TSB_EXTENSION_S, %g3
109 stxa %g0, [%g3] ASI_DMMU
110 membar #Sync
111
112 mov TSB_EXTENSION_N, %g3
113 stxa %g0, [%g3] ASI_DMMU
114 stxa %g0, [%g3] ASI_IMMU
115 membar #Sync
116
/* Privileged mode, FPU enabled, interrupt-enable bit set; clear %fprs. */
117 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
118 wr %g0, 0, %fprs
119
120 /* Just like for Spitfire, we probe itlb-2 for a mapping which
121 * matches our current %pc. We take the physical address in
122 * that mapping and use it to make our own.
123 */
124
125 /* %g5 holds the tlb data */
126 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
127 sllx %g5, 32, %g5
128 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
129
130 /* Put PADDR tlb data mask into %g3. */
131 sethi %uhi(_PAGE_PADDR), %g3
132 or %g3, %ulo(_PAGE_PADDR), %g3
133 sllx %g3, 32, %g3
134 sethi %hi(_PAGE_PADDR), %g7
135 or %g7, %lo(_PAGE_PADDR), %g7
136 or %g3, %g7, %g3
137
/* "2 << 16" selects TLB number 2 in the data-access VA encoding. */
138 set 2 << 16, %l0 /* TLB entry walker. */
139 set 0x1fff, %l2 /* Page mask. */
140 rd %pc, %l3
141 andn %l3, %l2, %g2 /* vaddr comparator */
142
143 1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
144 membar #Sync
145 andn %g1, %l2, %g1
146 cmp %g1, %g2
147 be,pn %xcc, cheetah_got_tlbentry
148 nop
149 and %l0, (127 << 3), %g1 /* 128-entry TLB: stop when index wraps */
150 cmp %g1, (127 << 3)
151 blu,pt %xcc, 1b
152 add %l0, (1 << 3), %l0
153
154 cheetah_got_tlbentry:
/* First data-access read is discarded into %g0; presumably required to
 * latch the entry before the real read — TODO confirm against the
 * UltraSPARC-III manual.
 */
155 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
156 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
157 membar #Sync
158 and %g1, %g3, %g1
159 sub %g1, %g2, %g1 /* paddr minus vaddr: phys/virt delta for this mapping */
160 or %g5, %g1, %g5
161
162 /* Clear out any KERNBASE area entries. */
163 set 2 << 16, %l0
164 sethi %hi(KERNBASE), %g3
165 sethi %hi(KERNBASE<<1), %g7
166 mov TLB_TAG_ACCESS, %l7
167
168 /* First, check ITLB */
169 1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
170 membar #Sync
171 andn %g1, %l2, %g1
172 cmp %g1, %g3
173 blu,pn %xcc, 2f
174 cmp %g1, %g7
175 bgeu,pn %xcc, 2f
176 nop
/* Entry maps [KERNBASE, 2*KERNBASE): invalidate it via demap of the
 * tag-access register plus a zero data write.
 */
177 stxa %g0, [%l7] ASI_IMMU
178 membar #Sync
179 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
180 membar #Sync
181
182 2: and %l0, (127 << 3), %g1 /* 128 ITLB-2 entries */
183 cmp %g1, (127 << 3)
184 blu,pt %xcc, 1b
185 add %l0, (1 << 3), %l0
186
187 /* Next, check DTLB */
188 set 2 << 16, %l0
189 1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
190 membar #Sync
191 andn %g1, %l2, %g1
192 cmp %g1, %g3
193 blu,pn %xcc, 2f
194 cmp %g1, %g7
195 bgeu,pn %xcc, 2f
196 nop
197 stxa %g0, [%l7] ASI_DMMU
198 membar #Sync
199 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
200 membar #Sync
201
202 2: and %l0, (511 << 3), %g1 /* 512 DTLB-2 entries */
203 cmp %g1, (511 << 3)
204 blu,pt %xcc, 1b
205 add %l0, (1 << 3), %l0
206
207 /* On Cheetah+, have to check second DTLB. */
208 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
209 ba,pt %xcc, 9f
210 nop
211
212 2: set 3 << 16, %l0
213 1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
214 membar #Sync
215 andn %g1, %l2, %g1
216 cmp %g1, %g3
217 blu,pn %xcc, 2f
218 cmp %g1, %g7
219 bgeu,pn %xcc, 2f
220 nop
221 stxa %g0, [%l7] ASI_DMMU
222 membar #Sync
223 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
224 membar #Sync
225
226 2: and %l0, (511 << 3), %g1
227 cmp %g1, (511 << 3)
228 blu,pt %xcc, 1b
229 add %l0, (1 << 3), %l0
230
231 9:
232
233 /* Now lock the TTE we created into ITLB-0 and DTLB-0,
234 * entry 15 (and maybe 14 too).
235 */
/* %l7 still holds TLB_TAG_ACCESS from above; write the KERNBASE tag
 * then store the TTE into entry 15 of both TLB-0s.
 */
236 sethi %hi(KERNBASE), %g3
237 set (0 << 16) | (15 << 3), %g7
238 stxa %g3, [%l7] ASI_DMMU
239 membar #Sync
240 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
241 membar #Sync
242 stxa %g3, [%l7] ASI_IMMU
243 membar #Sync
244 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
245 membar #Sync
246 flush %g3
247 membar #Sync
248 sethi %hi(_end), %g3 /* Check for bigkernel case */
249 or %g3, %lo(_end), %g3
250 srl %g3, 23, %g3 /* Check if _end > 8M */
251 brz,pt %g3, 1f
252 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
/* Big kernel: build a second TTE 4MB further on (global bit stripped)
 * and lock KERNBASE+4MB into entry 14 of both TLB-0s.
 */
253 sethi %hi(0x400000), %g3
254 or %g3, %lo(0x400000), %g3
255 add %g5, %g3, %g5 /* New tte data */
256 andn %g5, (_PAGE_G), %g5
257 sethi %hi(KERNBASE+0x400000), %g3
258 or %g3, %lo(KERNBASE+0x400000), %g3
259 set (0 << 16) | (14 << 3), %g7
260 stxa %g3, [%l7] ASI_DMMU
261 membar #Sync
262 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
263 membar #Sync
264 stxa %g3, [%l7] ASI_IMMU
265 membar #Sync
266 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
267 membar #Sync
268 flush %g3
269 membar #Sync
270 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
271 ba,pt %xcc, 1f
272 nop
273
274 1: set sun4u_init, %g2
275 jmpl %g2 + %g0, %g0
276 nop
277
278 spitfire_boot:
279 /* Typically PROM has already enabled both MMU's and both on-chip
280 * caches, but we do it here anyway just to be paranoid.
281 */
282 mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
283 stxa %g1, [%g0] ASI_LSU_CONTROL
284 membar #Sync
285
286 /*
287 * Make sure we are in privileged mode, have address masking,
288 * using the ordinary globals and have enabled floating
289 * point.
290 *
291 * Again, typically PROM has left %pil at 13 or similar, and
292 * (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
293 */
294 wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
295 wr %g0, 0, %fprs
296
297 spitfire_create_mappings:
298 /* %g5 holds the tlb data */
299 sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
300 sllx %g5, 32, %g5
301 or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
302
303 /* Base of physical memory cannot reliably be assumed to be
304 * at 0x0! Figure out where it happens to be. -DaveM
305 */
306
307 /* Put PADDR tlb data mask into %g3. */
308 sethi %uhi(_PAGE_PADDR_SF), %g3
309 or %g3, %ulo(_PAGE_PADDR_SF), %g3
310 sllx %g3, 32, %g3
311 sethi %hi(_PAGE_PADDR_SF), %g7
312 or %g7, %lo(_PAGE_PADDR_SF), %g7
313 or %g3, %g7, %g3
314
315 /* Walk through entire ITLB, looking for entry which maps
316 * our %pc currently, stick PADDR from there into %g5 tlb data.
317 */
318 clr %l0 /* TLB entry walker. */
319 set 0x1fff, %l2 /* Page mask. */
320 rd %pc, %l3
321 andn %l3, %l2, %g2 /* vaddr comparator */
322 1:
323 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
324 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
325 nop
326 nop
327 nop
328 andn %g1, %l2, %g1 /* Get vaddr */
329 cmp %g1, %g2
330 be,a,pn %xcc, spitfire_got_tlbentry
331 ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1 /* annulled-slot load: only done on match */
332 cmp %l0, (63 << 3) /* Spitfire TLB has 64 entries */
333 blu,pt %xcc, 1b
334 add %l0, (1 << 3), %l0
335
336 spitfire_got_tlbentry:
337 /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
338 nop
339 nop
340 nop
341 and %g1, %g3, %g1 /* Mask to just get paddr bits. */
342 sub %g1, %g2, %g1 /* Get rid of %pc offset to get base. */
343
344 /* NOTE: We hold on to %g1 paddr base as we need it below to lock
345 * NOTE: the PROM cif code into the TLB.
346 */
347
348 or %g5, %g1, %g5 /* Or it into TAG being built. */
349
/* Clear any stale ITLB entries mapping [KERNBASE, 2*KERNBASE). */
350 clr %l0 /* TLB entry walker. */
351 sethi %hi(KERNBASE), %g3 /* 4M lower limit */
352 sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
353 mov TLB_TAG_ACCESS, %l7
354 1:
355 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
356 ldxa [%l0] ASI_ITLB_TAG_READ, %g1
357 nop
358 nop
359 nop
360 andn %g1, %l2, %g1 /* Get vaddr */
361 cmp %g1, %g3
362 blu,pn %xcc, 2f
363 cmp %g1, %g7
364 bgeu,pn %xcc, 2f
365 nop
366 stxa %g0, [%l7] ASI_IMMU
367 stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
368 membar #Sync
369 2:
370 cmp %l0, (63 << 3)
371 blu,pt %xcc, 1b
372 add %l0, (1 << 3), %l0
373
374 nop; nop; nop
375
/* Same sweep for the DTLB. */
376 clr %l0 /* TLB entry walker. */
377 1:
378 /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
379 ldxa [%l0] ASI_DTLB_TAG_READ, %g1
380 nop
381 nop
382 nop
383 andn %g1, %l2, %g1 /* Get vaddr */
384 cmp %g1, %g3
385 blu,pn %xcc, 2f
386 cmp %g1, %g7
387 bgeu,pn %xcc, 2f
388 nop
389 stxa %g0, [%l7] ASI_DMMU
390 stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
391 membar #Sync
392 2:
393 cmp %l0, (63 << 3)
394 blu,pt %xcc, 1b
395 add %l0, (1 << 3), %l0
396
397 nop; nop; nop
398
399
400 /* PROM never puts any TLB entries into the MMU with the lock bit
401 * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
402 */
403
404 sethi %hi(KERNBASE), %g3
405 mov (63 << 3), %g7
406 stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
407 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
408 membar #Sync
409 stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
410 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
411 membar #Sync
412 flush %g3
413 membar #Sync
414 sethi %hi(_end), %g3 /* Check for bigkernel case */
415 or %g3, %lo(_end), %g3
416 srl %g3, 23, %g3 /* Check if _end > 8M */
417 brz,pt %g3, 2f
418 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
/* Big kernel: second 4MB TTE (global bit stripped) into entry 62,
 * mirroring the Cheetah path above.
 */
419 sethi %hi(0x400000), %g3
420 or %g3, %lo(0x400000), %g3
421 add %g5, %g3, %g5 /* New tte data */
422 andn %g5, (_PAGE_G), %g5
423 sethi %hi(KERNBASE+0x400000), %g3
424 or %g3, %lo(KERNBASE+0x400000), %g3
425 mov (62 << 3), %g7
426 stxa %g3, [%l7] ASI_DMMU
427 stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
428 membar #Sync
429 stxa %g3, [%l7] ASI_IMMU
430 stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
431 membar #Sync
432 flush %g3
433 membar #Sync
434 sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
435 2: ba,pt %xcc, 1f
436 nop
437 1:
438 set sun4u_init, %g2
439 jmpl %g2 + %g0, %g0
440 nop
441
442 sun4u_init:
443 /* Set ctx 0 */
444 mov PRIMARY_CONTEXT, %g7
445 stxa %g0, [%g7] ASI_DMMU
446 membar #Sync
447
448 mov SECONDARY_CONTEXT, %g7
449 stxa %g0, [%g7] ASI_DMMU
450 membar #Sync
451
/* %g4 = PAGE_OFFSET; presumably a register convention relied on by
 * later trap code — not used again in this file's visible portion.
 */
452 sethi %uhi(PAGE_OFFSET), %g4
453 sllx %g4, 32, %g4
454
455 /* We are now safely (we hope) in Nucleus context (0), rewrite
456 * the KERNBASE TTE's so they no longer have the global bit set.
457 * Don't forget to setup TAG_ACCESS first 8-)
458 */
459 mov TLB_TAG_ACCESS, %g2
460 stxa %g3, [%g2] ASI_IMMU
461 stxa %g3, [%g2] ASI_DMMU
462 membar #Sync
463
464 BRANCH_IF_ANY_CHEETAH(g1,g5,cheetah_tlb_fixup)
465
466 ba,pt %xcc, spitfire_tlb_fixup
467 nop
468
469 cheetah_tlb_fixup:
/* Read-modify-write entry 15 of TLB-0 in both MMUs, clearing the
 * global bit.  The first (discarded) read presumably primes the entry,
 * as in cheetah_got_tlbentry above — TODO confirm.
 */
470 set (0 << 16) | (15 << 3), %g7
471 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
472 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
473 andn %g1, (_PAGE_G), %g1
474 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
475 membar #Sync
476
477 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
478 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
479 andn %g1, (_PAGE_G), %g1
480 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
481 membar #Sync
482
483 /* Kill instruction prefetch queues. */
484 flush %g3
485 membar #Sync
486
487 mov 2, %g2 /* Set TLB type to cheetah+. */
488 BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g5,g7,1f)
489
490 mov 1, %g2 /* Set TLB type to cheetah. */
491
492 1: sethi %hi(tlb_type), %g5
493 stw %g2, [%g5 + %lo(tlb_type)]
494
495 /* Patch copy/page operations to cheetah optimized versions. */
496 call cheetah_patch_copyops
497 nop
498 call cheetah_patch_pgcopyops
499 nop
500 call cheetah_patch_cachetlbops
501 nop
502
503 ba,pt %xcc, tlb_fixup_done
504 nop
505
506 spitfire_tlb_fixup:
/* Same global-bit strip for Spitfire's locked entry 63. */
507 mov (63 << 3), %g7
508 ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
509 andn %g1, (_PAGE_G), %g1
510 stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
511 membar #Sync
512
513 ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
514 andn %g1, (_PAGE_G), %g1
515 stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
516 membar #Sync
517
518 /* Kill instruction prefetch queues. */
519 flush %g3
520 membar #Sync
521
522 /* Set TLB type to spitfire. */
523 mov 0, %g2
524 sethi %hi(tlb_type), %g5
525 stw %g2, [%g5 + %lo(tlb_type)]
526
527 tlb_fixup_done:
/* %g6 = init_task_union base (used below to build the initial kernel
 * stack); stash boot-time %sp and the PROM cif handler (%o4, per the
 * comment above sparc64_boot) for the prom_init call.
 */
528 sethi %hi(init_task_union), %g6
529 or %g6, %lo(init_task_union), %g6
530 mov %sp, %l6
531 mov %o4, %l7
532
533 #if 0 /* We don't do it like this anymore, but for historical hack value
534 * I leave this snippet here to show how crazy we can be sometimes. 8-)
535 */
536
537 /* Setup "Linux Current Register", thanks Sun 8-) */
538 wr %g0, 0x1, %pcr
539
540 /* Blackbird errata workaround. See commentary in
541 * smp.c:smp_percpu_timer_interrupt() for more
542 * information.
543 */
544 ba,pt %xcc, 99f
545 nop
546 .align 64
547 99: wr %g6, %g0, %pic
548 rd %pic, %g0
549 #endif
550
/* Initial kernel stack: top of the task union minus one frame plus
 * the stack bias; %fp cleared marks the bottom of the call chain.
 */
551 wr %g0, ASI_P, %asi
552 mov 1, %g5
553 sllx %g5, THREAD_SHIFT, %g5
554 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
555 add %g6, %g5, %sp
556 mov 0, %fp
557
558 wrpr %g0, 0, %wstate
559 wrpr %g0, 0x0, %tl
560
561 /* Clear the bss */
562 sethi %hi(__bss_start), %o0
563 or %o0, %lo(__bss_start), %o0
564 sethi %hi(_end), %o1
565 or %o1, %lo(_end), %o1
566 call __bzero
567 sub %o1, %o0, %o1 /* delay slot: length = _end - __bss_start */
568
569 mov %l6, %o1 ! OpenPROM stack
570 call prom_init
571 mov %l7, %o0 ! OpenPROM cif handler
572
573 /* Off we go.... */
574 call start_kernel
575 nop
576 /* Not reached... */
577
578 /* IMPORTANT NOTE: Whenever making changes here, check
579 * trampoline.S as well. -jj */
580 .globl setup_tba
581 setup_tba: /* i0 = is_starfire */
582 save %sp, -160, %sp
583
/* Save the PROM's trap table base so it can be restored later. */
584 rdpr %tba, %g7
585 sethi %hi(prom_tba), %o1
586 or %o1, %lo(prom_tba), %o1
587 stx %g7, [%o1]
588
589 /* Setup "Linux" globals 8-) */
/* wrpr with two sources XORs them, so this toggles into the alternate
 * global set (and flips IE); copy the current-task pointer (%g6) into
 * the alternate %g6 and point %tba at the Linux trap table.
 */
590 rdpr %pstate, %o1
591 mov %g6, %o2
592 wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
593 sethi %hi(sparc64_ttable_tl0), %g5
594 wrpr %g5, %tba
595 mov %o2, %g6
596
597 /* Set up MMU globals */
598 wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
599
600 /* Set fixed globals used by dTLB miss handler. */
601 #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
602 #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
603
/* MMU-global roles: %g1 = TLB_SFSR, %g2 = kernel TTE template,
 * %g3 = VPTE base, %g7 = 0.  TSB register is cleared first.
 */
604 mov TSB_REG, %g1
605 stxa %g0, [%g1] ASI_DMMU
606 membar #Sync
607 mov TLB_SFSR, %g1
608 sethi %uhi(KERN_HIGHBITS), %g2
609 or %g2, %ulo(KERN_HIGHBITS), %g2
610 sllx %g2, 32, %g2
611 or %g2, KERN_LOWBITS, %g2
612
613 BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
614 ba,pt %xcc, spitfire_vpte_base
615 nop
616
617 cheetah_vpte_base:
618 sethi %uhi(VPTE_BASE_CHEETAH), %g3
619 or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
620 ba,pt %xcc, 2f
621 sllx %g3, 32, %g3
622
623 spitfire_vpte_base:
624 sethi %uhi(VPTE_BASE_SPITFIRE), %g3
625 or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
626 sllx %g3, 32, %g3
627
628 2:
629 clr %g7
630 #undef KERN_HIGHBITS
631 #undef KERN_LOWBITS
632
633 /* Setup Interrupt globals */
/* Interrupt-global %g6 = this CPU's irq worklist pointer.  On SMP the
 * boot CPU id is read from hardware: a fixed physical address on
 * Starfire, the Safari config register on Cheetah, the UPA config
 * register otherwise.
 */
634 wrpr %o1, (PSTATE_IG|PSTATE_IE), %pstate
635 #ifndef CONFIG_SMP
636 sethi %hi(__up_workvec), %g5
637 or %g5, %lo(__up_workvec), %g6
638 #else
639 /* By definition of where we are, this is boot_cpu. */
640 brz,pt %i0, not_starfire
641 sethi %hi(0x1fff4000), %g1
642 or %g1, %lo(0x1fff4000), %g1
643 sllx %g1, 12, %g1
644 or %g1, 0xd0, %g1
645 lduwa [%g1] ASI_PHYS_BYPASS_EC_E, %g1
646 b,pt %xcc, set_worklist
647 nop
648
649 not_starfire:
650 BRANCH_IF_ANY_CHEETAH(g1,g5,is_cheetah)
651
652 ba,pt %xcc, not_cheetah
653 nop
654
655 is_cheetah:
656 ldxa [%g0] ASI_SAFARI_CONFIG, %g1
657 srlx %g1, 17, %g1
658 ba,pt %xcc, set_worklist
659 and %g1, 0x3ff, %g1 ! 10bit Safari Agent ID
660
661 not_cheetah:
662 ldxa [%g0] ASI_UPA_CONFIG, %g1
663 srlx %g1, 17, %g1
664 and %g1, 0x1f, %g1
665
666 /* In theory this is: &(cpu_data[boot_cpu_id].irq_worklists[0]) */
667 set_worklist:
668 sethi %hi(cpu_data), %g5
669 or %g5, %lo(cpu_data), %g5
670 sllx %g1, 7, %g1 ! cpu_data entries are 128 bytes; worklists at offset 64
671 add %g5, %g1, %g5
672 add %g5, 64, %g6
673 #endif
674
675 /* Kill PROM timer */
/* Write bit 63 of %tick_cmpr; presumably the interrupt-disable bit —
 * TODO confirm against the UltraSPARC manual.
 */
676 sethi %hi(0x80000000), %g1
677 sllx %g1, 32, %g1
678 wr %g1, 0, %tick_cmpr
679
680 BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
681
682 ba,pt %xcc, 2f
683 nop
684
685 /* Disable STICK_INT interrupts. */
686 1:
687 sethi %hi(0x80000000), %g1
688 sllx %g1, 32, %g1
689 wr %g1, %asr25
690
691 /* Ok, we're done setting up all the state our trap mechanisms need,
692 * now get back into normal globals and let the PROM know what is up.
693 */
694 2:
695 wrpr %g0, %g0, %wstate
696 wrpr %o1, PSTATE_IE, %pstate
697
698 sethi %hi(sparc64_ttable_tl0), %g5
699 call prom_set_trap_table
700 mov %g5, %o0 /* delay slot: arg0 = trap table base */
701
/* Re-enable interrupts: xor-with-0 form of wrpr writes %o1 verbatim. */
702 rdpr %pstate, %o1
703 or %o1, PSTATE_IE, %o1
704 wrpr %o1, 0, %pstate
705
706 ret
707 restore
708
709 /*
710 * The following skips make sure the trap table in ttable.S is aligned
711 * on a 32K boundary as required by the v9 specs for TBA register.
712 */
713 sparc64_boot_end:
/* Pad to the end of the 8K region that began at _start, then reserve
 * another 8K for the rest of bootup_user_stack.
 */
714 .skip 0x2000 + _start - sparc64_boot_end
715 bootup_user_stack_end:
716 .skip 0x2000
717
718 #ifdef CONFIG_SBUS
719 /* This is just a hack to fool make depend config.h discovering
720 strategy: As the .S files below need config.h, but
721 make depend does not find it for them, we include config.h
722 in head.S */
723 #endif
724
725 ! 0x0000000000408000
726
727 #include "ttable.S"
728 #include "systbls.S"
729
730 .align 1024
731 .globl swapper_pg_dir
732 swapper_pg_dir:
733 .word 0
734
735 #include "etrap.S"
736 #include "rtrap.S"
737 #include "winfixup.S"
738 #include "entry.S"
739
740 /* This is just anal retentiveness on my part... */
741 .align 16384
742
743 .data
744 .align 8
745 .globl prom_tba, tlb_type
746 prom_tba: .xword 0
747 tlb_type: .word 0 /* Must NOT end up in BSS */
748 .section ".fixup",#alloc,#execinstr
/* Common fault-return stub: returns -EFAULT (in the restored window's
 * %o0) to the caller of a faulting user-copy.
 */
749 .globl __ret_efault
750 __ret_efault:
751 ret
752 restore %g0, -EFAULT, %o0
753
Cache object: 837951254141806987273fe33330730e
|