1 /*-
2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
29 */
30 /*-
31 * Copyright (c) 2001 Jake Burkholder.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 #include <machine/asm.h>
57 __FBSDID("$FreeBSD$");
58
59 #include "opt_compat.h"
60 #include "opt_ddb.h"
61
62 #include <machine/asi.h>
63 #include <machine/asmacros.h>
64 #include <machine/frame.h>
65 #include <machine/fsr.h>
66 #include <machine/intr_machdep.h>
67 #include <machine/ktr.h>
68 #include <machine/pcb.h>
69 #include <machine/pstate.h>
70 #include <machine/trap.h>
71 #include <machine/tsb.h>
72 #include <machine/tstate.h>
73 #include <machine/utrap.h>
74 #include <machine/wstate.h>
75
76 #include "assym.s"
77
78 #define TSB_ASI 0x0
79 #define TSB_KERNEL 0x0
80 #define TSB_KERNEL_MASK 0x0
81 #define TSB_KERNEL_PHYS 0x0
82 #define TSB_KERNEL_PHYS_END 0x0
83 #define TSB_QUAD_LDD 0x0
84
85 .register %g2,#ignore
86 .register %g3,#ignore
87 .register %g6,#ignore
88 .register %g7,#ignore
89
/*
 * Atomically set a bit in a TTE.
 *
 * r1: TTE pointer on entry, advanced to the data word; r2: scratch, holds
 * the (old) data word afterwards; r3: scratch; bit: bit(s) to set;
 * a/asi: address-space qualifiers consumed by the LD and CAS macros.
 * Retries the compare-and-swap until it observes an unchanged data word.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
	add	r1, TTE_DATA, r1 ; \
	LD(x, a) [r1] asi, r2 ; \
9:	or	r2, bit, r3 ; \
	CAS(x, a) [r1] asi, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	mov	r3, r2

/* Convenience wrappers: set the referenced or hardware-writable bit. */
#define	TTE_SET_REF(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
#define	TTE_SET_W(r1, r2, r3, a, asi)	TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
104
/*
 * Macros for spilling and filling live windows.
 *
 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
 * handler will not use more than 24 instructions total, to leave room for
 * resume vectors which occupy the last 8 instructions.
 */

/*
 * Store the current window's locals (%l0-%l7) and ins (%i0-%i7) at
 * consecutive slots of the given element size starting at "base",
 * using store instruction "storer" with address space qualifier "asi".
 */
#define SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

/* Inverse of SPILL: reload %l0-%l7 and %i0-%i7 using load "loader". */
#define FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7
148
/*
 * Register-to-itself move; presumably a workaround for UltraSPARC
 * erratum #50 (per the macro name) — NOTE(review): confirm against the
 * CPU errata list.
 */
#define ERRATUM50(reg)	mov reg, reg

/* Guard band (bytes) reserved at the bottom of a kernel stack. */
#define KSTACK_SLOP	1024

/*
 * Sanity check the kernel stack and bail out if it's wrong.
 * XXX: doesn't handle being on the panic stack.
 *
 * Saves %g1/%g2 in a 16-byte frame on the alternate stack (ASP_REG),
 * then verifies that %sp + SPOFF is pointer-aligned and lies within
 * (td_kstack + KSTACK_SLOP, td_kstack + KSTACK_PAGES * PAGE_SIZE].
 * On failure the annulled delay-slot "inc" pops the frame and control
 * branches to tl1_kstack_fault; on success %g1/%g2 are restored.
 */
#define KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG
178
179 .globl tl_text_begin
180 tl_text_begin:
181 nop
182
183 ENTRY(tl1_kstack_fault)
184 rdpr %tl, %g1
185 1: cmp %g1, 2
186 be,a 2f
187 nop
188
189 #if KTR_COMPILE & KTR_TRAP
190 CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
191 , %g2, %g3, %g4, 7, 8, 9)
192 rdpr %tl, %g3
193 stx %g3, [%g2 + KTR_PARM1]
194 rdpr %tpc, %g3
195 stx %g3, [%g2 + KTR_PARM1]
196 rdpr %tnpc, %g3
197 stx %g3, [%g2 + KTR_PARM1]
198 9:
199 #endif
200
201 sub %g1, 1, %g1
202 wrpr %g1, 0, %tl
203 ba,a %xcc, 1b
204 nop
205
206 2:
207 #if KTR_COMPILE & KTR_TRAP
208 CATR(KTR_TRAP,
209 "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
210 , %g1, %g2, %g3, 7, 8, 9)
211 add %sp, SPOFF, %g2
212 stx %g2, [%g1 + KTR_PARM1]
213 ldx [PCPU(CURTHREAD)], %g2
214 ldx [%g2 + TD_KSTACK], %g2
215 stx %g2, [%g1 + KTR_PARM2]
216 rdpr %canrestore, %g2
217 stx %g2, [%g1 + KTR_PARM3]
218 rdpr %cansave, %g2
219 stx %g2, [%g1 + KTR_PARM4]
220 rdpr %otherwin, %g2
221 stx %g2, [%g1 + KTR_PARM5]
222 rdpr %wstate, %g2
223 stx %g2, [%g1 + KTR_PARM6]
224 9:
225 #endif
226
227 wrpr %g0, 0, %canrestore
228 wrpr %g0, 6, %cansave
229 wrpr %g0, 0, %otherwin
230 wrpr %g0, WSTATE_KERNEL, %wstate
231
232 sub ASP_REG, SPOFF + CCFSZ, %sp
233 clr %fp
234
235 set trap, %o2
236 ba %xcc, tl1_trap
237 mov T_KSTACK_FAULT | T_KERNEL, %o0
238 END(tl1_kstack_fault)
239
/*
 * Magic to resume from a spill or fill trap. If we get an alignment or an
 * MMU fault during a spill or a fill, this macro will detect the fault and
 * resume at a set instruction offset in the trap handler.
 *
 * To check if the previous trap was a spill/fill we convert the trapped pc
 * to a trap type and verify that it is in the range of spill/fill vectors.
 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
 * tl bit allows us to detect both ranges with one test.
 *
 * This is:
 *	0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
 *
 * To calculate the new pc we take advantage of the xor feature of wrpr.
 * Forcing all the low bits of the trapped pc on we can produce any offset
 * into the spill/fill vector. The size of a spill/fill trap vector is 0x80.
 *
 *	0x7f ^ 0x1f == 0x60
 *	0x1f == (0x80 - 0x60) - 1
 *
 * Which are the offset and xor value used to resume from alignment faults.
 */

/*
 * Determine if we have trapped inside of a spill/fill vector, and if so resume
 * at a fixed instruction offset in the trap vector. Must be called on
 * alternate globals.
 *
 * %g1/%g2 are preserved via a 16-byte frame on the alternate stack.
 * On a spill/fill match: clear the SFSR via "stxa_g0_sfsr" (or EMPTY),
 * rewrite %tnpc with the xor trick above and "done"; otherwise fall
 * through at label 9 with registers restored.
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG

/*
 * For certain faults we need to clear the SFSR MMU register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

/* XOR constant that maps a forced-on pc (low 7 bits set) to offset "off". */
#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
321
/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 * Resume vector placed at a fixed offset in a spill/fill handler;
 * passes the trap type to tl0_sftrap in %g2.
 */
#define	RSF_TRAP(type) \
	ba	%xcc, tl0_sftrap ; \
	mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails.
 */
#define	RSF_FATAL(type) \
	ba	%xcc, rsf_fatal ; \
	mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore. This is used on return from the kernel to usermode.
 * Skips forward by RSF_FILL_INC so the slow path is taken instead.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	ba,a	%xcc, tl1_spill_topcb ; \
	nop ; \
	.align	16
356
/*
 * Unrecoverable window spill/fill failure: trace the trap type (in %g2),
 * sanity-check the kernel stack, then reset the machine with "sir".
 */
ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	/* Software-initiated reset; this does not return. */
	sir
END(rsf_fatal)
371
.data
	_ALIGN_DATA
	/*
	 * One (MAXCOMLEN + 1)-byte name slot and one 8-byte counter per
	 * interrupt vector and per PIL level; presumably consumed by the
	 * generic interrupt accounting code (vmstat -i) — confirm caller.
	 */
	.globl	intrnames, eintrnames
intrnames:
	.space	(IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
eintrnames:
	.globl	intrcnt, eintrcnt
intrcnt:
	.space	(IV_MAX + PIL_MAX) * 8
eintrcnt:

	.text
384
385 /*
386 * Trap table and associated macros
387 *
388 * Due to its size a trap table is an inherently hard thing to represent in
389 * code in a clean way. There are approximately 1024 vectors, of 8 or 32
390 * instructions each, many of which are identical. The way that this is
391 * laid out is the instructions (8 or 32) for the actual trap vector appear
392 * as an AS macro. In general this code branches to tl0_trap or tl1_trap,
393 * but if not supporting code can be placed just after the definition of the
394 * macro. The macros are then instantiated in a different section (.trap),
395 * which is setup to be placed by the linker at the beginning of .text, and the
396 * code around the macros is moved to the end of trap table. In this way the
397 * code that must be sequential in memory can be split up, and located near
398 * its supporting code so that it is easier to follow.
399 */
400
/*
 * Clean window traps occur when %cleanwin is zero to ensure that data
 * is not leaked between address spaces in registers.
 *
 * Zeroes the window's outs and locals, bumps %cleanwin so the trap is
 * not retaken, and retries the trapped instruction.  (Comments do not
 * emit code, so the 128-byte alignment of the vector is unaffected.)
 */
	.macro	clean_window
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	/* %l7 is used as scratch for the %cleanwin update, clear it last. */
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm
428
/*
 * Stack fixups for entry from user mode. We are still running on the
 * user stack, and with its live registers, so we must save soon. We
 * are on alternate globals so we do have some registers. Set the
 * transitional window state, and do the save. If this traps we
 * attempt to spill a window to the user stack. If this fails, we
 * spill the window to the pcb and continue. Spilling to the pcb
 * must not fail.
 *
 * NOTE: Must be called with alternate globals and clobbers %g1.
 */

	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	/* Common user-mode trap setup: split, then call tl0_utrap(type). */
	.macro	tl0_setup type
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_utrap
	mov	\type, %o0
	.endm

/*
 * Generic trap type. Call trap() with the specified type.
 */
	.macro	tl0_gen type
	tl0_setup \type
	.align	32
	.endm

/*
 * This is used to suck up the massive swaths of reserved trap types.
 * Generates count "reserved" trap vectors.
 */
	.macro	tl0_reserved count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm

	/*
	 * Kernel-mode (TL>0) variant: set the nested window state and
	 * allocate a trap frame on the kernel stack.  Clobbers %g1.
	 */
	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
	.endm

	.macro	tl1_setup type
	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	mov	\type | T_KERNEL, %o0
	.endm

	.macro	tl1_gen type
	tl1_setup \type
	.align	32
	.endm

	.macro	tl1_reserved count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm
497
/*
	 * Instruction access exception from user mode: capture %tpc and the
	 * IMMU SFSR (%g3/%g4), clear the SFSR and hand off to tl0_sfsr_trap.
	 */
	.macro	tl0_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	/*
	 * Data access exception from user mode: capture the DMMU SFAR and
	 * SFSR (%g3/%g4), clear the SFSR and hand off to tl0_sfsr_trap.
	 */
	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	/* Misaligned memory access from user mode; same SFAR/SFSR protocol. */
	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm
537
/*
 * Common tail for user-mode faults that carry fault address/status:
 * %g2 = trap type, %g3 = SFAR (or %tpc), %g4 = SFSR; forwarded to
 * tl0_utrap as %o0/%o4/%o5.
 */
ENTRY(tl0_sfsr_trap)
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	mov	%g2, %o0
END(tl0_sfsr_trap)
547
/*
	 * User-mode interrupt vector: call tl0_intr with the PIL level in
	 * %o0 and the corresponding level mask in %o1.
	 */
	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	ba	%xcc, tl0_intr
	mov	\level, %o0
	.align	32
	.endm

/* Expand to tl0_intr or tl1_intr for the given trap level. */
#define	INTR(level, traplvl) \
	tl ## traplvl ## _intr	level, 1 << level

/*
 * PIL_TICK uses mask 0x10001 rather than a single level bit —
 * NOTE(review): bit 16 presumably distinguishes the tick interrupt;
 * confirm against intr_machdep.h.
 */
#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 0x10001

/* Generate the 15 interrupt-level vectors (PIL 1-15) for one trap level. */
#define	INTR_LEVEL(tl) \
	INTR(1, tl) ; \
	INTR(2, tl) ; \
	INTR(3, tl) ; \
	INTR(4, tl) ; \
	INTR(5, tl) ; \
	INTR(6, tl) ; \
	INTR(7, tl) ; \
	INTR(8, tl) ; \
	INTR(9, tl) ; \
	INTR(10, tl) ; \
	INTR(11, tl) ; \
	INTR(12, tl) ; \
	INTR(13, tl) ; \
	TICK(tl) ; \
	INTR(15, tl) ;

	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm

	/*
	 * Interrupt vector trap: if the receive register shows a busy
	 * vector, branch to the real handler (the global intr_vector label,
	 * defined elsewhere); otherwise the interrupt is stray.
	 */
	.macro	intr_vector
	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
	andcc	%g1, IRSR_BUSY, %g0
	bnz,a,pt %xcc, intr_vector
	nop
	ba,a,pt	%xcc, intr_vector_stray
	nop
	.align	32
	.endm
592
/*
	 * User instruction TLB miss: walk the user TSB looking for a valid,
	 * executable TTE for the faulting page at each supported page size.
	 * Register roles: %g1 = IMMU tag access, %g2 = page size index,
	 * %g3 = scratch/virtual page number/tag target, %g4 = TTE bucket
	 * pointer, %g6/%g7 = loaded TTE tag/data.
	 */
	.macro	tl0_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register. We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: shift = size index * 3 + PAGE_SHIFT (each size is 8x larger).
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE. Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7. We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the TTE tags match.
	 */
	brgez,pn %g7, 3f
	andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, 3f
	cmp	%g3, %g6
	bne,pn	%xcc, 3f
	EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.  (The index is bumped in the delay slot.)
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm
701
/*
 * Slow path of the user ITLB miss handler: atomically set TD_REF on the
 * matched TTE (%g4 points at it; %g1 still holds the tag access value),
 * then load the TLB and retry unless the TTE was invalidated meanwhile.
 */
ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 * (%g2 holds the TTE data; a clear valid bit makes it non-negative.)
	 */
	brgez,pn %g2, 1f
	nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)
721
/*
 * The TSB walk failed: restore the tag access register (the TSB load may
 * have clobbered it), then call tl0_utrap with T_INSTRUCTION_MISS and the
 * tag access value in %o3.
 */
ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.  The flush of a kernel address serves as the required
	 * synchronizing operation for the IMMU store.
	 */
	sethi	%hi(KERNBASE), %g2
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	flush	%g2

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)
751
/*
	 * User data TLB miss: same TSB walk as tl0_immu_miss but against the
	 * DMMU and without the executable check.  The tl1_dmmu_miss_user
	 * label lets the kernel-mode handler share this walker for faults
	 * on user addresses taken at TL>0.
	 */
	.macro	tl0_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register. We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_miss_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: shift = size index * 3 + PAGE_SHIFT (each size is 8x larger).
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE. Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7. We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, 3f
	cmp	%g3, %g6
	bne,pn	%xcc, 3f
	EMPTY

	/*
	 * We matched a TTE, load the TLB.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
	nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next TTE in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.  (The index is bumped in the delay slot.)
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_miss_trap
	.align	128
	.endm
859
/*
 * Slow path of the user DTLB miss handler: atomically set TD_REF on the
 * matched TTE (%g4 points at it; %g1 still holds the tag access value),
 * then load the TLB and retry unless the TTE was invalidated meanwhile.
 */
ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)

	/*
	 * May have become invalid during casxa, in which case start over.
	 * (%g2 holds the TTE data; a clear valid bit makes it non-negative.)
	 */
	brgez,pn %g2, 1f
	nop

	/*
	 * Load the TTE tag and data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)
879
/*
 * The user DTLB TSB walk failed.  Because the walker is shared with
 * tl1_dmmu_miss_user, this may be reached from TL>1: in that case either
 * resume a faulted spill/fill or raise a kernel T_DATA_MISS; otherwise
 * raise a user T_DATA_MISS via tl0_utrap.  %g1 holds the tag access value.
 */
ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)
933
/*
	 * User data protection fault vector: the handler does not fit in
	 * the vector, so just branch to the real code.
	 */
	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	nop
	.align	128
	.endm
939
/*
 * User data protection fault: walk the user TSB for a valid TTE with
 * software-write (TD_SW) permission; if found, set the hardware write bit,
 * demap the stale TLB entry and reload.  Shares its walker with the
 * kernel-mode handler via tl1_dmmu_prot_user.  Register roles as in
 * tl0_dmmu_miss: %g1 = tag access, %g2 = size index, %g3 = scratch,
 * %g4 = bucket pointer, %g6/%g7 = TTE tag/data.
 */
ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register. We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: shift = size index * 3 + PAGE_SHIFT (each size is 8x larger).
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the TTE bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the TTE tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the TTEs in this bucket.
	 */

	/*
	 * Load the TTE. Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7. We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	cmp	%g3, %g6
	bne,pn	%xcc, 4f
	nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3, a, ASI_N)

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 * (TD_W is merged into the data in the delay slot either way.)
	 */
	brgez,pn %g2, 3f
	or	%g2, TD_W, %g2

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user TSB, call C code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	nop
END(tl0_dmmu_prot_1)
1058
/*
 * The protection-fault TSB walk failed.  May be reached from TL>1 via
 * tl1_dmmu_prot_user: then resume a faulted spill/fill or raise a kernel
 * T_DATA_PROTECTION; otherwise raise the user trap.  %g1 holds the tag
 * access value; the TAR/SFAR/SFSR are passed to trap() in %o3/%o4/%o5.
 */
ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	nop

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the MMU registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the SFAR, SFSR and TAR. Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)
1124
/*
	 * Spill a 64-bit window to the user stack (ASI_AIUP = as-if-user,
	 * primary).  SPOFF accounts for the V9 stack bias.  The RSF_TRAP
	 * resume vectors at the end of the 128-byte vector convert an
	 * alignment or MMU fault during the spill into a T_SPILL trap.
	 */
	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/*
	 * Spill a 32-bit (compat) window: word-sized stores, no stack bias.
	 */
	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/* Fill a 64-bit window from the user stack; faults become T_FILL. */
	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	/* Fill a 32-bit (compat) window; faults become T_FILL. */
	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
1164
/*
 * Deliver a T_SPILL/T_FILL trap (type in %g2) raised by an RSF_TRAP resume
 * vector.  Restores the trapped window pointer from %tstate's CWP field
 * before entering the common trap path.
 */
ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	mov	%g2, %o0
END(tl0_sftrap)
1175
/*
	 * Generate "count" spill vectors that must never be reached;
	 * reset the machine (sir) if one ever is.
	 */
	.macro	tl0_spill_bad count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/* Same as tl0_spill_bad, for fill vectors. */
	.macro	tl0_fill_bad count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/* System call vector: enter the common trap path with T_SYSCALL. */
	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	mov	T_SYSCALL, %o0
	.align	32
	.endm
1198
/*
	 * FP-disabled-style vector that restores the user FP state; the
	 * branch target is the global tl0_fp_restore handler below (the
	 * macro itself defines no label, so the name resolves globally).
	 */
	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	nop
	.align	32
	.endm

/*
 * Restore the user FP registers from the pcb: clear PCB_FEF so the state
 * is no longer considered saved, enable the FPU and block-load all 64
 * FP registers (4 x 64-byte blocks) from PCB_UFP.
 */
ENTRY(tl0_fp_restore)
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)
1219
/*
	 * Kernel instruction access exception: capture %tpc and the IMMU
	 * SFSR (%g3/%g4), clear the SFSR and branch to the trap tail.
	 */
	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	/*
	 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
	 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
	 * this triggers a RED state exception though.
	 */
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

/*
 * Trap tail: %g2 = trap type, %g3 = fault pc, %g4 = SFSR, forwarded to
 * tl1_trap as %o0/%o4/%o5.
 */
ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	mov	%g2, %o0
END(tl1_insn_exceptn_trap)
1246
	/*
	 * Vector stub: branch to the out-of-line handler, which does not
	 * fit in the 32-byte trap table slot.
	 */
	.macro	tl1_fp_disabled
	ba,a	%xcc, tl1_fp_disabled_1
	 nop
	.align	32
	.endm
1252
/*
 * FPU-disabled trap at TL1.  If the trapping pc lies inside the
 * [fpu_fault_begin, fpu_fault_begin + fpu_fault_size) window, this is an
 * expected in-kernel FPU use: enable the FPU, reload the kernel FP state
 * from PCB_KFP and retry.  Otherwise it is a genuine kernel fault and we
 * enter tl1_trap with T_FP_DISABLED | T_KERNEL.
 */
ENTRY(tl1_fp_disabled_1)
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1			! offset of tpc into the window
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f			! unsigned: outside window -> trap
	 nop

	wr	%g0, FPRS_FEF, %fprs		! enable the FPU
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	 mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)
1276
	/*
	 * TL1 data access exception vector: switch to alternate globals
	 * and branch to the out-of-line handler.
	 */
	.macro	tl1_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_data_excptn_trap
	 nop
	.align	32
	.endm
1283
/*
 * Out-of-line tail of tl1_data_excptn: resume a faulting spill/fill if
 * applicable (clearing the SFSR), then join the common SFSR trap path
 * with the trap type in %g2.
 */
ENTRY(tl1_data_excptn_trap)
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	 mov	T_DATA_EXCEPTION | T_KERNEL, %g2
END(tl1_data_excptn_trap)
1289
	/*
	 * TL1 alignment trap vector: switch to alternate globals and
	 * branch to the out-of-line handler.
	 */
	.macro	tl1_align
	wrpr	%g0, PSTATE_ALT, %pstate
	ba,a	%xcc, tl1_align_trap
	 nop
	.align	32
	.endm
1296
/*
 * Out-of-line tail of tl1_align: resume a misaligned spill/fill if
 * applicable, then join the common SFSR trap path with the trap type
 * in %g2.
 */
ENTRY(tl1_align_trap)
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	 mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
END(tl1_align_trap)
1302
/*
 * Common TL1 data-fault trap entry.  On entry %g2 holds the trap type.
 * Reads the DMMU SFAR/SFSR, clears the SFSR, and enters tl1_trap with
 * type/%o0, sfar/%o4 and sfsr/%o5.
 */
ENTRY(tl1_sfsr_trap)
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3	! fault address
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4	! fault status
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi	! clear fault status
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	 mov	%g2, %o0			! delay: trap type
END(tl1_sfsr_trap)
1318
	/*
	 * TL1 interrupt vector: enter the common tl1_intr handler with
	 * the interrupt level in %o0 and the softint mask in %o1.
	 */
	.macro	tl1_intr	level, mask
	tl1_split
	set	\mask, %o1
	ba	%xcc, tl1_intr
	 mov	\level, %o0			! delay: interrupt level
	.align	32
	.endm
1326
	/*
	 * Expand the per-level interrupt vectors (0x241-0x24f) for TL1.
	 */
	.macro	tl1_intr_level
	INTR_LEVEL(1)
	.endm
1330
	/*
	 * TL1 instruction TLB miss (kernel text).  Register roles:
	 * %g5 = tag access / VPN, %g6 = TTE address then TTE vpn,
	 * %g7 = TSB base then TTE data.
	 */
	.macro	tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_1
tl1_immu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6		! build 64-bit TSB base in %g7
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_1
tl1_immu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5		! %g5 = VPN
	and	%g5, %g6, %g6			! index into the TSB
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6			! %g6 = &TSB[index]

	/*
	 * Load the TTE.
	 */
	.globl	tl1_immu_miss_patch_quad_ldd_1
tl1_immu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap	! valid bit clear -> trap
	 andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, tl1_immu_miss_trap	! not executable -> trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_immu_miss_trap	! VPN mismatch -> trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl1_immu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align	128
	.endm
1393
/*
 * Slow path of tl1_immu_miss: atomically set the reference bit in the
 * TSB entry (via TTE_SET_REF/casxa), then load the TTE into the ITLB
 * unless it was invalidated concurrently.
 */
ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_tsb_2
tl1_immu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6		! build 64-bit TSB base in %g7
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_immu_miss_patch_tsb_mask_2
tl1_immu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5			! %g5 = &TSB[index]
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_immu_miss_patch_asi_1
tl1_immu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)
1435
/*
 * No usable TSB entry for a kernel instruction miss: hand off to
 * tl1_trap.  Relies on %asi still being ASI_IMMU, set by the
 * tl1_immu_miss vector.
 */
ENTRY(tl1_immu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2	! re-read the tag access reg

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar argument
	ba	%xcc, tl1_trap
	 mov	T_INSTRUCTION_MISS | T_KERNEL, %o0
END(tl1_immu_miss_trap)
1451
	/*
	 * TL1 data TLB miss.  Register roles: %g5 = tag access / VPN,
	 * %g6 = TTE address then TTE vpn, %g7 = TSB base then TTE data.
	 * User-context and direct-mapped addresses are diverted early.
	 */
	.macro	tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	 mov	%g5, %g1

	/*
	 * Check for the direct mapped physical region.  These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	 EMPTY

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_1
tl1_dmmu_miss_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6		! build 64-bit TSB base in %g7
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_1
tl1_dmmu_miss_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5		! %g5 = VPN
	and	%g5, %g6, %g6			! index into the TSB
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6			! %g6 = &TSB[index]

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_miss_patch_quad_ldd_1
tl1_dmmu_miss_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap	! valid bit clear -> trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_miss_trap	! VPN mismatch -> trap
	 EMPTY

	/*
	 * Set the reference bit if it's currently clear.
	 */
	 andcc	%g7, TD_REF, %g0
	bz,a,pt	%xcc, tl1_dmmu_miss_set_ref
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align	128
	.endm
1527
/*
 * Slow path of tl1_dmmu_miss: atomically set the reference bit in the
 * TSB entry (via TTE_SET_REF/casxa), then load the TTE into the DTLB
 * unless it was invalidated concurrently.
 *
 * Fix: the two .globl directives were swapped with respect to the
 * labels they declare (.globl …_patch_tsb_mask_2 preceded the
 * …_patch_tsb_2: label and vice versa).  Both symbols still became
 * global so no breakage resulted, but the pairing was misleading and
 * inconsistent with tl1_immu_miss_set_ref and tl1_dmmu_prot_1; each
 * .globl now names the label that immediately follows it.
 */
ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_tsb_2
tl1_dmmu_miss_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6		! build 64-bit TSB base in %g7
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_miss_patch_tsb_mask_2
tl1_dmmu_miss_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	and	%g5, %g6, %g5			! %g5 = &TSB[index]
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the reference bit.
	 */
	.globl	tl1_dmmu_miss_patch_asi_1
tl1_dmmu_miss_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_REF(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 nop

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)
1569
/*
 * No usable TSB entry for a kernel data miss: check for kernel stack
 * overflow, then hand off to tl1_trap.  Relies on %asi still being
 * ASI_DMMU, set by the tl1_dmmu_miss vector.
 */
ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2	! re-read the tag access reg

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar argument
	ba	%xcc, tl1_trap
	 mov	T_DATA_MISS | T_KERNEL, %o0
END(tl1_dmmu_miss_trap)
1587
/*
 * Handle a DTLB miss in the direct-mapped physical region: synthesize a
 * TTE from the virtual address itself and load it into the DTLB.
 *
 * Fix: the first TSB_KERNEL_PHYS bound was built with
 * "sethi %hi(TSB_KERNEL_PHYS), %g3", which clobbered the shifted upper
 * 32 bits held in %g3, and the following "or %g7, %g3, %g7" then mixed
 * in stale setx scratch left in %g7 — the computed lower bound was
 * garbage.  The sethi destination must be %g7, matching the
 * TSB_KERNEL_PHYS_END sequence below.
 */
ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the TTE bits.  The virtual address bits that
	 * correspond to the TTE valid and page size bits are left set, so
	 * they don't have to be included in the TTE bits below.  We know they
	 * are set because the virtual address is in the upper va hole.
	 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
	 * and we get a miss on the directly accessed kernel TSB we must not
	 * set TD_CV in order to access it uniformly bypassing the D$.
	 */
	setx	TLB_DIRECT_ADDRESS_MASK, %g7, %g4
	and	%g5, %g4, %g4			! %g4 = physical address
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5			! %g5 = partial TTE
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_1
tl1_dmmu_miss_direct_patch_tsb_phys_1:
	sethi	%uhi(TSB_KERNEL_PHYS), %g3	! build 64-bit bound in %g7
	or	%g3, %ulo(TSB_KERNEL_PHYS), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS), %g7	! was %g3: clobbered upper half
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bl,pt	%xcc, 1f			! below the kernel TSB: cacheable
	 or	%g5, TD_CP | TD_W, %g5
	.globl	tl1_dmmu_miss_direct_patch_tsb_phys_end_1
tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
	sethi	%uhi(TSB_KERNEL_PHYS_END), %g3
	or	%g3, %ulo(TSB_KERNEL_PHYS_END), %g3
	sllx	%g3, 32, %g3
	sethi	%hi(TSB_KERNEL_PHYS_END), %g7
	or	%g7, %g3, %g7
	cmp	%g4, %g7
	bg,a,pt	%xcc, 1f			! above the kernel TSB: cacheable
	 nop
	ba,pt	%xcc, 2f			! inside the TSB: leave TD_CV clear
	 nop
1:	or	%g5, TD_CV, %g5

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
2:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)
1633
	/*
	 * TL1 data protection fault vector: branch to the out-of-line
	 * handler, which does not fit in the trap table slot.
	 */
	.macro	tl1_dmmu_prot
	ba,a	%xcc, tl1_dmmu_prot_1
	 nop
	.align	128
	.endm
1639
/*
 * TL1 data protection fault (write to a clean, software-writable page).
 * If the TSB holds a valid, TD_SW TTE for the address, demap the stale
 * TLB entry, atomically set the hardware write bit in the TSB, and load
 * the updated TTE.  Otherwise fall through to tl1_dmmu_prot_trap.
 * Register roles: %g5 = tag access / VPN, %g6/%g7 = TTE pair.
 */
ENTRY(tl1_dmmu_prot_1)
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If it's non-zero this is a fault on a user address.  Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	 mov	%g5, %g1

	/*
	 * Compute the address of the TTE.  The TSB mask and address of the
	 * TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_1
tl1_dmmu_prot_patch_tsb_1:
	sethi	%uhi(TSB_KERNEL), %g6		! build 64-bit TSB base in %g7
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_1
tl1_dmmu_prot_patch_tsb_mask_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6

	srlx	%g5, TAR_VPN_SHIFT, %g5		! %g5 = VPN
	and	%g5, %g6, %g6			! index into the TSB
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6			! %g6 = &TSB[index]

	/*
	 * Load the TTE.
	 */
	.globl	tl1_dmmu_prot_patch_quad_ldd_1
tl1_dmmu_prot_patch_quad_ldd_1:
	ldda	[%g6] TSB_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap	! valid bit clear -> trap
	 andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, tl1_dmmu_prot_trap	! not sw-writable -> trap
	 srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap	! VPN mismatch -> trap
	 EMPTY

	/*
	 * Delete the old TLB entry and clear the SFSR.
	 */
	 sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the TTE address, which we clobbered loading the TTE.
	 * The TSB mask and address of the TSB are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_tsb_2
tl1_dmmu_prot_patch_tsb_2:
	sethi	%uhi(TSB_KERNEL), %g6
	or	%g6, %ulo(TSB_KERNEL), %g6
	sllx	%g6, 32, %g6
	sethi	%hi(TSB_KERNEL), %g7
	or	%g7, %g6, %g7
	.globl	tl1_dmmu_prot_patch_tsb_mask_2
tl1_dmmu_prot_patch_tsb_mask_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	and	%g5, %g6, %g5			! %g5 = &TSB[index]
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	.globl	tl1_dmmu_prot_patch_asi_1
tl1_dmmu_prot_patch_asi_1:
	wr	%g0, TSB_ASI, %asi
	TTE_SET_W(%g5, %g6, %g7, a, %asi)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	 or	%g6, TD_W, %g6			! delay: set TD_W in the TLB copy

	/*
	 * Load the TTE data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)
1745
/*
 * Unresolvable TL1 protection fault: capture TAR/SFAR/SFSR, clear the
 * SFSR, and enter tl1_trap.  Relies on %asi still being ASI_DMMU.
 */
ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the SFAR, SFSR and TAR.  Clear the SFSR.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar
	mov	%g3, %o4			! sfar
	mov	%g4, %o5			! sfsr
	ba	%xcc, tl1_trap
	 mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl1_dmmu_prot_trap)
1770
	/*
	 * Spill a 64-bit kernel frame to the kernel stack; spill-side
	 * faults are fatal (RSF_FATAL).
	 */
	.macro	tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm
1779
	/*
	 * Spill a 64-bit user frame to the user stack via ASI_AIUP;
	 * on fault, fall back to spilling into the pcb (RSF_SPILL_TOPCB).
	 */
	.macro	tl1_spill_2_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1789
	/*
	 * Spill a 32-bit user frame to the user stack via ASI_AIUP;
	 * on fault, fall back to spilling into the pcb.
	 */
	.macro	tl1_spill_3_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1799
	/*
	 * Spill for mixed-mode stacks: a set low bit of %sp marks a
	 * 64-bit frame (redirect to tl1_spill_0_n); otherwise zero-extend
	 * %sp and spill a 32-bit frame.  Faults are fatal.
	 */
	.macro	tl1_spill_7_n
	btst	1, %sp
	bnz,a,pn %xcc, tl1_spill_0_n
	 nop
	srl	%sp, 0, %sp			! zero-extend the 32-bit %sp
	SPILL(stw, %sp, 4, EMPTY)
	saved
	retry
	.align	32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm
1812
	/*
	 * Spill a 64-bit user (otherwin) frame to the user stack via
	 * ASI_AIUP; on fault, spill into the pcb.
	 */
	.macro	tl1_spill_0_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1822
	/*
	 * Spill a 32-bit user (otherwin) frame to the user stack via
	 * ASI_AIUP; on fault, spill into the pcb.
	 */
	.macro	tl1_spill_1_o
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1832
	/*
	 * Otherwin spill variant that always goes straight to the pcb.
	 */
	.macro	tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align	128
	.endm
1837
	/*
	 * Fill a 64-bit kernel frame from the kernel stack; fill-side
	 * faults are fatal.
	 */
	.macro	tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm
1846
	/*
	 * Fill a 64-bit user frame from the user stack via ASI_AIUP;
	 * on fault, substitute the fill-magic pattern (RSF_FILL_MAGIC).
	 */
	.macro	tl1_fill_2_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
1856
	/*
	 * Fill a 32-bit user frame from the user stack via ASI_AIUP;
	 * on fault, substitute the fill-magic pattern.
	 */
	.macro	tl1_fill_3_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
1866
	/*
	 * Fill for mixed-mode stacks: a set low bit of %sp marks a
	 * 64-bit frame (redirect to tl1_fill_0_n); otherwise zero-extend
	 * %sp and fill a 32-bit frame.  Faults are fatal.
	 */
	.macro	tl1_fill_7_n
	btst	1, %sp
	bnz,a,pt %xcc, tl1_fill_0_n
	 nop
	srl	%sp, 0, %sp			! zero-extend the 32-bit %sp
	FILL(lduw, %sp, 4, EMPTY)
	restored
	retry
	.align	32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm
1879
1880 /*
1881 * This is used to spill windows that are still occupied with user
1882 * data on kernel entry to the pcb.
1883 */
/*
 * This is used to spill windows that are still occupied with user
 * data on kernel entry to the pcb.
 *
 * The window's %sp and registers are stored into the PCB_RWSP/PCB_RW
 * arrays at index PCB_NSAVED, which is then incremented.  ASP_REG
 * provides a small per-cpu scratch stack for saving %g1-%g3.
 */
ENTRY(tl1_spill_topcb)
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Free some globals for our use. */
	dec	24, ASP_REG
	stx	%g1, [ASP_REG + 0]
	stx	%g2, [ASP_REG + 8]
	stx	%g3, [ASP_REG + 16]

	ldx	[PCB_REG + PCB_NSAVED], %g1	! next free pcb window slot

	sllx	%g1, PTR_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	stx	%sp, [%g2 + PCB_RWSP]		! record the window's %sp

	sllx	%g1, RW_SHIFT, %g2
	add	%g2, PCB_REG, %g2
	SPILL(stx, %g2 + PCB_RW, 8, EMPTY)	! store the window registers

	inc	%g1
	stx	%g1, [PCB_REG + PCB_NSAVED]

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
	    , %g1, %g2, %g3, 7, 8, 9)
	rdpr	%tpc, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	rdpr	%tnpc, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	stx	%sp, [%g1 + KTR_PARM3]
	ldx	[PCB_REG + PCB_NSAVED], %g2
	stx	%g2, [%g1 + KTR_PARM4]
9:
#endif

	saved

	ldx	[ASP_REG + 16], %g3
	ldx	[ASP_REG + 8], %g2
	ldx	[ASP_REG + 0], %g1
	inc	24, ASP_REG
	retry
END(tl1_spill_topcb)
1927
	/*
	 * Fill \count unexpected TL1 spill vectors with "sir" so a stray
	 * spill resets instead of corrupting state.
	 */
	.macro	tl1_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm
1934
	/*
	 * Fill \count unexpected TL1 fill vectors with "sir".
	 */
	.macro	tl1_fill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm
1941
	/*
	 * Expand \count generic TL1 software trap vectors.
	 */
	.macro	tl1_soft	count
	.rept	\count
	tl1_gen	T_SOFT | T_KERNEL
	.endr
	.endm
1947
/*
 * The trap table proper.  It lives in its own section so the linker can
 * place it at the alignment the hardware requires (tl0_base is aligned
 * to 0x8000); each vector slot is expanded from the macros above.
 * Vector numbers are noted in the trailing comments.
 */
	.sect	.trap
	.globl	tl_trap_begin
tl_trap_begin:
	nop

	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved	8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn					! 0x8
	tl0_reserved	1				! 0x9
tl0_insn_error:
	tl0_gen		T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved	5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen		T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen		T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved	14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen		T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen		T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen		T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen		T_TAG_OVERFLOW			! 0x23
tl0_clean_window:
	clean_window					! 0x24
tl0_divide:
	tl0_gen		T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved	7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn					! 0x30
	tl0_reserved	1				! 0x31
tl0_data_error:
	tl0_gen		T_DATA_ERROR			! 0x32
	tl0_reserved	1				! 0x33
tl0_align:
	tl0_align					! 0x34
tl0_align_lddf:
	tl0_gen		T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen		T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen		T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved	9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level					! 0x41-0x4f
	tl0_reserved	16				! 0x50-0x5f
tl0_intr_vector:
	intr_vector					! 0x60
tl0_watch_phys:
	tl0_gen		T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen		T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen		T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss					! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss					! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot					! 0x6c
	tl0_reserved	16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n					! 0x80
tl0_spill_1_n:
	tl0_spill_1_n					! 0x84
	tl0_spill_bad	14				! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n					! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n					! 0xc4
	tl0_fill_bad	14				! 0xc8-0xff
tl0_soft:
	tl0_gen		T_SYSCALL			! 0x100
	tl0_gen		T_BREAKPOINT			! 0x101
	tl0_gen		T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved	1				! 0x103
	tl0_gen		T_CLEAN_WINDOW			! 0x104
	tl0_gen		T_RANGE_CHECK			! 0x105
	tl0_gen		T_FIX_ALIGNMENT			! 0x106
	tl0_gen		T_INTEGER_OVERFLOW		! 0x107
	tl0_gen		T_SYSCALL			! 0x108
	tl0_gen		T_SYSCALL			! 0x109
	tl0_fp_restore					! 0x10a
	tl0_reserved	5				! 0x10b-0x10f
	tl0_gen		T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen		T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen		T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen		T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen		T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen		T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen		T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen		T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen		T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen		T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen		T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen		T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen		T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen		T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen		T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen		T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved	32				! 0x120-0x13f
	tl0_gen		T_SYSCALL			! 0x140
	tl0_syscall					! 0x141
	tl0_gen		T_SYSCALL			! 0x142
	tl0_gen		T_SYSCALL			! 0x143
	tl0_reserved	188				! 0x144-0x1ff
2060
/*
 * TL1 (kernel-mode) half of the trap table; vector numbers 0x200-0x3ff.
 */
tl1_base:
	tl1_reserved	8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn					! 0x208
	tl1_reserved	1				! 0x209
tl1_insn_error:
	tl1_gen		T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved	5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen		T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen		T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved	14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_fp_disabled					! 0x220
tl1_fp_ieee:
	tl1_gen		T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen		T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen		T_TAG_OVERFLOW			! 0x223
tl1_clean_window:
	clean_window					! 0x224
tl1_divide:
	tl1_gen		T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved	7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn					! 0x230
	tl1_reserved	1				! 0x231
tl1_data_error:
	tl1_gen		T_DATA_ERROR			! 0x232
	tl1_reserved	1				! 0x233
tl1_align:
	tl1_align					! 0x234
tl1_align_lddf:
	tl1_gen		T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen		T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen		T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved	9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level					! 0x241-0x24f
	tl1_reserved	16				! 0x250-0x25f
tl1_intr_vector:
	intr_vector					! 0x260
tl1_watch_phys:
	tl1_gen		T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen		T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen		T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss					! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss					! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot					! 0x26c
	tl1_reserved	16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n					! 0x280
	tl1_spill_bad	1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n					! 0x288
tl1_spill_3_n:
	tl1_spill_3_n					! 0x28c
	tl1_spill_bad	3				! 0x290-0x29b
tl1_spill_7_n:
	tl1_spill_7_n					! 0x29c
tl1_spill_0_o:
	tl1_spill_0_o					! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o					! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o					! 0x2a8
	tl1_spill_bad	5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n					! 0x2c0
	tl1_fill_bad	1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n					! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n					! 0x2cc
	tl1_fill_bad	3				! 0x2d0-0x2db
tl1_fill_7_n:
	tl1_fill_7_n					! 0x2dc
	tl1_fill_bad	8				! 0x2e0-0x2ff
	tl1_reserved	1				! 0x300
tl1_breakpoint:
	tl1_gen		T_BREAKPOINT			! 0x301
	tl1_gen		T_RSTRWP_PHYS			! 0x302
	tl1_gen		T_RSTRWP_VIRT			! 0x303
	tl1_reserved	252				! 0x304-0x3ff

	.globl	tl_trap_end
tl_trap_end:
	nop
2158
2159 /*
2160 * User trap entry point
2161 *
2162 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2163 * u_long sfsr)
2164 *
2165 * This handles redirecting a trap back to usermode as a user trap. The user
2166 * program must have first registered a trap handler with the kernel using
2167 * sysarch(SPARC_UTRAP_INSTALL). The trap handler is passed enough state
2168 * for it to return to the trapping code directly, it will not return through
2169 * the kernel. The trap type is passed in %o0, all out registers must be
2170 * passed through to tl0_trap or to usermode untouched. Note that the
2171 * parameters passed in out registers may be used by the user trap handler.
2172 * Do not change the registers they are passed in or you will break the ABI.
2173 *
2174 * If the trap type allows user traps, setup state to execute the user trap
2175 * handler and bounce back to usermode, otherwise branch to tl0_trap.
2176 */
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap			! type >= UT_MAX: kernel handles it
	 nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0	! per-process utrap table
	brz,pt	%l0, tl0_trap			! no table installed
	 sllx	%o0, PTR_SHIFT, %l1
	ldx	[%l0 + %l1], %l0		! handler for this trap type
	brz,a,pt %l0, tl0_trap			! no handler for this type
	 nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead.  Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code.  Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	 mov	T_SPILL, %o0

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi.  The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs			! FPU on so %fsr can be stored
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs			! restore the previous %fprs

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Setup %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc			! "return" into the user handler

	/*
	 * Setup %wstate for return, clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Setup %tstate for return, change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Setup %sp.  Userland processes will crash if this is not setup.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)
2262
2263 /*
2264 * (Real) User trap entry point
2265 *
 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 *     u_int sfsr)
2268 *
2269 * The following setup has been performed:
2270 * - the windows have been split and the active user window has been saved
2271 * (maybe just to the pcb)
2272 * - we are on alternate globals and interrupts are disabled
2273 *
2274 * We switch to the kernel stack, build a trapframe, switch to normal
2275 * globals, enable interrupts and call trap.
2276 *
2277 * NOTE: We must be very careful setting up the per-cpu pointer. We know that
2278 * it has been pre-set in alternate globals, so we read it from there and setup
2279 * the normal %g7 *before* enabling interrupts. This avoids any possibility
2280 * of cpu migration and using the wrong pcpup.
2281 */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Snapshot the trap state registers before anything clobbers them. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/* Move the user windows into otherwin and switch to kernel wstate. */
1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/* Carve the trapframe out of the kernel stack below the pcb. */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* %fsr/%gsr can only be read with the FPU enabled. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	/*
	 * Copy PCB_REG/PCPU_REG from alternate to normal globals before
	 * interrupts can be enabled (see the NOTE above about migration).
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate	! interrupts back on

	/* The trapping window's outs are our ins after tl0_split. */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	/* Call the handler in %o2 so it "returns" into tl0_ret. */
	set	tl0_ret - 8, %o7
	jmpl	%o2, %g0
	 add	%sp, CCFSZ + SPOFF, %o0		! delay: &trapframe
END(tl0_trap)
2368
2369 /*
2370 * void tl0_intr(u_int level, u_int mask)
2371 */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Snapshot the trap state registers before anything clobbers them. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	wrpr	%o0, 0, %pil			! block same/lower-level intrs
	wr	%o1, 0, %clear_softint		! ack this softint

	/* Move the user windows into otherwin and switch to kernel wstate. */
	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/* Carve the trapframe out of the kernel stack below the pcb. */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* %fsr/%gsr can only be read with the FPU enabled. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	%o0, %l3			! keep the level for stats below
	mov	T_INTERRUPT, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	/* Copy PCB_REG/PCPU_REG to normal globals before enabling intrs. */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate	! interrupts back on

	/* The trapping window's outs are our ins after tl0_split. */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	/* Dispatch through the per-level handler table. */
	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	 add	%sp, CCFSZ + SPOFF, %o0		! delay: &trapframe

	/* %l3 contains PIL */
	SET(intrcnt, %l1, %l2)
	prefetcha [%l2] ASI_N, 1
	SET(pil_countp, %l1, %l0)
	sllx	%l3, 1, %l1
	lduh	[%l0 + %l1], %l0		! index into intrcnt
	sllx	%l0, 3, %l0
	add	%l0, %l2, %l0
	ldx	[%l0], %l1
	inc	%l1				! bump the per-PIL counter
	stx	%l1, [%l0]

	lduw	[PCPU(CNT) + V_INTR], %l0
	inc	%l0				! bump the global intr counter
	stw	%l0, [PCPU(CNT) + V_INTR]

	ba,a	%xcc, tl0_ret
	 nop
END(tl0_intr)
2482
2483 /*
2484 * Initiate return to usermode.
2485 *
2486 * Called with a trapframe on the stack. The window that was setup in
2487 * tl0_trap may have been used by "fast" trap handlers that pretend to be
2488 * leaf functions, so all ins and locals may have been clobbered since
2489 * then.
2490 *
2491 * This code is rather long and complicated.
2492 */
ENTRY(tl0_ret)
	/*
	 * Check for pending asts atomically with returning.  We must raise
	 * the PIL before checking, and if no asts are found the PIL must
	 * remain raised until the retry is executed, or we risk missing asts
	 * caused by interrupts occurring after the test.  If the PIL is
	 * lowered, as it is when we call ast, the check must be re-executed.
	 */
	wrpr	%g0, PIL_TICK, %pil
	ldx	[PCPU(CURTHREAD)], %l0
	lduw	[%l0 + TD_FLAGS], %l1
	set	TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
	and	%l1, %l2, %l1
	brz,a,pt %l1, 1f
	nop

	/*
	 * We have an AST.  Re-enable interrupts and handle it, then restart
	 * the return sequence.
	 */
	wrpr	%g0, 0, %pil
	call	ast
	add	%sp, CCFSZ + SPOFF, %o0		/* delay slot: arg0 = trapframe */
	ba,a	%xcc, tl0_ret
	nop

	/*
	 * Check for windows that were spilled to the pcb and need to be
	 * copied out.  This must be the last thing that is done before the
	 * return to usermode.  If there are still user windows in the cpu
	 * and we call a nested function after this, which causes them to be
	 * spilled to the pcb, they will not be copied out and the stack will
	 * be inconsistent.
	 */
1:	ldx	[PCB_REG + PCB_NSAVED], %l1
	brz,a,pt %l1, 2f
	nop
	/* Saved windows exist: have trap() copy them out, then retry. */
	wrpr	%g0, 0, %pil
	mov	T_SPILL, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	add	%sp, SPOFF + CCFSZ, %o0		/* delay slot: arg0 = trapframe */
	ba,a	%xcc, tl0_ret
	nop

	/*
	 * Restore the out and most global registers from the trapframe.
	 * The ins will become the outs when we restore below.
	 */
2:	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/*
	 * Load everything we need to restore below before disabling
	 * interrupts.
	 */
	ldx	[%sp + SPOFF + CCFSZ + TF_FPRS], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_GSR], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l5
	ldx	[%sp + SPOFF + CCFSZ + TF_WSTATE], %l6

	/*
	 * Disable interrupts to restore the special globals.  They are not
	 * saved and restored for all kernel traps, so an interrupt at the
	 * wrong time would clobber them.
	 */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/*
	 * Switch to alternate globals.  This frees up some registers we
	 * can use after the restore changes our window.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Drop %pil to zero.  It must have been zero at the time of the
	 * trap, since we were in usermode, but it was raised above in
	 * order to check for asts atomically.  We have interrupts disabled
	 * so any interrupts will not be serviced until we complete the
	 * return to usermode.
	 */
	wrpr	%g0, 0, %pil

	/*
	 * Save %fprs in an alternate global so it can be restored after the
	 * restore instruction below.  If we restore it before the restore,
	 * and the restore traps we may run for a while with floating point
	 * enabled in the kernel, which we want to avoid.
	 */
	mov	%l0, %g1

	/*
	 * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
	 * so we set it temporarily and then clear it.
	 */
	wr	%g0, FPRS_FEF, %fprs
	ldx	[%sp + SPOFF + CCFSZ + TF_FSR], %fsr
	wr	%l1, 0, %gsr
	wr	%g0, 0, %fprs

	/*
	 * Restore program counters.  This could be done after the restore
	 * but we're out of alternate globals to store them in...
	 */
	wrpr	%l2, 0, %tnpc
	wrpr	%l3, 0, %tpc

	/*
	 * Save %tstate in an alternate global and clear the %cwp field.  %cwp
	 * will be affected by the restore below and we need to make sure it
	 * points to the current window at that time, not the window that was
	 * active at the time of the trap.
	 */
	andn	%l4, TSTATE_CWP_MASK, %g2

	/*
	 * Restore %y.  Could also be below if we had more alternate globals.
	 */
	wr	%l5, 0, %y

	/*
	 * Setup %wstate for return.  We need to restore the user window state
	 * which we saved in wstate.other when we trapped.  We also need to
	 * set the transition bit so the restore will be handled specially
	 * if it traps, use the xor feature of wrpr to do that.
	 */
	srlx	%l6, WSTATE_OTHER_SHIFT, %g3
	wrpr	%g3, WSTATE_TRANSITION, %wstate

	/*
	 * Setup window management registers for return.  If not all user
	 * windows were spilled in the kernel %otherwin will be non-zero,
	 * so we need to transfer it to %canrestore to correctly restore
	 * those windows.  Otherwise everything gets set to zero and the
	 * restore below will fill a window directly from the user stack.
	 */
	rdpr	%otherwin, %o0
	wrpr	%o0, 0, %canrestore
	wrpr	%g0, 0, %otherwin
	wrpr	%o0, 0, %cleanwin

	/*
	 * Now do the restore.  If this instruction causes a fill trap which
	 * fails to fill a window from the user stack, we will resume at
	 * tl0_ret_fill_end and call back into the kernel.
	 */
	restore
tl0_ret_fill:

	/*
	 * We made it.  We're back in the window that was active at the time
	 * of the trap, and ready to return to usermode.
	 */

	/*
	 * Restore %fprs.  This was saved in an alternate global above.
	 */
	wr	%g1, 0, %fprs

	/*
	 * Fixup %tstate so the saved %cwp points to the current window and
	 * restore it.
	 */
	rdpr	%cwp, %g4
	wrpr	%g2, %g4, %tstate

	/*
	 * Restore the user window state.  The transition bit was set above
	 * for special handling of the restore, this clears it.
	 */
	wrpr	%g3, 0, %wstate

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	/*
	 * Return to usermode.
	 */
	retry
tl0_ret_fill_end:

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
	    , %l0, %l1, %l2, 7, 8, 9)
	rdpr	%pstate, %l1
	stx	%l1, [%l0 + KTR_PARM1]
	stx	%l6, [%l0 + KTR_PARM2]
	stx	%sp, [%l0 + KTR_PARM3]
9:
#endif

	/*
	 * The restore above caused a fill trap and the fill handler was
	 * unable to fill a window from the user stack.  The special fill
	 * handler recognized this and punted, sending us here (NOTE(review):
	 * presumably by matching %tpc against the tl0_ret_fill /
	 * tl0_ret_fill_end labels above — confirm against the fill handler).
	 * We need to carefully undo any state that was restored before the
	 * restore was executed and call trap again.  Trap will copyin a
	 * window from the user stack which will fault in the page we need so
	 * the restore above will succeed when we try again.  If this fails
	 * the process has trashed its stack, so we kill it.
	 */

	/*
	 * Restore the kernel window state.  This was saved in %l6 above, and
	 * since the restore failed we're back in the same window.
	 */
	wrpr	%l6, 0, %wstate

	/*
	 * Restore the normal globals which have predefined values in the
	 * kernel.  We clobbered them above restoring the user's globals
	 * so this is very important.
	 * XXX PSTATE_ALT must already be set.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate
	mov	PCB_REG, %o0
	mov	PCPU_REG, %o1
	wrpr	%g0, PSTATE_NORMAL, %pstate
	mov	%o0, PCB_REG
	mov	%o1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/*
	 * Simulate a fill trap and then start the whole return sequence over
	 * again.  This is special because it only copies in 1 window, not 2
	 * as we would for a normal failed fill.  This may be the first time
	 * the process has been run, so there may not be 2 windows worth of
	 * stack to copyin.
	 */
	mov	T_FILL_RET, %o0
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	call	trap
	add	%sp, SPOFF + CCFSZ, %o0		/* delay slot: arg0 = trapframe */
	ba,a	%xcc, tl0_ret
	nop
END(tl0_ret)
2759
2760 /*
2761 * Kernel trap entry point
2762 *
2763 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2764 * u_int sfsr)
2765 *
2766 * This is easy because the stack is already setup and the windows don't need
2767 * to be split. We build a trapframe and call trap(), the same as above, but
2768 * the outs don't need to be saved.
2769 */
ENTRY(tl1_trap)
	/* Snapshot the trap state registers before anything can clobber them. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	/*
	 * Drop back to trap level 1 now that the per-TL trap registers have
	 * been saved; tl1_ret raises %tl to 2 again before its retry.
	 */
	wrpr	%g0, 1, %tl

	/* Keep only the "other" window state bits and run with kernel wstate. */
	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	/* Build the trapframe: arguments (type, level, tar, sfar, sfsr)... */
	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	/* ...and the saved trap state. */
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	/*
	 * Save the special globals %g6/%g7 while on normal globals; stash
	 * PCB_REG/PCPU_REG in locals across the %pstate switch so they
	 * survive the change of global register set.
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* The caller's outs arrived in our ins; save them as the frame's outs. */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	/*
	 * Tail-call the handler whose address was passed in %o2, faking the
	 * return address (%o7 = tl1_ret - 8) so that when the handler
	 * returns it lands in tl1_ret.  The delay slot passes the trapframe
	 * pointer as the handler's first argument.
	 */
	set	tl1_ret - 8, %o7
	jmpl	%o2, %g0
	add	%sp, CCFSZ + SPOFF, %o0		/* delay slot: arg0 = trapframe */
END(tl1_trap)
2837
ENTRY(tl1_ret)
	/* Restore the outs (into our ins — the restore below shifts them back). */
	ldx	[%sp + SPOFF + CCFSZ + TF_O0], %i0
	ldx	[%sp + SPOFF + CCFSZ + TF_O1], %i1
	ldx	[%sp + SPOFF + CCFSZ + TF_O2], %i2
	ldx	[%sp + SPOFF + CCFSZ + TF_O3], %i3
	ldx	[%sp + SPOFF + CCFSZ + TF_O4], %i4
	ldx	[%sp + SPOFF + CCFSZ + TF_O5], %i5
	ldx	[%sp + SPOFF + CCFSZ + TF_O6], %i6
	ldx	[%sp + SPOFF + CCFSZ + TF_O7], %i7

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/* Reload the saved trap state from the trapframe. */
	ldx	[%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
	ldx	[%sp + SPOFF + CCFSZ + TF_TPC], %l1
	ldx	[%sp + SPOFF + CCFSZ + TF_TNPC], %l2
	ldx	[%sp + SPOFF + CCFSZ + TF_PIL], %l3
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	/*
	 * Skip restoring the normal %g6/%g7 if the trap pc lies inside the
	 * PROM address range [VM_MIN_PROM_ADDRESS, VM_MAX_PROM_ADDRESS].
	 * NOTE(review): presumably the PROM runs with its own normal globals
	 * which must not be overwritten with the kernel's saved values —
	 * confirm against the PROM entry/locking code.
	 */
	set	VM_MIN_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bl,a,pt %xcc, 1f
	nop
	set	VM_MAX_PROM_ADDRESS, %l5
	cmp	%l1, %l5
	bg,a,pt %xcc, 1f
	nop

	/* On normal globals, restore the special %g6/%g7. */
	wrpr	%g0, PSTATE_NORMAL, %pstate

	ldx	[%sp + SPOFF + CCFSZ + TF_G6], %g6
	ldx	[%sp + SPOFF + CCFSZ + TF_G7], %g7

	/*
	 * Switch to alternate globals so %g1-%g4 below survive the window
	 * change performed by the restore.
	 */
1:	wrpr	%g0, PSTATE_ALT, %pstate

	/* %tstate sans %cwp, plus pc/npc, carried across the restore. */
	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3

	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	/* Return to the window that was active at the time of the trap. */
	restore

	/* Raise %tl back to 2 so the retry below returns from the trap. */
	wrpr	%g0, 2, %tl

	/* Fold the current %cwp into the saved %tstate, then set pc/npc. */
	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_ret)
2909
2910 /*
2911 * void tl1_intr(u_int level, u_int mask)
2912 */
ENTRY(tl1_intr)
	/* Snapshot the trap state registers before anything can clobber them. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rdpr	%pil, %l3
	rd	%y, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	stx	%l3, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%i6, [%g1 + KTR_PARM5]
9:
#endif

	/* Block interrupts at or below this level and ack the softint mask. */
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	/* Drop back to trap level 1; we raise it to 2 again before the retry. */
	wrpr	%g0, 1, %tl

	/* Keep only the "other" window state bits and run with kernel wstate. */
	and	%l5, WSTATE_OTHER_MASK, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate

	/* Build the trapframe from the saved trap state. */
	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_PIL]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_Y]

	/* Keep the interrupt level in %l7; it survives the handler call. */
	mov	%o0, %l7
	mov	T_INTERRUPT | T_KERNEL, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	/*
	 * Save the normal globals %g1-%g5; stash PCB_REG/PCPU_REG in locals
	 * so they survive the switch of global register set.
	 */
	mov	PCB_REG, %l4
	mov	PCPU_REG, %l5
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	mov	%l4, PCB_REG
	mov	%l5, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate

	/* Look up and call the handler indexed by level; assert it exists. */
	SET(intr_handlers, %l5, %l4)
	sllx	%l7, IH_SHIFT, %l5
	ldx	[%l4 + %l5], %l5
	KASSERT(%l5, "tl1_intr: ih null")
	call	%l5
	add	%sp, CCFSZ + SPOFF, %o0		/* delay slot: arg0 = trapframe */

	/* %l7 contains PIL */
	/* Bump the per-PIL interrupt counter: intrcnt[pil_countp[pil]]++. */
	SET(intrcnt, %l5, %l4)
	prefetcha [%l4] ASI_N, 1
	SET(pil_countp, %l5, %l6)
	sllx	%l7, 1, %l5
	lduh	[%l5 + %l6], %l5
	sllx	%l5, 3, %l5
	add	%l5, %l4, %l4
	ldx	[%l4], %l5
	inc	%l5
	stx	%l5, [%l4]

	/* Bump the global vm interrupt statistic. */
	lduw	[PCPU(CNT) + V_INTR], %l4
	inc	%l4
	stw	%l4, [PCPU(CNT) + V_INTR]

	/* Reload %y; %l4 was reused for the counters above. */
	ldx	[%sp + SPOFF + CCFSZ + TF_Y], %l4

	ldx	[%sp + SPOFF + CCFSZ + TF_G1], %g1
	ldx	[%sp + SPOFF + CCFSZ + TF_G2], %g2
	ldx	[%sp + SPOFF + CCFSZ + TF_G3], %g3
	ldx	[%sp + SPOFF + CCFSZ + TF_G4], %g4
	ldx	[%sp + SPOFF + CCFSZ + TF_G5], %g5

	/*
	 * Switch to alternate globals so %g1-%g4 below survive the window
	 * change performed by the restore.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* %tstate sans %cwp, plus pc/npc, carried across the restore. */
	andn	%l0, TSTATE_CWP_MASK, %g1
	mov	%l1, %g2
	mov	%l2, %g3
	wrpr	%l3, 0, %pil
	wr	%l4, 0, %y

	/* Return to the window that was active at the time of the trap. */
	restore

	/* Raise %tl back to 2 so the retry below returns from the trap. */
	wrpr	%g0, 2, %tl

	/* Fold the current %cwp into the saved %tstate, then set pc/npc. */
	rdpr	%cwp, %g4
	wrpr	%g1, %g4, %tstate
	wrpr	%g2, 0, %tpc
	wrpr	%g3, 0, %tnpc

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%pil, %g3
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tstate, %g3
	stx	%g3, [%g2 + KTR_PARM3]
	rdpr	%tpc, %g3
	stx	%g3, [%g2 + KTR_PARM4]
	stx	%sp, [%g2 + KTR_PARM5]
9:
#endif

	retry
END(tl1_intr)
3036
	/*
	 * Marker label for the end of the trap handler text.
	 * NOTE(review): presumably paired with a tl_text_begin marker earlier
	 * in the file so the trap code's extent can be checked — confirm.
	 */
	.globl	tl_text_end
tl_text_end:
	nop
3040
3041 /*
3042 * Freshly forked processes come here when switched to for the first time.
3043 * The arguments to fork_exit() have been setup in the locals, we must move
3044 * them to the outs.
3045 */
ENTRY(fork_trampoline)
#if KTR_COMPILE & KTR_PROC
	CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[%g2 + TD_PROC], %g2
	add	%g2, P_COMM, %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%cwp, %g2
	stx	%g2, [%g1 + KTR_PARM3]
9:
#endif
	/*
	 * Move the fork_exit() arguments from the locals (where the switch
	 * code placed them) into the outs; the third move rides in the call's
	 * delay slot.  fork_exit() returns here and we exit to usermode via
	 * the standard return path.
	 */
	mov	%l0, %o0
	mov	%l1, %o1
	call	fork_exit
	mov	%l2, %o2			/* delay slot: arg2 */
	ba,a	%xcc, tl0_ret
	nop
END(fork_trampoline)
Cache object: a6b639875ef14a5a5976c25ec4d22f60
|