1 /*-
2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
14 * written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
29 */
30 /*-
31 * Copyright (c) 2001 Jake Burkholder.
32 * All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 #include <machine/asm.h>
57 __FBSDID("$FreeBSD$");
58
59 #include "opt_compat.h"
60 #include "opt_ddb.h"
61
62 #include <machine/asi.h>
63 #include <machine/asmacros.h>
64 #include <machine/frame.h>
65 #include <machine/fsr.h>
66 #include <machine/intr_machdep.h>
67 #include <machine/ktr.h>
68 #include <machine/pcb.h>
69 #include <machine/pstate.h>
70 #include <machine/trap.h>
71 #include <machine/tsb.h>
72 #include <machine/tstate.h>
73 #include <machine/utrap.h>
74 #include <machine/wstate.h>
75
76 #include "assym.s"
77
/*
 * Placeholder values for the kernel TSB mask and base address; the real
 * values are patched into the handlers at startup (see the
 * tl1_immu_miss_patch_* labels further down).
 */
#define	TSB_KERNEL_MASK	0x0
#define	TSB_KERNEL	0x0

/* Tell the assembler the V9 application registers are used on purpose. */
	.register %g2,#ignore
	.register %g3,#ignore
	.register %g6,#ignore
	.register %g7,#ignore
85
86 /*
87 * Atomically set the reference bit in a tte.
88 */
/*
 * TTE_SET_BIT: atomically OR "bit" into the data word of the tte that
 * r1 points to.  r1 is advanced to point at the tte data; r2 and r3 are
 * scratch, and on exit r2 holds the updated tte data.  The casxa loop
 * retries until the compare-and-swap succeeds.
 */
#define	TTE_SET_BIT(r1, r2, r3, bit) \
	add	r1, TTE_DATA, r1 ; \
	ldx	[r1], r2 ; \
9:	or	r2, bit, r3 ; \
	casxa	[r1] ASI_N, r2, r3 ; \
	cmp	r2, r3 ; \
	bne,pn	%xcc, 9b ; \
	mov	r3, r2

/* Convenience wrappers for the referenced and hardware-write bits. */
#define	TTE_SET_REF(r1, r2, r3)	TTE_SET_BIT(r1, r2, r3, TD_REF)
#define	TTE_SET_W(r1, r2, r3)	TTE_SET_BIT(r1, r2, r3, TD_W)
100
101 /*
102 * Macros for spilling and filling live windows.
103 *
104 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
105 * handler will not use more than 24 instructions total, to leave room for
106 * resume vectors which occupy the last 8 instructions.
107 */
108
/*
 * SPILL: store the 8 local and 8 in registers of the current window to
 * 16 consecutive slots of "size" bytes starting at "base", using the
 * given store instruction and address space identifier.
 */
#define	SPILL(storer, base, size, asi) \
	storer	%l0, [base + (0 * size)] asi ; \
	storer	%l1, [base + (1 * size)] asi ; \
	storer	%l2, [base + (2 * size)] asi ; \
	storer	%l3, [base + (3 * size)] asi ; \
	storer	%l4, [base + (4 * size)] asi ; \
	storer	%l5, [base + (5 * size)] asi ; \
	storer	%l6, [base + (6 * size)] asi ; \
	storer	%l7, [base + (7 * size)] asi ; \
	storer	%i0, [base + (8 * size)] asi ; \
	storer	%i1, [base + (9 * size)] asi ; \
	storer	%i2, [base + (10 * size)] asi ; \
	storer	%i3, [base + (11 * size)] asi ; \
	storer	%i4, [base + (12 * size)] asi ; \
	storer	%i5, [base + (13 * size)] asi ; \
	storer	%i6, [base + (14 * size)] asi ; \
	storer	%i7, [base + (15 * size)] asi

/*
 * FILL: the inverse of SPILL; load the 8 local and 8 in registers from
 * 16 consecutive slots of "size" bytes starting at "base".
 */
#define	FILL(loader, base, size, asi) \
	loader	[base + (0 * size)] asi, %l0 ; \
	loader	[base + (1 * size)] asi, %l1 ; \
	loader	[base + (2 * size)] asi, %l2 ; \
	loader	[base + (3 * size)] asi, %l3 ; \
	loader	[base + (4 * size)] asi, %l4 ; \
	loader	[base + (5 * size)] asi, %l5 ; \
	loader	[base + (6 * size)] asi, %l6 ; \
	loader	[base + (7 * size)] asi, %l7 ; \
	loader	[base + (8 * size)] asi, %i0 ; \
	loader	[base + (9 * size)] asi, %i1 ; \
	loader	[base + (10 * size)] asi, %i2 ; \
	loader	[base + (11 * size)] asi, %i3 ; \
	loader	[base + (12 * size)] asi, %i4 ; \
	loader	[base + (13 * size)] asi, %i5 ; \
	loader	[base + (14 * size)] asi, %i6 ; \
	loader	[base + (15 * size)] asi, %i7
144
/* No-op register move used as a workaround for UltraSPARC erratum #50. */
#define	ERRATUM50(reg)	mov reg, reg

/* Bytes of slack allowed below the base of the kernel stack. */
#define	KSTACK_SLOP	1024
148
149 /*
 * Sanity check the kernel stack and bail out if it's wrong.
151 * XXX: doesn't handle being on the panic stack.
152 */
/*
 * KSTACK_CHECK: verify that the (SPOFF-biased) %sp is pointer-aligned
 * and lies within the current thread's kernel stack (between
 * TD_KSTACK + KSTACK_SLOP and TD_KSTACK + KSTACK_PAGES * PAGE_SIZE);
 * branch to tl1_kstack_fault otherwise.  Uses 16 bytes of the scratch
 * stack at ASP_REG to preserve %g1 and %g2; each failing branch pops
 * that scratch space again in its delay slot.
 */
#define	KSTACK_CHECK \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	add	%sp, SPOFF, %g1 ; \
	andcc	%g1, (1 << PTR_SHIFT) - 1, %g0 ; \
	bnz,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	ldx	[PCPU(CURTHREAD)], %g2 ; \
	ldx	[%g2 + TD_KSTACK], %g2 ; \
	add	%g2, KSTACK_SLOP, %g2 ; \
	subcc	%g1, %g2, %g1 ; \
	ble,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	set	KSTACK_PAGES * PAGE_SIZE, %g2 ; \
	cmp	%g1, %g2 ; \
	bgt,a	%xcc, tl1_kstack_fault ; \
	inc	16, ASP_REG ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG
174
	/* Marks the beginning of the trap-handler text. */
	.globl	tl_text_begin
tl_text_begin:
	nop
178
/*
 * Handle a kernel stack fault detected by KSTACK_CHECK.  Unwind nested
 * trap levels down to TL 2 (tracing each level if KTR_TRAP is compiled
 * in), then reset the register window state, switch to the scratch
 * stack at ASP_REG and call trap() with T_KSTACK_FAULT.
 */
ENTRY(tl1_kstack_fault)
	rdpr	%tl, %g1
1:	cmp	%g1, 2
	be,a	2f
	nop

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
	    , %g2, %g3, %g4, 7, 8, 9)
	rdpr	%tl, %g3
	stx	%g3, [%g2 + KTR_PARM1]
	rdpr	%tpc, %g3
	/*
	 * The format string takes three parameters; the second and third
	 * stores must target PARM2 and PARM3, not PARM1 three times.
	 */
	stx	%g3, [%g2 + KTR_PARM2]
	rdpr	%tnpc, %g3
	stx	%g3, [%g2 + KTR_PARM3]
9:
#endif

	/* Pop one trap level and re-check. */
	sub	%g1, 1, %g1
	wrpr	%g1, 0, %tl
	ba,a	%xcc, 1b
	nop

2:
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	add	%sp, SPOFF, %g2
	stx	%g2, [%g1 + KTR_PARM1]
	ldx	[PCPU(CURTHREAD)], %g2
	ldx	[%g2 + TD_KSTACK], %g2
	stx	%g2, [%g1 + KTR_PARM2]
	rdpr	%canrestore, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	rdpr	%cansave, %g2
	stx	%g2, [%g1 + KTR_PARM4]
	rdpr	%otherwin, %g2
	stx	%g2, [%g1 + KTR_PARM5]
	rdpr	%wstate, %g2
	stx	%g2, [%g1 + KTR_PARM6]
9:
#endif

	/* Reset the register windows to a sane, empty configuration. */
	wrpr	%g0, 0, %canrestore
	wrpr	%g0, 6, %cansave
	wrpr	%g0, 0, %otherwin
	wrpr	%g0, WSTATE_KERNEL, %wstate

	/* Run on the scratch stack, with no frame pointer. */
	sub	ASP_REG, SPOFF + CCFSZ, %sp
	clr	%fp

	set	trap, %o2
	ba	%xcc, tl1_trap
	mov	T_KSTACK_FAULT | T_KERNEL, %o0
END(tl1_kstack_fault)
235
236 /*
237 * Magic to resume from a spill or fill trap. If we get an alignment or an
238 * mmu fault during a spill or a fill, this macro will detect the fault and
239 * resume at a set instruction offset in the trap handler.
240 *
241 * To check if the previous trap was a spill/fill we convert the trapped pc
242 * to a trap type and verify that it is in the range of spill/fill vectors.
243 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
244 * tl bit allows us to detect both ranges with one test.
245 *
246 * This is:
247 * 0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
248 *
249 * To calculate the new pc we take advantage of the xor feature of wrpr.
250 * Forcing all the low bits of the trapped pc on we can produce any offset
251 * into the spill/fill vector. The size of a spill/fill trap vector is 0x80.
252 *
253 * 0x7f ^ 0x1f == 0x60
254 * 0x1f == (0x80 - 0x60) - 1
255 *
256 * Which are the offset and xor value used to resume from alignment faults.
257 */
258
259 /*
260 * Determine if we have trapped inside of a spill/fill vector, and if so resume
261 * at a fixed instruction offset in the trap vector. Must be called on
262 * alternate globals.
263 */
/*
 * Arguments: stxa_g0_sfsr is an instruction sequence (or EMPTY) run just
 * before resuming, used to clear the sfsr when required; xor is the value
 * xored by wrpr with the all-low-bits-forced trapped pc to produce the
 * resume address within the spill/fill vector.  %g1 and %g2 are
 * preserved on the scratch stack at ASP_REG.  If the trapped pc is not
 * inside a spill/fill vector, falls through to the code following the
 * macro (label 9).
 */
#define	RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
	dec	16, ASP_REG ; \
	stx	%g1, [ASP_REG + 0] ; \
	stx	%g2, [ASP_REG + 8] ; \
	rdpr	%tpc, %g1 ; \
	ERRATUM50(%g1) ; \
	rdpr	%tba, %g2 ; \
	sub	%g1, %g2, %g2 ; \
	srlx	%g2, 5, %g2 ; \
	andn	%g2, 0x200, %g2 ; \
	cmp	%g2, 0x80 ; \
	blu,pt	%xcc, 9f ; \
	cmp	%g2, 0x100 ; \
	bgeu,pt	%xcc, 9f ; \
	or	%g1, 0x7f, %g1 ; \
	wrpr	%g1, xor, %tnpc ; \
	stxa_g0_sfsr ; \
	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG ; \
	done ; \
9:	ldx	[ASP_REG + 8], %g2 ; \
	ldx	[ASP_REG + 0], %g1 ; \
	inc	16, ASP_REG
288
289 /*
290 * For certain faults we need to clear the sfsr mmu register before returning.
291 */
/*
 * For certain faults we need to clear the sfsr mmu register before returning.
 */
#define	RSF_CLR_SFSR \
	wr	%g0, ASI_DMMU, %asi ; \
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi

/*
 * Convert an instruction offset within a 0x80-byte spill/fill vector
 * into the xor constant used with wrpr to resume at that offset.
 */
#define	RSF_XOR(off)	((0x80 - off) - 1)

/*
 * Instruction offsets in spill and fill trap handlers for handling certain
 * nested traps, and corresponding xor constants for wrpr.
 */
#define	RSF_OFF_ALIGN	0x60
#define	RSF_OFF_MMU	0x70

/* Resume after an alignment fault, clearing the sfsr. */
#define	RESUME_SPILLFILL_ALIGN \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
/* Resume after an mmu fault; the sfsr is left untouched. */
#define	RESUME_SPILLFILL_MMU \
	RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
/* Resume after an mmu fault, clearing the sfsr. */
#define	RESUME_SPILLFILL_MMU_CLR_SFSR \
	RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))

/*
 * Constant to add to %tnpc when taking a fill trap just before returning to
 * user mode.
 */
#define	RSF_FILL_INC	tl0_ret_fill_end - tl0_ret_fill
317
318 /*
319 * Retry a spill or fill with a different wstate due to an alignment fault.
320 * We may just be using the wrong stack offset.
321 */
/*
 * Retry a spill or fill with a different wstate due to an alignment fault.
 * We may just be using the wrong stack offset.
 */
#define	RSF_ALIGN_RETRY(ws) \
	wrpr	%g0, (ws), %wstate ; \
	retry ; \
	.align	16

/*
 * Generate a T_SPILL or T_FILL trap if the window operation fails.
 * The trap type is passed to tl0_sftrap in %g2.
 */
#define	RSF_TRAP(type) \
	ba	%xcc, tl0_sftrap ; \
	mov	type, %g2 ; \
	.align	16

/*
 * Game over if the window operation fails; rsf_fatal resets the machine.
 */
#define	RSF_FATAL(type) \
	ba	%xcc, rsf_fatal ; \
	mov	type, %g2 ; \
	.align	16

/*
 * Magic to resume from a failed fill a few instructions after the
 * corresponding restore.  This is used on return from the kernel to
 * usermode.
 */
#define	RSF_FILL_MAGIC \
	rdpr	%tnpc, %g1 ; \
	add	%g1, RSF_FILL_INC, %g1 ; \
	wrpr	%g1, 0, %tnpc ; \
	done ; \
	.align	16

/*
 * Spill to the pcb if a spill to the user stack in kernel mode fails.
 */
#define	RSF_SPILL_TOPCB \
	ba,a	%xcc, tl1_spill_topcb ; \
	nop ; \
	.align	16
361
/*
 * Fatal window spill/fill trap: trace the trap type (left in %g2 by
 * RSF_FATAL), sanity check the kernel stack, and reset the machine.
 */
ENTRY(rsf_fatal)
#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
	    , %g1, %g3, %g4, 7, 8, 9)
	rdpr	%tt, %g3
	stx	%g3, [%g1 + KTR_PARM1]
	stx	%g2, [%g1 + KTR_PARM2]
9:
#endif

	KSTACK_CHECK

	/* Software-initiated reset; this does not return. */
	sir
END(rsf_fatal)
376
	/*
	 * Storage for the interrupt name and counter tables, sized by the
	 * machine-dependent interrupt vector constants.  The e* symbols
	 * mark the ends of the tables.
	 */
	.comm	intrnames, IV_NAMLEN
	.comm	eintrnames, 0

	.comm	intrcnt, IV_MAX * 8
	.comm	eintrcnt, 0
382
383 /*
384 * Trap table and associated macros
385 *
386 * Due to its size a trap table is an inherently hard thing to represent in
387 * code in a clean way. There are approximately 1024 vectors, of 8 or 32
388 * instructions each, many of which are identical. The way that this is
 * laid out is the instructions (8 or 32) for the actual trap vector appear
390 * as an AS macro. In general this code branches to tl0_trap or tl1_trap,
391 * but if not supporting code can be placed just after the definition of the
392 * macro. The macros are then instantiated in a different section (.trap),
393 * which is setup to be placed by the linker at the beginning of .text, and the
394 * code around the macros is moved to the end of trap table. In this way the
395 * code that must be sequential in memory can be split up, and located near
396 * its supporting code so that it is easier to follow.
397 */
398
399 /*
400 * Clean window traps occur when %cleanwin is zero to ensure that data
401 * is not leaked between address spaces in registers.
402 */
	.macro	clean_window
	/* Zero the out and local registers of the window being cleaned. */
	clr	%o0
	clr	%o1
	clr	%o2
	clr	%o3
	clr	%o4
	clr	%o5
	clr	%o6
	clr	%o7
	clr	%l0
	clr	%l1
	clr	%l2
	clr	%l3
	clr	%l4
	clr	%l5
	clr	%l6
	/* Bump %cleanwin using %l7 as scratch, then clear %l7 as well. */
	rdpr	%cleanwin, %l7
	inc	%l7
	wrpr	%l7, 0, %cleanwin
	clr	%l7
	retry
	.align	128
	.endm
426
427 /*
428 * Stack fixups for entry from user mode. We are still running on the
429 * user stack, and with its live registers, so we must save soon. We
430 * are on alternate globals so we do have some registers. Set the
 * transitional window state, and do the save. If this traps, we
 * attempt to spill a window to the user stack. If this fails,
433 * we spill the window to the pcb and continue. Spilling to the pcb
434 * must not fail.
435 *
436 * NOTE: Must be called with alternate globals and clobbers %g1.
437 */
438
	/*
	 * Set the transitional window state and save into a new window
	 * (see the stack fixup discussion above).  Clobbers %g1.
	 */
	.macro	tl0_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_TRANSITION, %wstate
	save
	.endm

	/*
	 * Common setup for user-mode traps: split the windows, zero the
	 * second argument, point %o2 at trap() and branch to tl0_utrap
	 * with the trap type in %o0.
	 */
	.macro	tl0_setup type
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_utrap
	mov	\type, %o0
	.endm
452
453 /*
454 * Generic trap type. Call trap() with the specified type.
455 */
	/*
	 * Generic TL 0 trap vector: call trap() with the specified type.
	 * Padded to the 32-byte vector size.
	 */
	.macro	tl0_gen	type
	tl0_setup \type
	.align	32
	.endm

	/*
	 * This is used to suck up the massive swaths of reserved trap types.
	 * Generates count "reserved" trap vectors.
	 */
	.macro	tl0_reserved count
	.rept	\count
	tl0_gen	T_RESERVED
	.endr
	.endm
470
	/*
	 * Set the nested window state and save, allocating room for a
	 * trapframe on the kernel stack.  Clobbers %g1.
	 */
	.macro	tl1_split
	rdpr	%wstate, %g1
	wrpr	%g1, WSTATE_NESTED, %wstate
	save	%sp, -(CCFSZ + TF_SIZEOF), %sp
	.endm

	/*
	 * Common setup for kernel-mode traps: split the windows and call
	 * trap() via tl1_trap with T_KERNEL or'ed into the type.
	 */
	.macro	tl1_setup type
	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	mov	\type | T_KERNEL, %o0
	.endm

	/* Generic TL 1 trap vector, padded to 32 bytes. */
	.macro	tl1_gen	type
	tl1_setup \type
	.align	32
	.endm

	/* Generate count reserved TL 1 trap vectors. */
	.macro	tl1_reserved count
	.rept	\count
	tl1_gen	T_RESERVED
	.endr
	.endm
495
	/*
	 * Instruction access exception from user mode: capture %tpc and
	 * the I-MMU sfsr, clear the sfsr, and hand off to tl0_sfsr_trap
	 * with the trap type in %g2, fault pc in %g3 and sfsr in %g4.
	 */
	.macro	tl0_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_INSTRUCTION_EXCEPTION, %g2
	.align	32
	.endm

	/*
	 * Data access exception from user mode: capture the D-MMU sfar
	 * and sfsr, clear the sfsr, and hand off to tl0_sfsr_trap.
	 */
	.macro	tl0_data_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_DATA_EXCEPTION, %g2
	.align	32
	.endm

	/*
	 * Misaligned access from user mode: same bookkeeping as the data
	 * access exception, with a different trap type.
	 */
	.macro	tl0_align
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl0_sfsr_trap
	mov	T_MEM_ADDRESS_NOT_ALIGNED, %g2
	.align	32
	.endm
530
/*
 * Common code for user faults that carry fault address/status:
 * %g2 = trap type, %g3 = fault address (or %tpc), %g4 = sfsr.
 * These are forwarded to trap() as %o0, %o4 and %o5 respectively.
 */
ENTRY(tl0_sfsr_trap)
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	mov	%g2, %o0
END(tl0_sfsr_trap)
540
	/*
	 * Interrupt from user mode: call tl0_intr with the level in %o0
	 * and the corresponding level mask in %o1.
	 */
	.macro	tl0_intr level, mask
	tl0_split
	set	\mask, %o1
	ba	%xcc, tl0_intr
	mov	\level, %o0
	.align	32
	.endm

/* Instantiate an interrupt vector for the given level and trap level. */
#define	INTR(level, traplvl) \
	tl ## traplvl ## _intr	level, 1 << level

/* The tick interrupt uses PIL_TICK and a mask of 1. */
#define	TICK(traplvl) \
	tl ## traplvl ## _intr	PIL_TICK, 1

/*
 * Generate vectors for interrupt levels 1-15; the slot that would be
 * level 14 is the tick interrupt.
 */
#define	INTR_LEVEL(tl) \
	INTR(1, tl) ; \
	INTR(2, tl) ; \
	INTR(3, tl) ; \
	INTR(4, tl) ; \
	INTR(5, tl) ; \
	INTR(6, tl) ; \
	INTR(7, tl) ; \
	INTR(8, tl) ; \
	INTR(9, tl) ; \
	INTR(10, tl) ; \
	INTR(11, tl) ; \
	INTR(12, tl) ; \
	INTR(13, tl) ; \
	TICK(tl) ; \
	INTR(15, tl) ;

	/* All TL 0 interrupt level vectors. */
	.macro	tl0_intr_level
	INTR_LEVEL(0)
	.endm
575
	/*
	 * Interrupt vector trap: if the interrupt receive status register
	 * shows a pending vector (IRSR_BUSY), branch to the intr_vector
	 * entry point (NOTE(review): defined outside this chunk);
	 * otherwise the interrupt is spurious and we reset (sir).
	 */
	.macro	intr_vector
	ldxa	[%g0] ASI_INTR_RECEIVE, %g1
	andcc	%g1, IRSR_BUSY, %g0
	bnz,a,pt %xcc, intr_vector
	nop
	sir
	.align	32
	.endm
584
	.macro	tl0_immu_miss
	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_IMMU, %asi
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.
	 */
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: 3 bits per walker step above PAGE_SHIFT (%g3 = 3 * %g2 +
	 * PAGE_SHIFT).
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the tte bucket address.
	 */
	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the tte tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the ttes in this bucket.
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and executable and that the tte tags match.
	 */
	brgez,pn %g7, 3f
	andcc	%g7, TD_EXEC, %g0
	bz,pn	%xcc, 3f
	cmp	%g3, %g6
	bne,pn	%xcc, 3f
	EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_immu_miss_set_ref
	nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next tte in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user tsb, call c code.
	 */
	ba,a	%xcc, tl0_immu_miss_trap
	.align	128
	.endm
693
ENTRY(tl0_immu_miss_set_ref)
	/*
	 * Set the reference bit.  On entry %g4 points at the tte and %g1
	 * holds the tag access value; on return %g2 holds the updated
	 * tte data.
	 */
	TTE_SET_REF(%g4, %g2, %g3)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl0_immu_miss_set_ref)
713
/*
 * The instruction miss could not be satisfied from the user tsb; call
 * trap() with T_INSTRUCTION_MISS, passing the tag access value in %o3.
 */
ENTRY(tl0_immu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_IMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register, and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	mov	T_INSTRUCTION_MISS, %o0
END(tl0_immu_miss_trap)
742
	.macro	tl0_dmmu_miss
	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.  The tl1 dmmu miss handler
	 * enters here (tl1_dmmu_miss_user) for misses on user addresses —
	 * NOTE(review): the tl1 handler is outside this chunk; confirm.
	 */
tl1_dmmu_miss_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: 3 bits per walker step above PAGE_SHIFT.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the tte bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the tte tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the ttes in this bucket.
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, 3f
	cmp	%g3, %g6
	bne,pn	%xcc, 3f
	EMPTY

	/*
	 * We matched a tte, load the tlb.
	 */

	/*
	 * Set the reference bit, if it's currently clear.
	 */
	andcc	%g7, TD_REF, %g0
	bz,a,pn	%xcc, tl0_dmmu_miss_set_ref
	nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry

	/*
	 * Advance to the next tte in this bucket, and check the low bits
	 * of the bucket pointer to see if we've finished the bucket.
	 */
3:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user tsb, call c code.
	 */
	ba,a	%xcc, tl0_dmmu_miss_trap
	.align	128
	.endm
850
ENTRY(tl0_dmmu_miss_set_ref)
	/*
	 * Set the reference bit.  On entry %g4 points at the tte and %g1
	 * holds the tag access value; on return %g2 holds the updated
	 * tte data.
	 */
	TTE_SET_REF(%g4, %g2, %g3)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 1f
	nop

	/*
	 * Load the tte tag and data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl0_dmmu_miss_set_ref)
870
/*
 * The data miss could not be satisfied from the user tsb.  If the miss
 * came from user mode (TL 1 here), call trap() with T_DATA_MISS; if it
 * came from the kernel (TL > 1, via tl1_dmmu_miss_user), first try to
 * resume inside a spill/fill vector, else call trap() with
 * T_DATA_MISS | T_KERNEL.
 */
ENTRY(tl0_dmmu_miss_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	nop

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	/*
	 * Save the tag access register and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl0_utrap
	mov	T_DATA_MISS, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU

	/*
	 * Reload the tag access register.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	ba	%xcc, tl1_trap
	mov	T_DATA_MISS | T_KERNEL, %o0
END(tl0_dmmu_miss_trap)
924
	/*
	 * Data protection fault vector: the real handler does not fit in
	 * the vector, so branch out of line to tl0_dmmu_prot_1.
	 */
	.macro	tl0_dmmu_prot
	ba,a	%xcc, tl0_dmmu_prot_1
	nop
	.align	128
	.endm
930
/*
 * Data protection fault: walk the user tsb looking for a matching tte
 * with the software-writable bit set; if found, set the hardware write
 * bit, flush the stale TLB entry and reload.  Otherwise fall through to
 * tl0_dmmu_prot_trap.
 */
ENTRY(tl0_dmmu_prot_1)
	/*
	 * Load the virtual page number and context from the tag access
	 * register.  We ignore the context.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g1

	/*
	 * Initialize the page size walker.  The tl1 dmmu protection
	 * handler enters here (tl1_dmmu_prot_user) for faults on user
	 * addresses — NOTE(review): the tl1 handler is outside this chunk.
	 */
tl1_dmmu_prot_user:
	mov	TS_MIN, %g2

	/*
	 * Loop over all supported page sizes.
	 */

	/*
	 * Compute the page shift for the page size we are currently looking
	 * for: 3 bits per walker step above PAGE_SHIFT.
	 */
1:	add	%g2, %g2, %g3
	add	%g3, %g2, %g3
	add	%g3, PAGE_SHIFT, %g3

	/*
	 * Extract the virtual page number from the contents of the tag
	 * access register.
	 */
	srlx	%g1, %g3, %g3

	/*
	 * Compute the tte bucket address.
	 */
	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g5
	and	%g3, TSB_BUCKET_MASK, %g4
	sllx	%g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
	add	%g4, %g5, %g4

	/*
	 * Compute the tte tag target.
	 */
	sllx	%g3, TV_SIZE_BITS, %g3
	or	%g3, %g2, %g3

	/*
	 * Loop over the ttes in this bucket.
	 */

	/*
	 * Load the tte.  Note that this instruction may fault, clobbering
	 * the contents of the tag access register, %g5, %g6, and %g7.  We
	 * do not use %g5, and %g6 and %g7 are not used until this instruction
	 * completes successfully.
	 */
2:	ldda	[%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that it's valid and writable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, 4f
	andcc	%g7, TD_SW, %g0
	bz,pn	%xcc, 4f
	cmp	%g3, %g6
	bne,pn	%xcc, 4f
	nop

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g4, %g2, %g3)

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	srlx	%g1, PAGE_SHIFT, %g3
	sllx	%g3, PAGE_SHIFT, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g2, 3f
	or	%g2, TD_W, %g2

	/*
	 * Load the tte data into the tlb and retry the instruction.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	stxa	%g2, [%g0] ASI_DTLB_DATA_IN_REG
3:	retry

	/*
	 * Check the low bits to see if we've finished the bucket.
	 */
4:	add	%g4, 1 << TTE_SHIFT, %g4
	andcc	%g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
	bnz,pt	%xcc, 2b
	EMPTY

	/*
	 * See if we just checked the largest page size, and advance to the
	 * next one if not.
	 */
	cmp	%g2, TS_MAX
	bne,pt	%xcc, 1b
	add	%g2, 1, %g2

	/*
	 * Not in user tsb, call c code.
	 */
	ba,a	%xcc, tl0_dmmu_prot_trap
	nop
END(tl0_dmmu_prot_1)
1049
/*
 * The protection fault could not be satisfied from the user tsb.  Read
 * and clear the D-MMU fault registers and call trap() with
 * T_DATA_PROTECTION; kernel-mode faults first attempt to resume inside
 * a spill/fill vector.
 */
ENTRY(tl0_dmmu_prot_trap)
	/*
	 * Put back the contents of the tag access register, in case we
	 * faulted.
	 */
	stxa	%g1, [%g0 + AA_DMMU_TAR] %asi
	membar	#Sync

	/*
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Check if we actually came from the kernel.
	 */
	rdpr	%tl, %g1
	cmp	%g1, 1
	bgt,a,pn %xcc, 1f
	nop

	/*
	 * Load the tar, sfar and sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Save the mmu registers and call common trap code.
	 */
	tl0_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl0_utrap
	mov	T_DATA_PROTECTION, %o0

	/*
	 * Handle faults during window spill/fill.
	 */
1:	RESUME_SPILLFILL_MMU_CLR_SFSR

	/*
	 * Load the sfar, sfsr and tar.  Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	mov	T_DATA_PROTECTION | T_KERNEL, %o0
END(tl0_dmmu_prot_trap)
1115
	/*
	 * Spill a 64-bit user window to the user stack (8-byte stores,
	 * %sp biased by SPOFF) through the user ASI.  The RSF_TRAP resume
	 * vectors convert faults in the spill into T_SPILL traps.
	 */
	.macro	tl0_spill_0_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/*
	 * Spill a 32-bit user window (4-byte stores, unbiased %sp) —
	 * presumably for 32-bit user processes; TODO confirm.
	 */
	.macro	tl0_spill_1_n
	wr	%g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align	32
	RSF_TRAP(T_SPILL)
	RSF_TRAP(T_SPILL)
	.endm

	/* Fill a 64-bit user window from the user stack. */
	.macro	tl0_fill_0_n
	wr	%g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm

	/* Fill a 32-bit user window from the user stack. */
	.macro	tl0_fill_1_n
	wr	%g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align	32
	RSF_TRAP(T_FILL)
	RSF_TRAP(T_FILL)
	.endm
1155
/*
 * Spill/fill fallback: restore the original window pointer from %tstate
 * and call trap() with the type left in %g2 by RSF_TRAP.
 */
ENTRY(tl0_sftrap)
	rdpr	%tstate, %g1
	and	%g1, TSTATE_CWP_MASK, %g1
	wrpr	%g1, 0, %cwp
	tl0_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl0_trap
	mov	%g2, %o0
END(tl0_sftrap)
1166
	/* Unused spill vectors: any use is fatal (sir = reset). */
	.macro	tl0_spill_bad	count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/* Unused fill vectors: any use is fatal. */
	.macro	tl0_fill_bad count
	.rept	\count
	sir
	.align	128
	.endr
	.endm

	/* System call trap: dispatch to syscall() via tl0_trap. */
	.macro	tl0_syscall
	tl0_split
	clr	%o1
	set	syscall, %o2
	ba	%xcc, tl0_trap
	mov	T_SYSCALL, %o0
	.align	32
	.endm
1189
	/* Vector stub: the real handler is the tl0_fp_restore entry below. */
	.macro	tl0_fp_restore
	ba,a	%xcc, tl0_fp_restore
	nop
	.align	32
	.endm

/*
 * Restore the user floating point state from the pcb: clear the PCB_FEF
 * flag, enable the FPU, and block-load the fp register file in four
 * 64-byte chunks from PCB_UFP.
 */
ENTRY(tl0_fp_restore)
	ldx	[PCB_REG + PCB_FLAGS], %g1
	andn	%g1, PCB_FEF, %g1
	stx	%g1, [PCB_REG + PCB_FLAGS]

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
	membar	#Sync
	done
END(tl0_fp_restore)
1210
	/*
	 * Instruction access exception in kernel mode: capture %tpc and
	 * the I-MMU sfsr, clear the sfsr, and hand off to
	 * tl1_insn_exceptn_trap with the trap type in %g2.
	 */
	.macro	tl1_insn_excptn
	wrpr	%g0, PSTATE_ALT, %pstate
	wr	%g0, ASI_IMMU, %asi
	rdpr	%tpc, %g3
	ldxa	[%g0 + AA_IMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_IMMU_SFSR] %asi
	membar	#Sync
	ba	%xcc, tl1_insn_exceptn_trap
	mov	T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
	.align	32
	.endm

/*
 * Call trap() with the type in %g2, fault pc in %g3 and sfsr in %g4
 * (passed as %o0, %o4 and %o5).
 */
ENTRY(tl1_insn_exceptn_trap)
	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4
	mov	%g4, %o5
	ba	%xcc, tl1_trap
	mov	%g2, %o0
END(tl1_insn_exceptn_trap)
1232
	/* Vector stub: branch out of line to the real handler. */
	.macro	tl1_fp_disabled
	ba,a	%xcc, tl1_fp_disabled_1
	nop
	.align	32
	.endm

/*
 * FPU disabled in kernel mode.  If the trapped pc lies within the
 * fpu_fault_begin..fpu_fault_begin+fpu_fault_size region the fault is
 * expected: enable the FPU, block-load the kernel fp state from PCB_KFP
 * and retry.  Otherwise call trap() with T_FP_DISABLED | T_KERNEL.
 */
ENTRY(tl1_fp_disabled_1)
	rdpr	%tpc, %g1
	set	fpu_fault_begin, %g2
	sub	%g1, %g2, %g1
	cmp	%g1, fpu_fault_size
	bgeu,a,pn %xcc, 1f
	nop

	wr	%g0, FPRS_FEF, %fprs
	wr	%g0, ASI_BLK_S, %asi
	ldda	[PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
	ldda	[PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
	ldda	[PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
	ldda	[PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
	membar	#Sync
	retry

1:	tl1_split
	clr	%o1
	set	trap, %o2
	ba	%xcc, tl1_trap
	mov	T_FP_DISABLED | T_KERNEL, %o0
END(tl1_fp_disabled_1)
1262
/*
 * TL1 data access exception vector: switch to alternate globals and
 * branch to the out-of-line handler.
 */
	.macro tl1_data_excptn
	wrpr %g0, PSTATE_ALT, %pstate
	ba,a %xcc, tl1_data_excptn_trap
	nop
	.align 32
	.endm
1269
ENTRY(tl1_data_excptn_trap)
	/*
	 * If the fault happened in a spill/fill handler, resume there
	 * (clearing the sfsr); otherwise fall through to the common sfsr
	 * trap path with the data exception trap type in %g2.
	 */
	RESUME_SPILLFILL_MMU_CLR_SFSR
	ba	%xcc, tl1_sfsr_trap
	mov	T_DATA_EXCEPTION | T_KERNEL, %g2	! delay slot
END(tl1_data_excptn_trap)
1275
/*
 * TL1 memory alignment vector stub; handler is out of line.
 */
	.macro tl1_align
	ba,a %xcc, tl1_align_trap
	nop
	.align 32
	.endm
1281
ENTRY(tl1_align_trap)
	/*
	 * If the unaligned access happened in a spill/fill handler,
	 * resume there; otherwise go to the common sfsr trap path with
	 * the alignment trap type in %g2.
	 */
	RESUME_SPILLFILL_ALIGN
	ba	%xcc, tl1_sfsr_trap
	mov	T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2	! delay slot
END(tl1_align_trap)
1287
ENTRY(tl1_sfsr_trap)
	/*
	 * Common TL1 DMMU fault finish: capture the fault address (sfar)
	 * and status (sfsr), clear the sfsr, then call trap() with the
	 * trap type the caller left in %g2.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3	! fault address
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4	! fault status
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi	! clear for next fault
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g3, %o4			! sfar
	mov	%g4, %o5			! sfsr
	ba	%xcc, tl1_trap
	mov	%g2, %o0			! delay slot: trap type
END(tl1_sfsr_trap)
1303
/*
 * TL1 interrupt vector: split windows and call tl1_intr with the
 * interrupt level in %o0 and the softint mask in %o1.
 */
	.macro tl1_intr level, mask
	tl1_split
	set \mask, %o1
	ba %xcc, tl1_intr
	mov \level, %o0				! delay slot
	.align 32
	.endm
1311
/*
 * Expand one tl1_intr vector per interrupt level (see INTR_LEVEL).
 */
	.macro tl1_intr_level
	INTR_LEVEL(1)
	.endm
1315
/*
 * TL1 instruction TLB miss fast path: look the faulting va up in the
 * kernel tsb; on a valid, executable, matching entry load it into the
 * ITLB and retry, else branch to the slow path.  Fits one 128-byte slot.
 */
	.macro tl1_immu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register. We ignore the context.
	 */
	wr %g0, ASI_IMMU, %asi
	ldxa [%g0 + AA_IMMU_TAR] %asi, %g5

	/*
	 * Compute the address of the tte. The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl tl1_immu_miss_patch_1
tl1_immu_miss_patch_1:
	sethi %hi(TSB_KERNEL_MASK), %g6
	or %g6, %lo(TSB_KERNEL_MASK), %g6
	sethi %hi(TSB_KERNEL), %g7

	srlx %g5, TAR_VPN_SHIFT, %g5
	and %g5, %g6, %g6			! bucket index
	sllx %g6, TTE_SHIFT, %g6
	add %g6, %g7, %g6			! g6 = &tsb[index]

	/*
	 * Load the tte.
	 */
	ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that its valid and executable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_immu_miss_trap	! TD_V is the sign bit
	andcc %g7, TD_EXEC, %g0
	bz,pn %xcc, tl1_immu_miss_trap
	srlx %g6, TV_SIZE_BITS, %g6
	cmp %g5, %g6
	bne,pn %xcc, tl1_immu_miss_trap
	EMPTY

	/*
	 * Set the reference bit if its currently clear.
	 */
	 andcc %g7, TD_REF, %g0
	bz,a,pn %xcc, tl1_immu_miss_set_ref
	nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
	retry
	.align 128
	.endm
1370
ENTRY(tl1_immu_miss_set_ref)
	/*
	 * Slow(er) path for the immu miss: atomically set TD_REF in the
	 * tsb entry, then (if still valid) install it in the ITLB.
	 *
	 * Recompute the tte address, which we clobbered loading the tte. The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_immu_miss_patch_2
tl1_immu_miss_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5			! g5 = &tsb[index]

	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f			! lost the race: just retry
	nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_ITLB_DATA_IN_REG
1:	retry
END(tl1_immu_miss_set_ref)
1403
ENTRY(tl1_immu_miss_trap)
	/*
	 * Fast path failed: hand the instruction miss to trap() with the
	 * tag access register (faulting va) in the tar argument slot.
	 *
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_IMMU_TAR] %asi, %g2	! %asi still ASI_IMMU

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar
	ba	%xcc, tl1_trap
	mov	T_INSTRUCTION_MISS | T_KERNEL, %o0	! delay slot
END(tl1_immu_miss_trap)
1419
/*
 * TL1 data TLB miss fast path: user addresses and the direct physical
 * map are diverted; kernel va's are looked up in the kernel tsb and
 * installed in the DTLB on a hit.  Fits one 128-byte slot.
 */
	.macro tl1_dmmu_miss
	/*
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr %g0, ASI_DMMU, %asi
	ldxa [%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If its non-zero this is a fault on a user address. Note that the
	 * faulting address is passed in %g1.
	 */
	sllx %g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_miss_user
	mov %g5, %g1

	/*
	 * Check for the direct mapped physical region. These addresses have
	 * the high bit set so they are negative.
	 */
	brlz,pn %g5, tl1_dmmu_miss_direct
	EMPTY

	/*
	 * Compute the address of the tte. The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl tl1_dmmu_miss_patch_1
tl1_dmmu_miss_patch_1:
	sethi %hi(TSB_KERNEL_MASK), %g6
	or %g6, %lo(TSB_KERNEL_MASK), %g6
	sethi %hi(TSB_KERNEL), %g7

	srlx %g5, TAR_VPN_SHIFT, %g5
	and %g5, %g6, %g6
	sllx %g6, TTE_SHIFT, %g6
	add %g6, %g7, %g6			! g6 = &tsb[index]

	/*
	 * Load the tte.
	 */
	ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that its valid and that the virtual page numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_miss_trap	! TD_V is the sign bit
	srlx %g6, TV_SIZE_BITS, %g6
	cmp %g5, %g6
	bne,pn %xcc, tl1_dmmu_miss_trap
	EMPTY

	/*
	 * Set the reference bit if its currently clear.
	 *
	 * NOTE(review): this branch is hinted ,pt while the equivalent
	 * branch in tl1_immu_miss is ,a,pn - confirm which hint is
	 * intended; the hint affects only performance, not correctness.
	 */
	 andcc %g7, TD_REF, %g0
	bz,a,pt %xcc, tl1_dmmu_miss_set_ref
	nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
	retry
	.align 128
	.endm
1487
ENTRY(tl1_dmmu_miss_set_ref)
	/*
	 * Atomically set TD_REF in the tsb entry, then (if still valid)
	 * install it in the DTLB.
	 *
	 * Recompute the tte address, which we clobbered loading the tte. The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_dmmu_miss_patch_2
tl1_dmmu_miss_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5			! g5 = &tsb[index]

	/*
	 * Set the reference bit.
	 */
	TTE_SET_REF(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f			! lost the race: just retry
	nop

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_miss_set_ref)
1520
ENTRY(tl1_dmmu_miss_trap)
	/*
	 * Fast path failed: hand the data miss to trap().  KSTACK_CHECK
	 * catches kernel stack overflow before we try to build a frame.
	 *
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2	! faulting va (%asi = ASI_DMMU)

	KSTACK_CHECK

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar
	ba	%xcc, tl1_trap
	mov	T_DATA_MISS | T_KERNEL, %o0	! delay slot
END(tl1_dmmu_miss_trap)
1538
ENTRY(tl1_dmmu_miss_direct)
	/*
	 * Mask off the high bits of the virtual address to get the physical
	 * address, and or in the tte bits. The virtual address bits that
	 * correspond to the tte valid and page size bits are left set, so
	 * they don't have to be included in the tte bits below. We know they
	 * are set because the virtual address is in the upper va hole.
	 *
	 * No tsb lookup is needed: the tte is synthesized directly from
	 * the va, making the entire physical direct map TLB-miss cheap.
	 */
	setx	TLB_DIRECT_TO_TTE_MASK, %g7, %g6
	and	%g5, %g6, %g5
	or	%g5, TD_CP | TD_CV | TD_W, %g5	! cacheable + writable

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g5, [%g0] ASI_DTLB_DATA_IN_REG
	retry
END(tl1_dmmu_miss_direct)
1557
/*
 * TL1 data protection vector stub; the handler needs a full 128-byte
 * slot plus out-of-line code, so branch immediately.
 */
	.macro tl1_dmmu_prot
	ba,a %xcc, tl1_dmmu_prot_1
	nop
	.align 128
	.endm
1563
ENTRY(tl1_dmmu_prot_1)
	/*
	 * TL1 fast path for a write-protection fault on a kernel address:
	 * if the tsb entry is valid, software-writable and matches, demap
	 * the stale TLB entry, atomically set the hardware write bit and
	 * reinstall the tte.  User addresses divert to tl1_dmmu_prot_user.
	 *
	 * Load the context and the virtual page number from the tag access
	 * register.
	 */
	wr	%g0, ASI_DMMU, %asi
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g5

	/*
	 * Extract the context from the contents of the tag access register.
	 * If its non-zero this is a fault on a user address. Note that the
	 * faulting address is passed in %g1.
	 */
	sllx	%g5, 64 - TAR_VPN_SHIFT, %g6
	brnz,a,pn %g6, tl1_dmmu_prot_user
	mov	%g5, %g1

	/*
	 * Compute the address of the tte. The tsb mask and address of the
	 * tsb are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_1
tl1_dmmu_prot_patch_1:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	srlx	%g5, TAR_VPN_SHIFT, %g5
	and	%g5, %g6, %g6
	sllx	%g6, TTE_SHIFT, %g6
	add	%g6, %g7, %g6			! g6 = &tsb[index]

	/*
	 * Load the tte.
	 */
	ldda	[%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */

	/*
	 * Check that its valid and writeable and that the virtual page
	 * numbers match.
	 */
	brgez,pn %g7, tl1_dmmu_prot_trap	! TD_V is the sign bit
	andcc	%g7, TD_SW, %g0			! software-writable?
	bz,pn	%xcc, tl1_dmmu_prot_trap
	srlx	%g6, TV_SIZE_BITS, %g6
	cmp	%g5, %g6
	bne,pn	%xcc, tl1_dmmu_prot_trap
	EMPTY

	/*
	 * Delete the old TLB entry and clear the sfsr.
	 */
	sllx	%g5, TAR_VPN_SHIFT, %g6
	or	%g6, TLB_DEMAP_NUCLEUS, %g6
	stxa	%g0, [%g6] ASI_DMMU_DEMAP
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	/*
	 * Recompute the tte address, which we clobbered loading the tte. The
	 * tsb mask and address of the tsb are patched at startup.
	 */
	.globl	tl1_dmmu_prot_patch_2
tl1_dmmu_prot_patch_2:
	sethi	%hi(TSB_KERNEL_MASK), %g6
	or	%g6, %lo(TSB_KERNEL_MASK), %g6
	sethi	%hi(TSB_KERNEL), %g7

	and	%g5, %g6, %g5
	sllx	%g5, TTE_SHIFT, %g5
	add	%g5, %g7, %g5

	/*
	 * Set the hardware write bit.
	 */
	TTE_SET_W(%g5, %g6, %g7)

	/*
	 * May have become invalid during casxa, in which case start over.
	 */
	brgez,pn %g6, 1f
	or	%g6, TD_W, %g6			! delay slot (annulled on trap path? no: executed; harmless when invalid since we just retry)

	/*
	 * Load the tte data into the TLB and retry the instruction.
	 */
	stxa	%g6, [%g0] ASI_DTLB_DATA_IN_REG
1:	retry
END(tl1_dmmu_prot_1)
1653
ENTRY(tl1_dmmu_prot_trap)
	/*
	 * Fast path failed: collect the DMMU fault state and call trap()
	 * with T_DATA_PROTECTION.
	 *
	 * Switch to alternate globals.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/*
	 * Load the sfar, sfsr and tar. Clear the sfsr.
	 */
	ldxa	[%g0 + AA_DMMU_TAR] %asi, %g2
	ldxa	[%g0 + AA_DMMU_SFAR] %asi, %g3
	ldxa	[%g0 + AA_DMMU_SFSR] %asi, %g4
	stxa	%g0, [%g0 + AA_DMMU_SFSR] %asi
	membar	#Sync

	tl1_split
	clr	%o1
	set	trap, %o2
	mov	%g2, %o3			! tar
	mov	%g3, %o4			! sfar
	mov	%g4, %o5			! sfsr
	ba	%xcc, tl1_trap
	mov	T_DATA_PROTECTION | T_KERNEL, %o0	! delay slot
END(tl1_dmmu_prot_trap)
1678
/*
 * Spill a kernel window (normal wstate) directly to the kernel stack.
 * The second half of the 128-byte slot holds the fault continuations;
 * a fault here at TL1 on kernel state is fatal.
 */
	.macro tl1_spill_0_n
	SPILL(stx, %sp + SPOFF, 8, EMPTY)
	saved
	retry
	.align 32
	RSF_FATAL(T_SPILL)
	RSF_FATAL(T_SPILL)
	.endm
1687
/*
 * Spill a 64-bit user window to the user stack via ASI_AIUP; on fault,
 * spill to the pcb instead (RSF_SPILL_TOPCB).
 */
	.macro tl1_spill_2_n
	wr %g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align 32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1697
/*
 * Spill a 32-bit user window (stwa, no stack bias) to the user stack
 * via ASI_AIUP; on fault, spill to the pcb.
 */
	.macro tl1_spill_3_n
	wr %g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align 32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1707
/*
 * Spill a 64-bit user window (other wstate) to the user stack via
 * ASI_AIUP; on fault, spill to the pcb.
 */
	.macro tl1_spill_0_o
	wr %g0, ASI_AIUP, %asi
	SPILL(stxa, %sp + SPOFF, 8, %asi)
	saved
	retry
	.align 32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1717
/*
 * Spill a 32-bit user window (other wstate) to the user stack via
 * ASI_AIUP; on fault, spill to the pcb.
 */
	.macro tl1_spill_1_o
	wr %g0, ASI_AIUP, %asi
	SPILL(stwa, %sp, 4, %asi)
	saved
	retry
	.align 32
	RSF_SPILL_TOPCB
	RSF_SPILL_TOPCB
	.endm
1727
/*
 * Unconditionally spill the user window to the pcb (no user stack
 * access attempted).
 */
	.macro tl1_spill_2_o
	RSF_SPILL_TOPCB
	.align 128
	.endm
1732
/*
 * Fill a kernel window from the kernel stack; a fault here on kernel
 * state is fatal.
 */
	.macro tl1_fill_0_n
	FILL(ldx, %sp + SPOFF, 8, EMPTY)
	restored
	retry
	.align 32
	RSF_FATAL(T_FILL)
	RSF_FATAL(T_FILL)
	.endm
1741
/*
 * Fill a 64-bit user window from the user stack via ASI_AIUP; on
 * fault, fall back to the magic fill sequence (RSF_FILL_MAGIC).
 */
	.macro tl1_fill_2_n
	wr %g0, ASI_AIUP, %asi
	FILL(ldxa, %sp + SPOFF, 8, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
1751
/*
 * Fill a 32-bit user window (lduwa, no stack bias) from the user stack
 * via ASI_AIUP; on fault, use the magic fill sequence.
 */
	.macro tl1_fill_3_n
	wr %g0, ASI_AIUP, %asi
	FILL(lduwa, %sp, 4, %asi)
	restored
	retry
	.align 32
	RSF_FILL_MAGIC
	RSF_FILL_MAGIC
	.endm
1761
1762 /*
1763 * This is used to spill windows that are still occupied with user
1764 * data on kernel entry to the pcb.
1765 */
1766 ENTRY(tl1_spill_topcb)
1767 wrpr %g0, PSTATE_ALT, %pstate
1768
1769 /* Free some globals for our use. */
1770 dec 24, ASP_REG
1771 stx %g1, [ASP_REG + 0]
1772 stx %g2, [ASP_REG + 8]
1773 stx %g3, [ASP_REG + 16]
1774
1775 ldx [PCB_REG + PCB_NSAVED], %g1
1776
1777 sllx %g1, PTR_SHIFT, %g2
1778 add %g2, PCB_REG, %g2
1779 stx %sp, [%g2 + PCB_RWSP]
1780
1781 sllx %g1, RW_SHIFT, %g2
1782 add %g2, PCB_REG, %g2
1783 SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
1784
1785 inc %g1
1786 stx %g1, [PCB_REG + PCB_NSAVED]
1787
1788 #if KTR_COMPILE & KTR_TRAP
1789 CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
1790 , %g1, %g2, %g3, 7, 8, 9)
1791 rdpr %tpc, %g2
1792 stx %g2, [%g1 + KTR_PARM1]
1793 rdpr %tnpc, %g2
1794 stx %g2, [%g1 + KTR_PARM2]
1795 stx %sp, [%g1 + KTR_PARM3]
1796 ldx [PCB_REG + PCB_NSAVED], %g2
1797 stx %g2, [%g1 + KTR_PARM4]
1798 9:
1799 #endif
1800
1801 saved
1802
1803 ldx [ASP_REG + 16], %g3
1804 ldx [ASP_REG + 8], %g2
1805 ldx [ASP_REG + 0], %g1
1806 inc 24, ASP_REG
1807 retry
1808 END(tl1_spill_topcb)
1809
/*
 * Emit \count unreachable TL1 spill vectors; sir resets the CPU.
 */
	.macro tl1_spill_bad count
	.rept \count
	sir
	.align 128
	.endr
	.endm
1816
/*
 * Emit \count unreachable TL1 fill vectors; sir resets the CPU.
 */
	.macro tl1_fill_bad count
	.rept \count
	sir
	.align 128
	.endr
	.endm
1823
/*
 * Emit \count generic TL1 software trap vectors.
 */
	.macro tl1_soft count
	.rept \count
	tl1_gen T_SOFT | T_KERNEL
	.endr
	.endm
1829
/*
 * The trap table proper.  It must be 32KB aligned; TL0 vectors occupy
 * the first half, TL1 (tl1_base) the second.  Normal vectors are 32
 * bytes (8 instructions); spill/fill vectors are 128 bytes.  The `!'
 * comments give each entry's hardware trap-type number.
 */
	.sect	.trap
	.globl	tl_trap_begin
tl_trap_begin:
	nop

	.align	0x8000
	.globl	tl0_base

tl0_base:
	tl0_reserved 8				! 0x0-0x7
tl0_insn_excptn:
	tl0_insn_excptn				! 0x8
	tl0_reserved 1				! 0x9
tl0_insn_error:
	tl0_gen	T_INSTRUCTION_ERROR		! 0xa
	tl0_reserved 5				! 0xb-0xf
tl0_insn_illegal:
	tl0_gen	T_ILLEGAL_INSTRUCTION		! 0x10
tl0_priv_opcode:
	tl0_gen	T_PRIVILEGED_OPCODE		! 0x11
	tl0_reserved 14				! 0x12-0x1f
tl0_fp_disabled:
	tl0_gen	T_FP_DISABLED			! 0x20
tl0_fp_ieee:
	tl0_gen	T_FP_EXCEPTION_IEEE_754		! 0x21
tl0_fp_other:
	tl0_gen	T_FP_EXCEPTION_OTHER		! 0x22
tl0_tag_ovflw:
	tl0_gen	T_TAG_OVERFLOW			! 0x23
tl0_clean_window:
	clean_window				! 0x24
tl0_divide:
	tl0_gen	T_DIVISION_BY_ZERO		! 0x28
	tl0_reserved 7				! 0x29-0x2f
tl0_data_excptn:
	tl0_data_excptn				! 0x30
	tl0_reserved 1				! 0x31
tl0_data_error:
	tl0_gen	T_DATA_ERROR			! 0x32
	tl0_reserved 1				! 0x33
tl0_align:
	tl0_align				! 0x34
tl0_align_lddf:
	tl0_gen	T_RESERVED			! 0x35
tl0_align_stdf:
	tl0_gen	T_RESERVED			! 0x36
tl0_priv_action:
	tl0_gen	T_PRIVILEGED_ACTION		! 0x37
	tl0_reserved 9				! 0x38-0x40
tl0_intr_level:
	tl0_intr_level				! 0x41-0x4f
	tl0_reserved 16				! 0x50-0x5f
tl0_intr_vector:
	intr_vector				! 0x60
tl0_watch_phys:
	tl0_gen	T_PA_WATCHPOINT			! 0x61
tl0_watch_virt:
	tl0_gen	T_VA_WATCHPOINT			! 0x62
tl0_ecc:
	tl0_gen	T_CORRECTED_ECC_ERROR		! 0x63
tl0_immu_miss:
	tl0_immu_miss				! 0x64
tl0_dmmu_miss:
	tl0_dmmu_miss				! 0x68
tl0_dmmu_prot:
	tl0_dmmu_prot				! 0x6c
	tl0_reserved 16				! 0x70-0x7f
tl0_spill_0_n:
	tl0_spill_0_n				! 0x80
tl0_spill_1_n:
	tl0_spill_1_n				! 0x84
	tl0_spill_bad 14			! 0x88-0xbf
tl0_fill_0_n:
	tl0_fill_0_n				! 0xc0
tl0_fill_1_n:
	tl0_fill_1_n				! 0xc4
	tl0_fill_bad 14				! 0xc8-0xff
tl0_soft:
	tl0_gen	T_SYSCALL			! 0x100
	tl0_gen	T_BREAKPOINT			! 0x101
	tl0_gen	T_DIVISION_BY_ZERO		! 0x102
	tl0_reserved 1				! 0x103
	tl0_gen	T_CLEAN_WINDOW			! 0x104
	tl0_gen	T_RANGE_CHECK			! 0x105
	tl0_gen	T_FIX_ALIGNMENT			! 0x106
	tl0_gen	T_INTEGER_OVERFLOW		! 0x107
	tl0_gen	T_SYSCALL			! 0x108
	tl0_gen	T_SYSCALL			! 0x109
	tl0_fp_restore				! 0x10a
	tl0_reserved 5				! 0x10b-0x10f
	tl0_gen	T_TRAP_INSTRUCTION_16		! 0x110
	tl0_gen	T_TRAP_INSTRUCTION_17		! 0x111
	tl0_gen	T_TRAP_INSTRUCTION_18		! 0x112
	tl0_gen	T_TRAP_INSTRUCTION_19		! 0x113
	tl0_gen	T_TRAP_INSTRUCTION_20		! 0x114
	tl0_gen	T_TRAP_INSTRUCTION_21		! 0x115
	tl0_gen	T_TRAP_INSTRUCTION_22		! 0x116
	tl0_gen	T_TRAP_INSTRUCTION_23		! 0x117
	tl0_gen	T_TRAP_INSTRUCTION_24		! 0x118
	tl0_gen	T_TRAP_INSTRUCTION_25		! 0x119
	tl0_gen	T_TRAP_INSTRUCTION_26		! 0x11a
	tl0_gen	T_TRAP_INSTRUCTION_27		! 0x11b
	tl0_gen	T_TRAP_INSTRUCTION_28		! 0x11c
	tl0_gen	T_TRAP_INSTRUCTION_29		! 0x11d
	tl0_gen	T_TRAP_INSTRUCTION_30		! 0x11e
	tl0_gen	T_TRAP_INSTRUCTION_31		! 0x11f
	tl0_reserved 32				! 0x120-0x13f
	tl0_gen	T_SYSCALL			! 0x140
	tl0_syscall				! 0x141
	tl0_gen	T_SYSCALL			! 0x142
	tl0_gen	T_SYSCALL			! 0x143
	tl0_reserved 188			! 0x144-0x1ff
1942
/*
 * TL1 half of the trap table.  Vector-number comments corrected below:
 * tl1_spill_3_n is at 0x28c (not 0x29c) and tl1_fill_2_n/tl1_fill_3_n
 * are at 0x2c8/0x2cc, with the 12 bad fill slots covering 0x2d0-0x2ff.
 */
tl1_base:
	tl1_reserved 8				! 0x200-0x207
tl1_insn_excptn:
	tl1_insn_excptn				! 0x208
	tl1_reserved 1				! 0x209
tl1_insn_error:
	tl1_gen	T_INSTRUCTION_ERROR		! 0x20a
	tl1_reserved 5				! 0x20b-0x20f
tl1_insn_illegal:
	tl1_gen	T_ILLEGAL_INSTRUCTION		! 0x210
tl1_priv_opcode:
	tl1_gen	T_PRIVILEGED_OPCODE		! 0x211
	tl1_reserved 14				! 0x212-0x21f
tl1_fp_disabled:
	tl1_fp_disabled				! 0x220
tl1_fp_ieee:
	tl1_gen	T_FP_EXCEPTION_IEEE_754		! 0x221
tl1_fp_other:
	tl1_gen	T_FP_EXCEPTION_OTHER		! 0x222
tl1_tag_ovflw:
	tl1_gen	T_TAG_OVERFLOW			! 0x223
tl1_clean_window:
	clean_window				! 0x224
tl1_divide:
	tl1_gen	T_DIVISION_BY_ZERO		! 0x228
	tl1_reserved 7				! 0x229-0x22f
tl1_data_excptn:
	tl1_data_excptn				! 0x230
	tl1_reserved 1				! 0x231
tl1_data_error:
	tl1_gen	T_DATA_ERROR			! 0x232
	tl1_reserved 1				! 0x233
tl1_align:
	tl1_align				! 0x234
tl1_align_lddf:
	tl1_gen	T_RESERVED			! 0x235
tl1_align_stdf:
	tl1_gen	T_RESERVED			! 0x236
tl1_priv_action:
	tl1_gen	T_PRIVILEGED_ACTION		! 0x237
	tl1_reserved 9				! 0x238-0x240
tl1_intr_level:
	tl1_intr_level				! 0x241-0x24f
	tl1_reserved 16				! 0x250-0x25f
tl1_intr_vector:
	intr_vector				! 0x260
tl1_watch_phys:
	tl1_gen	T_PA_WATCHPOINT			! 0x261
tl1_watch_virt:
	tl1_gen	T_VA_WATCHPOINT			! 0x262
tl1_ecc:
	tl1_gen	T_CORRECTED_ECC_ERROR		! 0x263
tl1_immu_miss:
	tl1_immu_miss				! 0x264
tl1_dmmu_miss:
	tl1_dmmu_miss				! 0x268
tl1_dmmu_prot:
	tl1_dmmu_prot				! 0x26c
	tl1_reserved 16				! 0x270-0x27f
tl1_spill_0_n:
	tl1_spill_0_n				! 0x280
	tl1_spill_bad 1				! 0x284
tl1_spill_2_n:
	tl1_spill_2_n				! 0x288
tl1_spill_3_n:
	tl1_spill_3_n				! 0x28c
	tl1_spill_bad 4				! 0x290-0x29f
tl1_spill_0_o:
	tl1_spill_0_o				! 0x2a0
tl1_spill_1_o:
	tl1_spill_1_o				! 0x2a4
tl1_spill_2_o:
	tl1_spill_2_o				! 0x2a8
	tl1_spill_bad 5				! 0x2ac-0x2bf
tl1_fill_0_n:
	tl1_fill_0_n				! 0x2c0
	tl1_fill_bad 1				! 0x2c4
tl1_fill_2_n:
	tl1_fill_2_n				! 0x2c8
tl1_fill_3_n:
	tl1_fill_3_n				! 0x2cc
	tl1_fill_bad 12				! 0x2d0-0x2ff
	tl1_reserved 1				! 0x300
tl1_breakpoint:
	tl1_gen	T_BREAKPOINT			! 0x301
	tl1_gen	T_RSTRWP_PHYS			! 0x302
	tl1_gen	T_RSTRWP_VIRT			! 0x303
	tl1_reserved 252			! 0x304-0x3ff

	.globl	tl_trap_end
tl_trap_end:
	nop
2035
2036 /*
2037 * User trap entry point.
2038 *
2039 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2040 * u_long sfsr)
2041 *
2042 * This handles redirecting a trap back to usermode as a user trap. The user
2043 * program must have first registered a trap handler with the kernel using
2044 * sysarch(SPARC_UTRAP_INSTALL). The trap handler is passed enough state
2045 * for it to return to the trapping code directly, it will not return through
2046 * the kernel. The trap type is passed in %o0, all out registers must be
2047 * passed through to tl0_trap or to usermode untouched. Note that the
2048 * parameters passed in out registers may be used by the user trap handler.
2049 * Do not change the registers they are passed in or you will break the ABI.
2050 *
2051 * If the trap type allows user traps, setup state to execute the user trap
2052 * handler and bounce back to usermode, otherwise branch to tl0_trap.
2053 */
ENTRY(tl0_utrap)
	/*
	 * Check if the trap type allows user traps.
	 */
	cmp	%o0, UT_MAX
	bge,a,pt %xcc, tl0_trap			! type out of range -> kernel
	nop

	/*
	 * Load the user trap handler from the utrap table.
	 */
	ldx	[PCPU(CURTHREAD)], %l0
	ldx	[%l0 + TD_PROC], %l0
	ldx	[%l0 + P_MD + MD_UTRAP], %l0	! per-process utrap table
	brz,pt	%l0, tl0_trap			! no table installed
	sllx	%o0, PTR_SHIFT, %l1		! delay slot: table offset
	ldx	[%l0 + %l1], %l0
	brz,a,pt %l0, tl0_trap			! no handler for this type
	nop

	/*
	 * If the save we did on entry to the kernel had to spill a window
	 * to the pcb, pretend we took a spill trap instead. Any windows
	 * that are in the pcb must be copied out or the fill handler will
	 * not be able to find them, since the user trap handler returns
	 * directly to the trapping code. Note that we only support precise
	 * user traps, which implies that the condition that caused the trap
	 * in the first place is still valid, so it will occur again when we
	 * re-execute the trapping instruction.
	 */
	ldx	[PCB_REG + PCB_NSAVED], %l1
	brnz,a,pn %l1, tl0_trap
	mov	T_SPILL, %o0			! delay slot: masquerade type

	/*
	 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
	 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
	 * it may be clobbered by an interrupt before the user trap code
	 * can read it, and we must pass %tstate in order to restore %ccr
	 * and %asi. The %fsr must be stored to memory, so we use the
	 * temporary stack for that.
	 */
	rd	%fprs, %l1
	or	%l1, FPRS_FEF, %l2
	wr	%l2, 0, %fprs			! FPU on so %fsr can be read
	dec	8, ASP_REG
	stx	%fsr, [ASP_REG]
	ldx	[ASP_REG], %l4
	inc	8, ASP_REG
	wr	%l1, 0, %fprs			! restore original %fprs

	rdpr	%tstate, %l5
	rdpr	%tpc, %l6
	rdpr	%tnpc, %l7

	/*
	 * Setup %tnpc to return to.
	 */
	wrpr	%l0, 0, %tnpc			! done will jump to handler

	/*
	 * Setup %wstate for return, clear WSTATE_TRANSITION.
	 */
	rdpr	%wstate, %l1
	and	%l1, WSTATE_NORMAL_MASK, %l1
	wrpr	%l1, 0, %wstate

	/*
	 * Setup %tstate for return, change the saved cwp to point to the
	 * current window instead of the window at the time of the trap.
	 */
	andn	%l5, TSTATE_CWP_MASK, %l1
	rdpr	%cwp, %l2
	wrpr	%l1, %l2, %tstate

	/*
	 * Setup %sp. Userland processes will crash if this is not setup.
	 */
	sub	%fp, CCFSZ, %sp

	/*
	 * Execute the user trap handler.
	 */
	done
END(tl0_utrap)
2139
2140 /*
2141 * (Real) User trap entry point.
2142 *
2143 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2144 * u_int sfsr)
2145 *
2146 * The following setup has been performed:
2147 * - the windows have been split and the active user window has been saved
2148 * (maybe just to the pcb)
2149 * - we are on alternate globals and interrupts are disabled
2150 *
2151 * We switch to the kernel stack, build a trapframe, switch to normal
2152 * globals, enable interrupts and call trap.
2153 *
2154 * NOTE: We must be very careful setting up the per-cpu pointer. We know that
2155 * it has been pre-set in alternate globals, so we read it from there and setup
2156 * the normal %g7 *before* enabling interrupts. This avoids any possibility
2157 * of cpu migration and using the wrong pcpup.
2158 */
ENTRY(tl0_trap)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Snapshot trap state before it can be clobbered. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_TRAP
	CATR(KTR_TRAP,
	    "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/*
	 * Move the remaining user windows to the other set and switch to
	 * the kernel window state.
	 */
1:	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/* Switch to the kernel stack and build the trapframe. */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o3, [%sp + SPOFF + CCFSZ + TF_TAR]
	stx	%o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
	stx	%o5, [%sp + SPOFF + CCFSZ + TF_SFSR]

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* %fsr/%gsr need the FPU briefly enabled to be read. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	/*
	 * Copy the per-cpu/pcb pointers from the alternate globals to the
	 * normal globals *before* enabling interrupts (see header note).
	 */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate	! interrupts back on

	/* User %o registers are now visible as our ins; save them. */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]

	/*
	 * Tail-call the handler in %o2 with the trapframe as argument;
	 * faking %o7 makes it return straight into tl0_ret.
	 */
	set	tl0_ret - 8, %o7
	jmpl	%o2, %g0
	add	%sp, CCFSZ + SPOFF, %o0		! delay slot: &trapframe
END(tl0_trap)
2245
2246 /*
2247 * void tl0_intr(u_int level, u_int mask)
2248 */
/*
 * void tl0_intr(u_int level, u_int mask)
 */
ENTRY(tl0_intr)
	/*
	 * Force kernel store order.
	 */
	wrpr	%g0, PSTATE_ALT, %pstate

	/* Snapshot trap state before it can be clobbered. */
	rdpr	%tstate, %l0
	rdpr	%tpc, %l1
	rdpr	%tnpc, %l2
	rd	%y, %l3
	rd	%fprs, %l4
	rdpr	%wstate, %l5

#if KTR_COMPILE & KTR_INTR
	CATR(KTR_INTR,
	    "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
	    , %g1, %g2, %g3, 7, 8, 9)
	ldx	[PCPU(CURTHREAD)], %g2
	stx	%g2, [%g1 + KTR_PARM1]
	stx	%o0, [%g1 + KTR_PARM2]
	rdpr	%pil, %g2
	stx	%g2, [%g1 + KTR_PARM3]
	stx	%l1, [%g1 + KTR_PARM4]
	stx	%l2, [%g1 + KTR_PARM5]
	stx	%i6, [%g1 + KTR_PARM6]
9:
#endif

	/* Block interrupts at or below this level and ack the softint. */
	wrpr	%o0, 0, %pil
	wr	%o1, 0, %clear_softint

	/* Move remaining user windows aside; switch to kernel wstate. */
	and	%l5, WSTATE_NORMAL_MASK, %l5
	sllx	%l5, WSTATE_OTHER_SHIFT, %l5
	wrpr	%l5, WSTATE_KERNEL, %wstate
	rdpr	%canrestore, %l6
	wrpr	%l6, 0, %otherwin
	wrpr	%g0, 0, %canrestore

	/* Switch to the kernel stack and build the trapframe. */
	sub	PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp

	stx	%l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
	stx	%l1, [%sp + SPOFF + CCFSZ + TF_TPC]
	stx	%l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
	stx	%l3, [%sp + SPOFF + CCFSZ + TF_Y]
	stx	%l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
	stx	%l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]

	/* %fsr/%gsr need the FPU briefly enabled to be read. */
	wr	%g0, FPRS_FEF, %fprs
	stx	%fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
	rd	%gsr, %l6
	stx	%l6, [%sp + SPOFF + CCFSZ + TF_GSR]
	wr	%g0, 0, %fprs

	mov	%o0, %l3			! keep the level for later
	mov	T_INTERRUPT, %o1

	stx	%o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
	stx	%o1, [%sp + SPOFF + CCFSZ + TF_TYPE]

	/* Copy pcb/pcpu pointers to normal globals before enabling ints. */
	mov	PCB_REG, %l0
	mov	PCPU_REG, %l1
	wrpr	%g0, PSTATE_NORMAL, %pstate

	stx	%g1, [%sp + SPOFF + CCFSZ + TF_G1]
	stx	%g2, [%sp + SPOFF + CCFSZ + TF_G2]
	stx	%g3, [%sp + SPOFF + CCFSZ + TF_G3]
	stx	%g4, [%sp + SPOFF + CCFSZ + TF_G4]
	stx	%g5, [%sp + SPOFF + CCFSZ + TF_G5]
	stx	%g6, [%sp + SPOFF + CCFSZ + TF_G6]
	stx	%g7, [%sp + SPOFF + CCFSZ + TF_G7]

	mov	%l0, PCB_REG
	mov	%l1, PCPU_REG
	wrpr	%g0, PSTATE_KERNEL, %pstate	! interrupts back on

	/* User %o registers are now visible as our ins; save them. */
	stx	%i0, [%sp + SPOFF + CCFSZ + TF_O0]
	stx	%i1, [%sp + SPOFF + CCFSZ + TF_O1]
	stx	%i2, [%sp + SPOFF + CCFSZ + TF_O2]
	stx	%i3, [%sp + SPOFF + CCFSZ + TF_O3]
	stx	%i4, [%sp + SPOFF + CCFSZ + TF_O4]
	stx	%i5, [%sp + SPOFF + CCFSZ + TF_O5]
	stx	%i6, [%sp + SPOFF + CCFSZ + TF_O6]
	stx	%i7, [%sp + SPOFF + CCFSZ + TF_O7]

	/* Dispatch to the per-level interrupt handler. */
	SET(intr_handlers, %l1, %l0)
	sllx	%l3, IH_SHIFT, %l1
	ldx	[%l0 + %l1], %l1
	KASSERT(%l1, "tl0_intr: ih null")
	call	%l1
	add	%sp, CCFSZ + SPOFF, %o0		! delay slot: &trapframe

	/* %l3 contains PIL */
	/* Bump the per-PIL interrupt counter in intrcnt[]. */
	SET(intrcnt, %l1, %l2)
	prefetcha [%l2] ASI_N, 1
	SET(pil_countp, %l1, %l0)
	sllx	%l3, 1, %l1
	lduh	[%l0 + %l1], %l0		! index into intrcnt
	sllx	%l0, 3, %l0
	add	%l0, %l2, %l0
	ldx	[%l0], %l1
	inc	%l1
	stx	%l1, [%l0]

	/* Bump the global vm interrupt statistic. */
	lduw	[PCPU(CNT) + V_INTR], %l0
	inc	%l0
	stw	%l0, [PCPU(CNT) + V_INTR]

	ba,a	%xcc, tl0_ret
	nop
END(tl0_intr)
2359
2360 /*
2361 * Initiate return to usermode.
2362 *
2363 * Called with a trapframe on the stack. The window that was setup in
2364 * tl0_trap may have been used by "fast" trap handlers that pretend to be
2365 * leaf functions, so all ins and locals may have been clobbered since
2366 * then.
2367 *
2368 * This code is rather long and complicated.
2369 */
2370 ENTRY(tl0_ret)
2371 /*
2372 * Check for pending asts atomically with returning. We must raise
2373 * the pil before checking, and if no asts are found the pil must
2374 * remain raised until the retry is executed, or we risk missing asts
2375 * caused by interrupts occuring after the test. If the pil is lowered,
2376 * as it is when we call ast, the check must be re-executed.
2377 */
2378 wrpr %g0, PIL_TICK, %pil
2379 ldx [PCPU(CURTHREAD)], %l0
2380 lduw [%l0 + TD_FLAGS], %l1
2381 set TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2382 and %l1, %l2, %l1
2383 brz,a,pt %l1, 1f
2384 nop
2385
2386 /*
2387 * We have an ast. Re-enable interrupts and handle it, then restart
2388 * the return sequence.
2389 */
2390 wrpr %g0, 0, %pil
2391 call ast
2392 add %sp, CCFSZ + SPOFF, %o0
2393 ba,a %xcc, tl0_ret
2394 nop
2395
2396 /*
2397 * Check for windows that were spilled to the pcb and need to be
2398 * copied out. This must be the last thing that is done before the
2399 * return to usermode. If there are still user windows in the cpu
2400 * and we call a nested function after this, which causes them to be
2401 * spilled to the pcb, they will not be copied out and the stack will
2402 * be inconsistent.
2403 */
2404 1: ldx [PCB_REG + PCB_NSAVED], %l1
2405 brz,a,pt %l1, 2f
2406 nop
2407 wrpr %g0, 0, %pil
2408 mov T_SPILL, %o0
2409 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2410 call trap
2411 add %sp, SPOFF + CCFSZ, %o0
2412 ba,a %xcc, tl0_ret
2413 nop
2414
2415 /*
2416 * Restore the out and most global registers from the trapframe.
2417 * The ins will become the outs when we restore below.
2418 */
2419 2: ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2420 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2421 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2422 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2423 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2424 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2425 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2426 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2427
2428 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2429 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2430 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2431 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2432 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2433
2434 /*
2435 * Load everything we need to restore below before disabling
2436 * interrupts.
2437 */
2438 ldx [%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2439 ldx [%sp + SPOFF + CCFSZ + TF_GSR], %l1
2440 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2441 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l3
2442 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2443 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l5
2444 ldx [%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
2445
2446 /*
2447 * Disable interrupts to restore the special globals. They are not
2448 * saved and restored for all kernel traps, so an interrupt at the
2449 * wrong time would clobber them.
2450 */
2451 wrpr %g0, PSTATE_NORMAL, %pstate
2452
2453 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2454 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2455
2456 /*
2457 * Switch to alternate globals. This frees up some registers we
2458 * can use after the restore changes our window.
2459 */
2460 wrpr %g0, PSTATE_ALT, %pstate
2461
2462 /*
2463 * Drop %pil to zero. It must have been zero at the time of the
2464 * trap, since we were in usermode, but it was raised above in
2465 * order to check for asts atomically. We have interrupts disabled
2466 * so any interrupts will not be serviced until we complete the
2467 * return to usermode.
2468 */
2469 wrpr %g0, 0, %pil
2470
2471 /*
2472 * Save %fprs in an alternate global so it can be restored after the
2473 * restore instruction below. If we restore it before the restore,
2474 * and the restore traps we may run for a while with floating point
2475 * enabled in the kernel, which we want to avoid.
2476 */
2477 mov %l0, %g1
2478
2479 /*
2480 * Restore %fsr and %gsr. These need floating point enabled in %fprs,
2481 * so we set it temporarily and then clear it.
2482 */
2483 wr %g0, FPRS_FEF, %fprs
2484 ldx [%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2485 wr %l1, 0, %gsr
2486 wr %g0, 0, %fprs
2487
2488 /*
2489 * Restore program counters. This could be done after the restore
2490 * but we're out of alternate globals to store them in...
2491 */
2492 wrpr %l2, 0, %tnpc
2493 wrpr %l3, 0, %tpc
2494
2495 /*
2496 * Save %tstate in an alternate global and clear the %cwp field. %cwp
2497 * will be affected by the restore below and we need to make sure it
2498 * points to the current window at that time, not the window that was
2499 * active at the time of the trap.
2500 */
2501 andn %l4, TSTATE_CWP_MASK, %g2
2502
2503 /*
2504 * Restore %y. Could also be below if we had more alternate globals.
2505 */
2506 wr %l5, 0, %y
2507
2508 /*
2509 * Setup %wstate for return. We need to restore the user window state
2510 * which we saved in wstate.other when we trapped. We also need to
2511 * set the transition bit so the restore will be handled specially
2512 * if it traps; use the xor feature of wrpr to do that.
2513 */
2514 srlx %l6, WSTATE_OTHER_SHIFT, %g3
2515 wrpr %g3, WSTATE_TRANSITION, %wstate
2516
2517 /*
2518 * Setup window management registers for return. If not all user
2519 * windows were spilled in the kernel %otherwin will be non-zero,
2520 * so we need to transfer it to %canrestore to correctly restore
2521 * those windows. Otherwise everything gets set to zero and the
2522 * restore below will fill a window directly from the user stack.
2523 */
2524 rdpr %otherwin, %o0
2525 wrpr %o0, 0, %canrestore
2526 wrpr %g0, 0, %otherwin
2527 wrpr %o0, 0, %cleanwin
2528
2529 /*
2530 * Now do the restore. If this instruction causes a fill trap which
2531 * fails to fill a window from the user stack, we will resume at
2532 * tl0_ret_fill_end and call back into the kernel.
2533 */
2534 restore
2535 tl0_ret_fill:
2536
2537 /*
2538 * We made it. We're back in the window that was active at the time
2539 * of the trap, and ready to return to usermode.
2540 */
2541
2542 /*
2543 * Restore %fprs. This was saved in an alternate global above.
2544 */
2545 wr %g1, 0, %fprs
2546
2547 /*
2548 * Fixup %tstate so the saved %cwp points to the current window and
2549 * restore it.
2550 */
2551 rdpr %cwp, %g4
2552 wrpr %g2, %g4, %tstate
2553
2554 /*
2555 * Restore the user window state. The transition bit was set above
2556 * for special handling of the restore, this clears it.
2557 */
2558 wrpr %g3, 0, %wstate
2559
2560 #if KTR_COMPILE & KTR_TRAP
2561 CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2562 , %g2, %g3, %g4, 7, 8, 9)
2563 ldx [PCPU(CURTHREAD)], %g3
2564 stx %g3, [%g2 + KTR_PARM1]
2565 rdpr %pil, %g3
2566 stx %g3, [%g2 + KTR_PARM2]
2567 rdpr %tpc, %g3
2568 stx %g3, [%g2 + KTR_PARM3]
2569 rdpr %tnpc, %g3
2570 stx %g3, [%g2 + KTR_PARM4]
2571 stx %sp, [%g2 + KTR_PARM5]
2572 9:
2573 #endif
2574
2575 /*
2576 * Return to usermode.
2577 */
2578 retry
2579 tl0_ret_fill_end:
2580
2581 #if KTR_COMPILE & KTR_TRAP
2582 CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
2583 , %l0, %l1, %l2, 7, 8, 9)
2584 rdpr %pstate, %l1
2585 stx %l1, [%l0 + KTR_PARM1]
2586 stx %l5, [%l0 + KTR_PARM2]
2587 stx %sp, [%l0 + KTR_PARM3]
2588 9:
2589 #endif
2590
2591 /*
2592 * The restore above caused a fill trap and the fill handler was
2593 * unable to fill a window from the user stack. The special fill
2594 * handler recognized this and punted, sending us here. We need
2595 * to carefully undo any state that was restored before the restore
2596 * was executed and call trap again. Trap will copyin a window
2597 * from the user stack which will fault in the page we need so the
2598 * restore above will succeed when we try again. If this fails
2599 * the process has trashed its stack, so we kill it.
2600 */
2601
2602 /*
2603 * Restore the kernel window state. This was saved in %l6 above, and
2604 * since the restore failed we're back in the same window.
2605 */
2606 wrpr %l6, 0, %wstate
2607
2608 /*
2609 * Restore the normal globals which have predefined values in the
2610 * kernel. We clobbered them above restoring the user's globals
2611 * so this is very important.
2612 * XXX PSTATE_ALT must already be set.
2613 */
2614 wrpr %g0, PSTATE_ALT, %pstate
2615 mov PCB_REG, %o0
2616 mov PCPU_REG, %o1
2617 wrpr %g0, PSTATE_NORMAL, %pstate
2618 mov %o0, PCB_REG
2619 mov %o1, PCPU_REG
2620 wrpr %g0, PSTATE_KERNEL, %pstate
2621
2622 /*
2623 * Simulate a fill trap and then start the whole return sequence over
2624 * again. This is special because it only copies in 1 window, not 2
2625 * as we would for a normal failed fill. This may be the first time
2626 * the process has been run, so there may not be 2 windows worth of
2627 * stack to copyin.
2628 */
2629 mov T_FILL_RET, %o0
2630 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2631 call trap
2632 add %sp, SPOFF + CCFSZ, %o0
2633 ba,a %xcc, tl0_ret
2634 nop
2635 END(tl0_ret)
2636
2637 /*
2638 * Kernel trap entry point
2639 *
2640 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2641 * u_int sfsr)
2642 *
2643 * This is easy because the stack is already setup and the windows don't need
2644 * to be split. We build a trapframe and call trap(), the same as above, but
2645 * the outs don't need to be saved.
2646 */
2647 ENTRY(tl1_trap)
 /*
 * Capture the trap state registers into locals before anything can
 * cause a nested trap and overwrite them. %wstate is needed below to
 * rebuild the window state for the kernel.
 */
2648 rdpr %tstate, %l0
2649 rdpr %tpc, %l1
2650 rdpr %tnpc, %l2
2651 rdpr %pil, %l3
2652 rd %y, %l4
2653 rdpr %wstate, %l5
2654
2655 #if KTR_COMPILE & KTR_TRAP
2656 CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
2657 , %g1, %g2, %g3, 7, 8, 9)
2658 ldx [PCPU(CURTHREAD)], %g2
2659 stx %g2, [%g1 + KTR_PARM1]
2660 stx %o0, [%g1 + KTR_PARM2]
2661 stx %l3, [%g1 + KTR_PARM3]
2662 stx %l1, [%g1 + KTR_PARM4]
2663 stx %i6, [%g1 + KTR_PARM5]
2664 9:
2665 #endif
2666
 /*
 * The trap state is safely saved, so drop back to trap level 1;
 * %tl is raised to 2 again in tl1_ret before the retry.
 */
2667 wrpr %g0, 1, %tl
2668
 /*
 * Keep only the wstate.other field of the saved %wstate and select
 * the kernel spill/fill handlers for the normal window state.
 */
2669 and %l5, WSTATE_OTHER_MASK, %l5
2670 wrpr %l5, WSTATE_KERNEL, %wstate
2671
 /*
 * Store the trap arguments into the trapframe. Note that %o2 is not
 * stored; it holds the address that is jumped to below.
 */
2672 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2673 stx %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2674 stx %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2675 stx %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2676 stx %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2677
 /*
 * Save the trap state captured above.
 */
2678 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2679 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2680 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2681 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2682 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
2683
 /*
 * Carry the PCB and PCPU pointers across the pstate switch in locals,
 * since PCB_REG/PCPU_REG live in global registers that are swapped by
 * the switch. Move to the normal globals just long enough to save
 * %g6 and %g7 into the frame, then re-establish PCB_REG/PCPU_REG and
 * return to the kernel pstate.
 */
2684 mov PCB_REG, %l0
2685 mov PCPU_REG, %l1
2686 wrpr %g0, PSTATE_NORMAL, %pstate
2687
2688 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2689 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2690
2691 mov %l0, PCB_REG
2692 mov %l1, PCPU_REG
2693 wrpr %g0, PSTATE_KERNEL, %pstate
2694
 /*
 * Save the caller's outs (our ins) and the normal globals %g1-%g5.
 */
2695 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2696 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2697 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2698 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2699 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2700 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2701 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2702 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2703
2704 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2705 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2706 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2707 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2708 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2709
 /*
 * Jump to the handler whose address is in %o2, passing a pointer to
 * the trapframe in the delay slot. %o7 is preset to tl1_ret - 8 so
 * that when the handler returns (to %o7 + 8), it lands directly on
 * tl1_ret.
 */
2710 set tl1_ret - 8, %o7
2711 jmpl %o2, %g0
2712 add %sp, CCFSZ + SPOFF, %o0
2713 END(tl1_trap)
2714
 /*
 * Return path for traps taken in the kernel: unwind the trapframe
 * built by tl1_trap and retry the trapped instruction.
 */
2715 ENTRY(tl1_ret)
 /*
 * Reload the saved outs into the ins; they become the outs again
 * when the restore below moves back to the previous window.
 */
2716 ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2717 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2718 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2719 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2720 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2721 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2722 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2723 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2724
2725 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2726 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2727 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2728 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2729 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2730
 /*
 * Load the saved trap state into locals before the window changes.
 */
2731 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
2732 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
2733 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2734 ldx [%sp + SPOFF + CCFSZ + TF_PIL], %l3
2735 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
2736
 /*
 * Only restore %g6 and %g7 if the trapped PC was at or above
 * VM_MIN_PROM_ADDRESS; for traps from ordinary kernel addresses the
 * restore is skipped. NOTE(review): presumably the kernel's own
 * %g6/%g7 are still intact in that case and only PROM code needs
 * them re-established — confirm against the save path in tl1_trap.
 */
2737 set VM_MIN_PROM_ADDRESS, %l5
2738 cmp %l1, %l5
2739 bl,a,pt %xcc, 1f
2740 nop
2741
 /*
 * Switch to normal globals to restore %g6/%g7; they are swapped out
 * while on the other global sets.
 */
2742 wrpr %g0, PSTATE_NORMAL, %pstate
2743
2744 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2745 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2746
 /*
 * Switch to alternate globals so values survive the restore below,
 * and stage what is needed afterwards: %tstate with its %cwp field
 * cleared (refilled with the then-current window below), %tpc and
 * %tnpc.
 */
2747 1: wrpr %g0, PSTATE_ALT, %pstate
2748
2749 andn %l0, TSTATE_CWP_MASK, %g1
2750 mov %l1, %g2
2751 mov %l2, %g3
2752
 /*
 * Restore the interrupt level and %y from the trapframe.
 */
2753 wrpr %l3, 0, %pil
2754 wr %l4, 0, %y
2755
2756 restore
2757
 /*
 * Go back to trap level 2 and rebuild the trap registers so that
 * retry resumes at the saved %tpc/%tnpc. The saved %cwp field is
 * replaced with the current window, which changed with the restore.
 */
2758 wrpr %g0, 2, %tl
2759
2760 rdpr %cwp, %g4
2761 wrpr %g1, %g4, %tstate
2762 wrpr %g2, 0, %tpc
2763 wrpr %g3, 0, %tnpc
2764
2765 #if KTR_COMPILE & KTR_TRAP
2766 CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2767 , %g2, %g3, %g4, 7, 8, 9)
2768 ldx [PCPU(CURTHREAD)], %g3
2769 stx %g3, [%g2 + KTR_PARM1]
2770 rdpr %pil, %g3
2771 stx %g3, [%g2 + KTR_PARM2]
2772 rdpr %tstate, %g3
2773 stx %g3, [%g2 + KTR_PARM3]
2774 rdpr %tpc, %g3
2775 stx %g3, [%g2 + KTR_PARM4]
2776 stx %sp, [%g2 + KTR_PARM5]
2777 9:
2778 #endif
2779
 /*
 * Resume the trapped instruction.
 */
2780 retry
2781 END(tl1_ret)
2782
2783 /*
2784 * void tl1_intr(u_int level, u_int mask)
2785 */
2786 ENTRY(tl1_intr)
 /*
 * Interrupt taken while in the kernel. Build a partial trapframe,
 * dispatch to the handler registered for this interrupt level,
 * update interrupt statistics, then unwind and retry. Capture the
 * trap state into locals first, before a nested trap can clobber it.
 */
2787 rdpr %tstate, %l0
2788 rdpr %tpc, %l1
2789 rdpr %tnpc, %l2
2790 rdpr %pil, %l3
2791 rd %y, %l4
2792 rdpr %wstate, %l5
2793
2794 #if KTR_COMPILE & KTR_INTR
2795 CATR(KTR_INTR,
2796 "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
2797 , %g1, %g2, %g3, 7, 8, 9)
2798 ldx [PCPU(CURTHREAD)], %g2
2799 stx %g2, [%g1 + KTR_PARM1]
2800 stx %o0, [%g1 + KTR_PARM2]
2801 stx %l3, [%g1 + KTR_PARM3]
2802 stx %l1, [%g1 + KTR_PARM4]
2803 stx %i6, [%g1 + KTR_PARM5]
2804 9:
2805 #endif
2806
 /*
 * Raise %pil to this interrupt's level and clear its soft interrupt
 * bit(s) using the supplied mask.
 */
2807 wrpr %o0, 0, %pil
2808 wr %o1, 0, %clear_softint
2809
 /*
 * Trap state is saved; drop back to trap level 1 (%tl is raised to 2
 * again before the retry below).
 */
2810 wrpr %g0, 1, %tl
2811
 /*
 * Keep only wstate.other and select the kernel window state.
 */
2812 and %l5, WSTATE_OTHER_MASK, %l5
2813 wrpr %l5, WSTATE_KERNEL, %wstate
2814
2815 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2816 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2817 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2818 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2819 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
2820
 /*
 * Keep the interrupt level in %l7; it is needed after the handler
 * call for the statistics lookup below.
 */
2821 mov %o0, %l7
2822 mov T_INTERRUPT | T_KERNEL, %o1
2823
2824 stx %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2825 stx %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2826
 /*
 * Only the stack pointer and return address are saved; no other outs
 * (and neither %g6 nor %g7) are preserved on this path.
 */
2827 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2828 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2829
 /*
 * Carry PCB_REG/PCPU_REG across the switch to normal globals in
 * locals, save %g1-%g5 into the frame, then re-establish the
 * pointers and return to the kernel pstate.
 */
2830 mov PCB_REG, %l4
2831 mov PCPU_REG, %l5
2832 wrpr %g0, PSTATE_NORMAL, %pstate
2833
2834 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2835 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2836 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2837 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2838 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2839
2840 mov %l4, PCB_REG
2841 mov %l5, PCPU_REG
2842 wrpr %g0, PSTATE_KERNEL, %pstate
2843
 /*
 * Look up the handler for this level in the intr_handlers table
 * (entries are 1 << IH_SHIFT bytes apart) and call it with a pointer
 * to the trapframe; the pointer is set up in the delay slot.
 */
2844 SET(intr_handlers, %l5, %l4)
2845 sllx %l7, IH_SHIFT, %l5
2846 ldx [%l4 + %l5], %l5
2847 KASSERT(%l5, "tl1_intr: ih null")
2848 call %l5
2849 add %sp, CCFSZ + SPOFF, %o0
2850
 /*
 * Bump the per-level interrupt counter: pil_countp maps the level
 * (%l7) to a 16-bit index into intrcnt, an array of 64-bit counters.
 */
2851 /* %l7 contains PIL */
2852 SET(intrcnt, %l5, %l4)
2853 prefetcha [%l4] ASI_N, 1
2854 SET(pil_countp, %l5, %l6)
2855 sllx %l7, 1, %l5
2856 lduh [%l5 + %l6], %l5
2857 sllx %l5, 3, %l5
2858 add %l5, %l4, %l4
2859 ldx [%l4], %l5
2860 inc %l5
2861 stx %l5, [%l4]
2862
 /*
 * Count the interrupt in the per-CPU V_INTR statistic.
 */
2863 lduw [PCPU(CNT) + V_INTR], %l4
2864 inc %l4
2865 stw %l4, [PCPU(CNT) + V_INTR]
2866
 /*
 * %l4 was clobbered above, so reload the saved %y value.
 */
2867 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
2868
2869 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2870 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2871 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2872 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2873 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2874
 /*
 * Switch to alternate globals and stage the values needed after the
 * restore: %tstate with the %cwp field cleared (refilled with the
 * then-current window below), %tpc and %tnpc. Also drop %pil back
 * to its pre-interrupt level and restore %y.
 */
2875 wrpr %g0, PSTATE_ALT, %pstate
2876
2877 andn %l0, TSTATE_CWP_MASK, %g1
2878 mov %l1, %g2
2879 mov %l2, %g3
2880 wrpr %l3, 0, %pil
2881 wr %l4, 0, %y
2882
2883 restore
2884
 /*
 * Back to trap level 2; rebuild the trap registers so retry resumes
 * at the interrupted instruction, with the saved %cwp replaced by
 * the current window.
 */
2885 wrpr %g0, 2, %tl
2886
2887 rdpr %cwp, %g4
2888 wrpr %g1, %g4, %tstate
2889 wrpr %g2, 0, %tpc
2890 wrpr %g3, 0, %tnpc
2891
2892 #if KTR_COMPILE & KTR_INTR
2893 CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2894 , %g2, %g3, %g4, 7, 8, 9)
2895 ldx [PCPU(CURTHREAD)], %g3
2896 stx %g3, [%g2 + KTR_PARM1]
2897 rdpr %pil, %g3
2898 stx %g3, [%g2 + KTR_PARM2]
2899 rdpr %tstate, %g3
2900 stx %g3, [%g2 + KTR_PARM3]
2901 rdpr %tpc, %g3
2902 stx %g3, [%g2 + KTR_PARM4]
2903 stx %sp, [%g2 + KTR_PARM5]
2904 9:
2905 #endif
2906
 /*
 * Resume the interrupted instruction.
 */
2907 retry
2908 END(tl1_intr)
2909
 /*
 * Marker label for the end of the trap handler text. NOTE(review):
 * presumably paired with a tl_text_begin marker earlier in the file,
 * used to identify trap-time PCs — confirm against the rest of the
 * file.
 */
2910 .globl tl_text_end
2911 tl_text_end:
2912 nop
2913
2914 /*
2915 * Freshly forked processes come here when switched to for the first time.
2916 * The arguments to fork_exit() have been setup in the locals, we must move
2917 * them to the outs.
2918 */
2919 ENTRY(fork_trampoline)
2920 #if KTR_COMPILE & KTR_PROC
2921 CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
2922 , %g1, %g2, %g3, 7, 8, 9)
2923 ldx [PCPU(CURTHREAD)], %g2
2924 stx %g2, [%g1 + KTR_PARM1]
2925 ldx [%g2 + TD_PROC], %g2
2926 add %g2, P_COMM, %g2
2927 stx %g2, [%g1 + KTR_PARM2]
2928 rdpr %cwp, %g2
2929 stx %g2, [%g1 + KTR_PARM3]
2930 9:
2931 #endif
 /*
 * The fork_exit() arguments were placed in %l0-%l2 when the thread
 * was set up; move them to the outs. The third argument is set in
 * the call's delay slot.
 */
2932 mov %l0, %o0
2933 mov %l1, %o1
2934 call fork_exit
2935 mov %l2, %o2
 /*
 * Complete the return to usermode via the common trap return path.
 */
2936 ba,a %xcc, tl0_ret
2937 nop
2938 END(fork_trampoline)
Cache object: 9853be214e6482dbf101da242e4359b2
|