FreeBSD/Linux Kernel Cross Reference
sys/sparc64/sparc64/exception.S


    1 /*-
    2  * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
    3  *
    4  * Redistribution and use in source and binary forms, with or without
    5  * modification, are permitted provided that the following conditions
    6  * are met:
    7  * 1. Redistributions of source code must retain the above copyright
    8  *    notice, this list of conditions and the following disclaimer.
    9  * 2. Redistributions in binary form must reproduce the above copyright
   10  *    notice, this list of conditions and the following disclaimer in the
   11  *    documentation and/or other materials provided with the distribution.
   12  * 3. Berkeley Software Design Inc's name may not be used to endorse or
   13  *    promote products derived from this software without specific prior
   14  *    written permission.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  *
   28  *      BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
   29  */
   30 /*-
   31  * Copyright (c) 2001 Jake Burkholder.
   32  * All rights reserved.
   33  *
   34  * Redistribution and use in source and binary forms, with or without
   35  * modification, are permitted provided that the following conditions
   36  * are met:
   37  * 1. Redistributions of source code must retain the above copyright
   38  *    notice, this list of conditions and the following disclaimer.
   39  * 2. Redistributions in binary form must reproduce the above copyright
   40  *    notice, this list of conditions and the following disclaimer in the
   41  *    documentation and/or other materials provided with the distribution.
   42  *
   43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   53  * SUCH DAMAGE.
   54  */
   55 
   56 #include <machine/asm.h>
   57 __FBSDID("$FreeBSD: releng/5.2/sys/sparc64/sparc64/exception.S 117658 2003-07-16 00:08:43Z jmg $");
   58 
   59 #include "opt_compat.h"
   60 #include "opt_ddb.h"
   61 
   62 #include <machine/asi.h>
   63 #include <machine/asmacros.h>
   64 #include <machine/ktr.h>
   65 #include <machine/pstate.h>
   66 #include <machine/trap.h>
   67 #include <machine/tstate.h>
   68 #include <machine/wstate.h>
   69 
   70 #include "assym.s"
   71 
   72 #define TSB_KERNEL_MASK 0x0
   73 #define TSB_KERNEL      0x0
   74 
   75         .register %g2,#ignore
   76         .register %g3,#ignore
   77         .register %g6,#ignore
   78         .register %g7,#ignore
   79 
   80 /*
   81  * Atomically set the reference bit in a tte.
   82  */
   83 #define TTE_SET_BIT(r1, r2, r3, bit) \
   84         add     r1, TTE_DATA, r1 ; \
   85         ldx     [r1], r2 ; \
   86 9:      or      r2, bit, r3 ; \
   87         casxa   [r1] ASI_N, r2, r3 ; \
   88         cmp     r2, r3 ; \
   89         bne,pn  %xcc, 9b ; \
   90          mov    r3, r2
   91 
   92 #define TTE_SET_REF(r1, r2, r3)         TTE_SET_BIT(r1, r2, r3, TD_REF)
   93 #define TTE_SET_W(r1, r2, r3)           TTE_SET_BIT(r1, r2, r3, TD_W)
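
A minimal C sketch of the same compare-and-swap loop, using C11 atomics
(the function name is illustrative, not from the kernel):

#include <stdatomic.h>
#include <stdint.h>

/*
 * Sketch of TTE_SET_BIT: atomically OR 'bit' into the tte data word,
 * retrying until the CAS succeeds.  Returns the value the successful
 * CAS observed; callers re-check its valid bit (bit 63), since the tte
 * may have been invalidated between the load and the casxa.
 */
static inline uint64_t
tte_set_bit(_Atomic uint64_t *tte_data, uint64_t bit)
{
        uint64_t old = atomic_load(tte_data);

        /* casxa analogue: install old | bit only if the word is still old. */
        while (!atomic_compare_exchange_weak(tte_data, &old, old | bit))
                ;       /* the failed CAS reloaded 'old'; retry */
        return (old);
}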
   94 
   95 /*
   96  * Macros for spilling and filling live windows.
   97  *
   98  * NOTE: These macros use exactly 16 instructions, and it is assumed that the
   99  * handler will not use more than 24 instructions total, to leave room for
  100  * resume vectors which occupy the last 8 instructions.
  101  */
  102 
  103 #define SPILL(storer, base, size, asi) \
  104         storer  %l0, [base + (0 * size)] asi ; \
  105         storer  %l1, [base + (1 * size)] asi ; \
  106         storer  %l2, [base + (2 * size)] asi ; \
  107         storer  %l3, [base + (3 * size)] asi ; \
  108         storer  %l4, [base + (4 * size)] asi ; \
  109         storer  %l5, [base + (5 * size)] asi ; \
  110         storer  %l6, [base + (6 * size)] asi ; \
  111         storer  %l7, [base + (7 * size)] asi ; \
  112         storer  %i0, [base + (8 * size)] asi ; \
  113         storer  %i1, [base + (9 * size)] asi ; \
  114         storer  %i2, [base + (10 * size)] asi ; \
  115         storer  %i3, [base + (11 * size)] asi ; \
  116         storer  %i4, [base + (12 * size)] asi ; \
  117         storer  %i5, [base + (13 * size)] asi ; \
  118         storer  %i6, [base + (14 * size)] asi ; \
  119         storer  %i7, [base + (15 * size)] asi
  120 
  121 #define FILL(loader, base, size, asi) \
  122         loader  [base + (0 * size)] asi, %l0 ; \
  123         loader  [base + (1 * size)] asi, %l1 ; \
  124         loader  [base + (2 * size)] asi, %l2 ; \
  125         loader  [base + (3 * size)] asi, %l3 ; \
  126         loader  [base + (4 * size)] asi, %l4 ; \
  127         loader  [base + (5 * size)] asi, %l5 ; \
  128         loader  [base + (6 * size)] asi, %l6 ; \
  129         loader  [base + (7 * size)] asi, %l7 ; \
  130         loader  [base + (8 * size)] asi, %i0 ; \
  131         loader  [base + (9 * size)] asi, %i1 ; \
  132         loader  [base + (10 * size)] asi, %i2 ; \
  133         loader  [base + (11 * size)] asi, %i3 ; \
  134         loader  [base + (12 * size)] asi, %i4 ; \
  135         loader  [base + (13 * size)] asi, %i5 ; \
  136         loader  [base + (14 * size)] asi, %i6 ; \
  137         loader  [base + (15 * size)] asi, %i7
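
The 16 slots written by SPILL (and read back by FILL) hold the eight
local and eight in registers of the window being saved.  A sketch of
the layout, modeled on FreeBSD's struct rwindow (illustrative):

#include <stdint.h>

/*
 * A spilled register window: SPILL stores %l0-%l7 to slots 0-7 and
 * %i0-%i7 to slots 8-15 at the base of the frame addressed by %sp
 * (plus the SPOFF stack bias for 64-bit frames).
 */
struct rwindow_sketch {
        uint64_t rw_local[8];   /* %l0-%l7 */
        uint64_t rw_in[8];      /* %i0-%i7 */
};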
  138 
  139 #define ERRATUM50(reg)  mov reg, reg
  140 
  141 #define KSTACK_SLOP     1024
  142 
  143 /*
   144  * Sanity check the kernel stack and bail out if it's wrong.
  145  * XXX: doesn't handle being on the panic stack.
  146  */
  147 #define KSTACK_CHECK \
  148         dec     16, ASP_REG ; \
  149         stx     %g1, [ASP_REG + 0] ; \
  150         stx     %g2, [ASP_REG + 8] ; \
  151         add     %sp, SPOFF, %g1 ; \
  152         andcc   %g1, (1 << PTR_SHIFT) - 1, %g0 ; \
  153         bnz,a   %xcc, tl1_kstack_fault ; \
  154          inc    16, ASP_REG ; \
  155         ldx     [PCPU(CURTHREAD)], %g2 ; \
  156         ldx     [%g2 + TD_KSTACK], %g2 ; \
  157         add     %g2, KSTACK_SLOP, %g2 ; \
  158         subcc   %g1, %g2, %g1 ; \
  159         ble,a   %xcc, tl1_kstack_fault ; \
  160          inc    16, ASP_REG ; \
  161         set     KSTACK_PAGES * PAGE_SIZE, %g2 ; \
  162         cmp     %g1, %g2 ; \
  163         bgt,a   %xcc, tl1_kstack_fault ; \
  164          inc    16, ASP_REG ; \
  165         ldx     [ASP_REG + 8], %g2 ; \
  166         ldx     [ASP_REG + 0], %g1 ; \
  167         inc     16, ASP_REG
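
The checks above reduce to simple bounds arithmetic.  A hedged C sketch
(the function is illustrative; sp is the biased stack pointer, i.e.
%sp + SPOFF):

#include <stdint.h>

/*
 * Sketch of KSTACK_CHECK: the stack pointer must be pointer-aligned
 * and must lie in (kstack + KSTACK_SLOP,
 * kstack + KSTACK_SLOP + KSTACK_PAGES * PAGE_SIZE]; anything else
 * branches to tl1_kstack_fault.
 */
static inline int
kstack_ok(uint64_t sp, uint64_t kstack)
{
        int64_t off;

        if ((sp & ((1UL << PTR_SHIFT) - 1)) != 0)
                return (0);             /* misaligned */
        off = (int64_t)(sp - (kstack + KSTACK_SLOP));
        if (off <= 0)
                return (0);             /* at or below the slop headroom */
        if (off > KSTACK_PAGES * PAGE_SIZE)
                return (0);             /* beyond the top of this stack */
        return (1);
}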
  168 
  169 ENTRY(tl1_kstack_fault)
  170         rdpr    %tl, %g1
  171 1:      cmp     %g1, 2
  172         be,a    2f
  173          nop
  174 
  175 #if KTR_COMPILE & KTR_TRAP
  176         CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
  177             , %g2, %g3, %g4, 7, 8, 9)
  178         rdpr    %tl, %g3
   179         stx     %g3, [%g2 + KTR_PARM1]
   180         rdpr    %tpc, %g3
   181         stx     %g3, [%g2 + KTR_PARM2]
   182         rdpr    %tnpc, %g3
   183         stx     %g3, [%g2 + KTR_PARM3]
  184 9:
  185 #endif
  186 
  187         sub     %g1, 1, %g1
  188         wrpr    %g1, 0, %tl
  189         ba,a    %xcc, 1b
  190          nop
  191 
  192 2:
  193 #if KTR_COMPILE & KTR_TRAP
  194         CATR(KTR_TRAP,
  195             "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
  196             , %g1, %g2, %g3, 7, 8, 9)
  197         add     %sp, SPOFF, %g2
  198         stx     %g2, [%g1 + KTR_PARM1]
  199         ldx     [PCPU(CURTHREAD)], %g2
  200         ldx     [%g2 + TD_KSTACK], %g2
  201         stx     %g2, [%g1 + KTR_PARM2]
  202         rdpr    %canrestore, %g2
  203         stx     %g2, [%g1 + KTR_PARM3]
  204         rdpr    %cansave, %g2
  205         stx     %g2, [%g1 + KTR_PARM4]
  206         rdpr    %otherwin, %g2
  207         stx     %g2, [%g1 + KTR_PARM5]
  208         rdpr    %wstate, %g2
  209         stx     %g2, [%g1 + KTR_PARM6]
  210 9:
  211 #endif
  212 
  213         wrpr    %g0, 0, %canrestore
  214         wrpr    %g0, 6, %cansave
  215         wrpr    %g0, 0, %otherwin
  216         wrpr    %g0, WSTATE_KERNEL, %wstate
  217 
  218         sub     ASP_REG, SPOFF + CCFSZ, %sp
  219         clr     %fp
  220 
  221         set     trap, %o2
  222         ba      %xcc, tl1_trap
  223          mov    T_KSTACK_FAULT | T_KERNEL, %o0
  224 END(tl1_kstack_fault)
  225 
  226 /*
  227  * Magic to resume from a spill or fill trap.  If we get an alignment or an
  228  * mmu fault during a spill or a fill, this macro will detect the fault and
  229  * resume at a set instruction offset in the trap handler.
  230  *
  231  * To check if the previous trap was a spill/fill we convert the trapped pc
  232  * to a trap type and verify that it is in the range of spill/fill vectors.
   233  * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff; masking off the
  234  * tl bit allows us to detect both ranges with one test.
  235  *
  236  * This is:
  237  *      0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
  238  *
  239  * To calculate the new pc we take advantage of the xor feature of wrpr.
  240  * Forcing all the low bits of the trapped pc on we can produce any offset
  241  * into the spill/fill vector.  The size of a spill/fill trap vector is 0x80.
  242  *
  243  *      0x7f ^ 0x1f == 0x60
  244  *      0x1f == (0x80 - 0x60) - 1
  245  *
  246  * Which are the offset and xor value used to resume from alignment faults.
  247  */
  248 
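Concretely, the test and the xor trick work out as follows; a C sketch
of both computations (illustrative only):

#include <stdint.h>

/*
 * Each trap vector slot is 8 instructions (32 bytes), so
 * (%tpc - %tba) >> 5 recovers the trap type; clearing bit 9 folds the
 * TL=1 spill/fill range 0x280-0x2ff onto the TL=0 range 0x80-0xff.
 */
static inline int
in_spillfill(uint64_t tpc, uint64_t tba)
{
        uint64_t tt = ((tpc - tba) >> 5) & ~(uint64_t)0x200;

        return (tt >= 0x80 && tt < 0x100);
}

/*
 * wrpr XORs its operands.  Subtracting from 0x7f never borrows, so
 * RSF_XOR(off) == (0x80 - off) - 1 == 0x7f ^ off, and forcing the low
 * 7 bits of %tpc on before the XOR lands exactly 'off' bytes into the
 * 0x80-byte spill/fill vector.
 */
static inline uint64_t
resume_pc(uint64_t tpc, uint64_t off)
{
        return ((tpc | 0x7f) ^ ((0x80 - off) - 1));
}
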
  249 /*
  250  * Determine if we have trapped inside of a spill/fill vector, and if so resume
  251  * at a fixed instruction offset in the trap vector.  Must be called on
  252  * alternate globals.
  253  */
  254 #define RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
  255         dec     16, ASP_REG ; \
  256         stx     %g1, [ASP_REG + 0] ; \
  257         stx     %g2, [ASP_REG + 8] ; \
  258         rdpr    %tpc, %g1 ; \
  259         ERRATUM50(%g1) ; \
  260         rdpr    %tba, %g2 ; \
  261         sub     %g1, %g2, %g2 ; \
  262         srlx    %g2, 5, %g2 ; \
  263         andn    %g2, 0x200, %g2 ; \
  264         cmp     %g2, 0x80 ; \
  265         blu,pt  %xcc, 9f ; \
  266          cmp    %g2, 0x100 ; \
  267         bgeu,pt %xcc, 9f ; \
  268          or     %g1, 0x7f, %g1 ; \
  269         wrpr    %g1, xor, %tnpc ; \
  270         stxa_g0_sfsr ; \
  271         ldx     [ASP_REG + 8], %g2 ; \
  272         ldx     [ASP_REG + 0], %g1 ; \
  273         inc     16, ASP_REG ; \
  274         done ; \
  275 9:      ldx     [ASP_REG + 8], %g2 ; \
  276         ldx     [ASP_REG + 0], %g1 ; \
  277         inc     16, ASP_REG
  278 
  279 /*
  280  * For certain faults we need to clear the sfsr mmu register before returning.
  281  */
  282 #define RSF_CLR_SFSR \
  283         wr      %g0, ASI_DMMU, %asi ; \
  284         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
  285 
  286 #define RSF_XOR(off)    ((0x80 - off) - 1)
  287 
  288 /*
  289  * Instruction offsets in spill and fill trap handlers for handling certain
  290  * nested traps, and corresponding xor constants for wrpr.
  291  */
  292 #define RSF_OFF_ALIGN   0x60
  293 #define RSF_OFF_MMU     0x70
  294 
  295 #define RESUME_SPILLFILL_ALIGN \
  296         RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
  297 #define RESUME_SPILLFILL_MMU \
  298         RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
  299 #define RESUME_SPILLFILL_MMU_CLR_SFSR \
  300         RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
  301 
  302 /*
  303  * Constant to add to %tnpc when taking a fill trap just before returning to
  304  * user mode.
  305  */
  306 #define RSF_FILL_INC    tl0_ret_fill_end - tl0_ret_fill
  307 
  308 /*
  309  * Retry a spill or fill with a different wstate due to an alignment fault.
  310  * We may just be using the wrong stack offset.
  311  */
  312 #define RSF_ALIGN_RETRY(ws) \
  313         wrpr    %g0, (ws), %wstate ; \
  314         retry ; \
  315         .align  16
  316 
  317 /*
  318  * Generate a T_SPILL or T_FILL trap if the window operation fails.
  319  */
  320 #define RSF_TRAP(type) \
  321         ba      %xcc, tl0_sftrap ; \
  322          mov    type, %g2 ; \
  323         .align  16
  324 
  325 /*
  326  * Game over if the window operation fails.
  327  */
  328 #define RSF_FATAL(type) \
  329         ba      %xcc, rsf_fatal ; \
  330          mov    type, %g2 ; \
  331         .align  16
  332 
  333 /*
   334  * Magic to resume from a failed fill a few instructions after the corresponding
  335  * restore.  This is used on return from the kernel to usermode.
  336  */
  337 #define RSF_FILL_MAGIC \
  338         rdpr    %tnpc, %g1 ; \
  339         add     %g1, RSF_FILL_INC, %g1 ; \
  340         wrpr    %g1, 0, %tnpc ; \
  341         done ; \
  342         .align  16
  343 
  344 /*
  345  * Spill to the pcb if a spill to the user stack in kernel mode fails.
  346  */
  347 #define RSF_SPILL_TOPCB \
  348         ba,a    %xcc, tl1_spill_topcb ; \
  349          nop ; \
  350         .align  16
  351 
  352 ENTRY(rsf_fatal)
  353 #if KTR_COMPILE & KTR_TRAP
  354         CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
  355             , %g1, %g3, %g4, 7, 8, 9)
  356         rdpr    %tt, %g3
  357         stx     %g3, [%g1 + KTR_PARM1]
  358         stx     %g2, [%g1 + KTR_PARM2]
  359 9:
  360 #endif
  361 
  362         KSTACK_CHECK
  363 
  364         sir
  365 END(rsf_fatal)
  366 
  367         .comm   intrnames, IV_NAMLEN
  368         .comm   eintrnames, 0
  369 
  370         .comm   intrcnt, IV_MAX * 8
  371         .comm   eintrcnt, 0
  372 
  373 /*
  374  * Trap table and associated macros
  375  *
   376  * Due to its size, a trap table is an inherently hard thing to represent in
   377  * code in a clean way.  There are approximately 1024 vectors, of 8 or 32
   378  * instructions each, many of which are identical.  The way this is laid out
   379  * is that the instructions (8 or 32) for each trap vector appear as an AS
   380  * macro.  In general this code branches to tl0_trap or tl1_trap, but if
   381  * not, supporting code can be placed just after the definition of the
   382  * macro.  The macros are then instantiated in a different section (.trap),
   383  * which is set up to be placed by the linker at the beginning of .text, and
   384  * the code around the macros is moved to the end of the trap table.  In
   385  * this way the code that must be sequential in memory can be split up and
   386  * located near its supporting code, so that it is easier to follow.
  387  */
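
The sizing works out as follows: the SPARC V9 trap table has 512
vectors for TL=0 and 512 for TL>0, each 8 instructions of 4 bytes, for
32KB in total; spill/fill handlers get their 32 instructions by
spanning four consecutive slots.  A small compile-time check of the
arithmetic (illustrative):

/* 2 trap levels x 512 trap types x 8 instructions x 4 bytes = 32KB. */
_Static_assert(2 * 512 * 8 * 4 == 0x8000, "trap table is 32KB");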
  388 
  389         /*
  390          * Clean window traps occur when %cleanwin is zero to ensure that data
  391          * is not leaked between address spaces in registers.
  392          */
  393         .macro  clean_window
  394         clr     %o0
  395         clr     %o1
  396         clr     %o2
  397         clr     %o3
  398         clr     %o4
  399         clr     %o5
  400         clr     %o6
  401         clr     %o7
  402         clr     %l0
  403         clr     %l1
  404         clr     %l2
  405         clr     %l3
  406         clr     %l4
  407         clr     %l5
  408         clr     %l6
  409         rdpr    %cleanwin, %l7
  410         inc     %l7
  411         wrpr    %l7, 0, %cleanwin
  412         clr     %l7
  413         retry
  414         .align  128
  415         .endm
  416 
  417         /*
  418          * Stack fixups for entry from user mode.  We are still running on the
  419          * user stack, and with its live registers, so we must save soon.  We
  420          * are on alternate globals so we do have some registers.  Set the
   421          * transitional window state, and do the save.  If this traps,
   422          * we attempt to spill a window to the user stack.  If this fails,
  423          * we spill the window to the pcb and continue.  Spilling to the pcb
  424          * must not fail.
  425          *
  426          * NOTE: Must be called with alternate globals and clobbers %g1.
  427          */
  428 
  429         .macro  tl0_split
  430         rdpr    %wstate, %g1
  431         wrpr    %g1, WSTATE_TRANSITION, %wstate
  432         save
  433         .endm
  434 
  435         .macro  tl0_setup       type
  436         tl0_split
  437         clr     %o1
  438         set     trap, %o2
  439         ba      %xcc, tl0_utrap
  440          mov    \type, %o0
  441         .endm
  442 
  443         /*
  444          * Generic trap type.  Call trap() with the specified type.
  445          */
  446         .macro  tl0_gen         type
  447         tl0_setup \type
  448         .align  32
  449         .endm
  450 
  451         /*
  452          * This is used to suck up the massive swaths of reserved trap types.
  453          * Generates count "reserved" trap vectors.
  454          */
  455         .macro  tl0_reserved    count
  456         .rept   \count
  457         tl0_gen T_RESERVED
  458         .endr
  459         .endm
  460 
  461         .macro  tl1_split
  462         rdpr    %wstate, %g1
  463         wrpr    %g1, WSTATE_NESTED, %wstate
  464         save    %sp, -(CCFSZ + TF_SIZEOF), %sp
  465         .endm
  466 
  467         .macro  tl1_setup       type
  468         tl1_split
  469         clr     %o1
  470         set     trap, %o2
  471         ba      %xcc, tl1_trap
  472          mov    \type | T_KERNEL, %o0
  473         .endm
  474 
  475         .macro  tl1_gen         type
  476         tl1_setup \type
  477         .align  32
  478         .endm
  479 
  480         .macro  tl1_reserved    count
  481         .rept   \count
  482         tl1_gen T_RESERVED
  483         .endr
  484         .endm
  485 
  486         .macro  tl0_insn_excptn
  487         wrpr    %g0, PSTATE_ALT, %pstate
  488         wr      %g0, ASI_IMMU, %asi
  489         rdpr    %tpc, %g3
  490         ldxa    [%g0 + AA_IMMU_SFSR] %asi, %g4
  491         stxa    %g0, [%g0 + AA_IMMU_SFSR] %asi
  492         membar  #Sync
  493         ba      %xcc, tl0_sfsr_trap
  494          mov    T_INSTRUCTION_EXCEPTION, %g2
  495         .align  32
  496         .endm
  497 
  498         .macro  tl0_data_excptn
  499         wrpr    %g0, PSTATE_ALT, %pstate
  500         wr      %g0, ASI_DMMU, %asi
  501         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
  502         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
  503         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
  504         membar  #Sync
  505         ba      %xcc, tl0_sfsr_trap
  506          mov    T_DATA_EXCEPTION, %g2
  507         .align  32
  508         .endm
  509 
  510         .macro  tl0_align
  511         wr      %g0, ASI_DMMU, %asi
  512         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
  513         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
  514         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
  515         membar  #Sync
  516         ba      %xcc, tl0_sfsr_trap
  517          mov    T_MEM_ADDRESS_NOT_ALIGNED, %g2
  518         .align  32
  519         .endm
  520 
  521 ENTRY(tl0_sfsr_trap)
  522         tl0_split
  523         clr     %o1
  524         set     trap, %o2
  525         mov     %g3, %o4
  526         mov     %g4, %o5
  527         ba      %xcc, tl0_utrap
  528          mov    %g2, %o0
  529 END(tl0_sfsr_trap)
  530 
  531         .macro  tl0_intr level, mask
  532         tl0_split
  533         set     \mask, %o1
  534         ba      %xcc, tl0_intr
  535          mov    \level, %o0
  536         .align  32
  537         .endm
  538 
  539 #define INTR(level, traplvl)                                            \
  540         tl ## traplvl ## _intr  level, 1 << level
  541 
  542 #define TICK(traplvl) \
  543         tl ## traplvl ## _intr  PIL_TICK, 1
  544 
  545 #define INTR_LEVEL(tl)                                                  \
  546         INTR(1, tl) ;                                                   \
  547         INTR(2, tl) ;                                                   \
  548         INTR(3, tl) ;                                                   \
  549         INTR(4, tl) ;                                                   \
  550         INTR(5, tl) ;                                                   \
  551         INTR(6, tl) ;                                                   \
  552         INTR(7, tl) ;                                                   \
  553         INTR(8, tl) ;                                                   \
  554         INTR(9, tl) ;                                                   \
  555         INTR(10, tl) ;                                                  \
  556         INTR(11, tl) ;                                                  \
  557         INTR(12, tl) ;                                                  \
  558         INTR(13, tl) ;                                                  \
  559         TICK(tl) ;                                                      \
  560         INTR(15, tl) ;
  561 
  562         .macro  tl0_intr_level
  563         INTR_LEVEL(0)
  564         .endm
  565 
  566         .macro  intr_vector
  567         ldxa    [%g0] ASI_INTR_RECEIVE, %g1
  568         andcc   %g1, IRSR_BUSY, %g0
  569         bnz,a,pt %xcc, intr_vector
  570          nop
  571         sir
  572         .align  32
  573         .endm
  574 
  575         .macro  tl0_immu_miss
  576         /*
  577          * Load the virtual page number and context from the tag access
  578          * register.  We ignore the context.
  579          */
  580         wr      %g0, ASI_IMMU, %asi
  581         ldxa    [%g0 + AA_IMMU_TAR] %asi, %g1
  582 
  583         /*
  584          * Initialize the page size walker.
  585          */
  586         mov     TS_MIN, %g2
  587 
  588         /*
  589          * Loop over all supported page sizes.
  590          */
  591 
  592         /*
  593          * Compute the page shift for the page size we are currently looking
  594          * for.
  595          */
  596 1:      add     %g2, %g2, %g3
  597         add     %g3, %g2, %g3
  598         add     %g3, PAGE_SHIFT, %g3
  599 
  600         /*
  601          * Extract the virtual page number from the contents of the tag
  602          * access register.
  603          */
  604         srlx    %g1, %g3, %g3
  605 
  606         /*
  607          * Compute the tte bucket address.
  608          */
  609         ldxa    [%g0 + AA_IMMU_TSB] %asi, %g5
  610         and     %g3, TSB_BUCKET_MASK, %g4
  611         sllx    %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
  612         add     %g4, %g5, %g4
  613 
  614         /*
  615          * Compute the tte tag target.
  616          */
  617         sllx    %g3, TV_SIZE_BITS, %g3
  618         or      %g3, %g2, %g3
  619 
  620         /*
  621          * Loop over the ttes in this bucket
  622          */
  623 
  624         /*
  625          * Load the tte.  Note that this instruction may fault, clobbering
  626          * the contents of the tag access register, %g5, %g6, and %g7.  We
  627          * do not use %g5, and %g6 and %g7 are not used until this instruction
  628          * completes successfully.
  629          */
  630 2:      ldda    [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
  631 
  632         /*
   633          * Check that it's valid and executable and that the tte tags match.
  634          */
  635         brgez,pn %g7, 3f
  636          andcc  %g7, TD_EXEC, %g0
  637         bz,pn   %xcc, 3f
  638          cmp    %g3, %g6
  639         bne,pn  %xcc, 3f
  640          EMPTY
  641 
  642         /*
  643          * We matched a tte, load the tlb.
  644          */
  645 
  646         /*
  647          * Set the reference bit, if it's currently clear.
  648          */
  649          andcc  %g7, TD_REF, %g0
  650         bz,a,pn %xcc, tl0_immu_miss_set_ref
  651          nop
  652 
  653         /*
  654          * Load the tte tag and data into the tlb and retry the instruction.
  655          */
  656         stxa    %g1, [%g0 + AA_IMMU_TAR] %asi
  657         stxa    %g7, [%g0] ASI_ITLB_DATA_IN_REG
  658         retry
  659 
  660         /*
  661          * Advance to the next tte in this bucket, and check the low bits
  662          * of the bucket pointer to see if we've finished the bucket.
  663          */
  664 3:      add     %g4, 1 << TTE_SHIFT, %g4
  665         andcc   %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
  666         bnz,pt  %xcc, 2b
  667          EMPTY
  668 
  669         /*
  670          * See if we just checked the largest page size, and advance to the
  671          * next one if not.
  672          */
  673          cmp    %g2, TS_MAX
  674         bne,pt  %xcc, 1b
  675          add    %g2, 1, %g2
  676 
  677         /*
  678          * Not in user tsb, call c code.
  679          */
  680         ba,a    %xcc, tl0_immu_miss_trap
  681         .align  128
  682         .endm
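
A C sketch of the walk above (struct and constant names modeled on the
sparc64 pmap code; treat them as assumptions).  Supported page sizes
grow by a factor of 8, so the shift for page size index ts is
PAGE_SHIFT + 3 * ts, which the assembly computes with the two adds at
label 1:

#include <stdint.h>

struct tte {
        uint64_t tte_vpn;       /* tag: (vpn << TV_SIZE_BITS) | size */
        uint64_t tte_data;
};

/* Byte address of the TSB bucket for va at page size index ts. */
static inline struct tte *
tsb_bucket(uintptr_t tsb, uint64_t va, unsigned ts)
{
        uint64_t vpn = va >> (PAGE_SHIFT + 3 * ts);

        return ((struct tte *)(tsb +
            ((vpn & TSB_BUCKET_MASK) << (TSB_BUCKET_SHIFT + TTE_SHIFT))));
}

/* Tag target compared against tte_vpn for each tte in the bucket. */
static inline uint64_t
tsb_tag_target(uint64_t va, unsigned ts)
{
        return (((va >> (PAGE_SHIFT + 3 * ts)) << TV_SIZE_BITS) | ts);
}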
  683 
  684 ENTRY(tl0_immu_miss_set_ref)
  685         /*
  686          * Set the reference bit.
  687          */
  688         TTE_SET_REF(%g4, %g2, %g3)
  689 
  690         /*
  691          * May have become invalid during casxa, in which case start over.
  692          */
  693         brgez,pn %g2, 1f
  694          nop
  695 
  696         /*
  697          * Load the tte tag and data into the tlb and retry the instruction.
  698          */
  699         stxa    %g1, [%g0 + AA_IMMU_TAR] %asi
  700         stxa    %g2, [%g0] ASI_ITLB_DATA_IN_REG
  701 1:      retry
  702 END(tl0_immu_miss_set_ref)
  703 
  704 ENTRY(tl0_immu_miss_trap)
  705         /*
  706          * Put back the contents of the tag access register, in case we
  707          * faulted.
  708          */
  709         stxa    %g1, [%g0 + AA_IMMU_TAR] %asi
  710         membar  #Sync
  711 
  712         /*
  713          * Switch to alternate globals.
  714          */
  715         wrpr    %g0, PSTATE_ALT, %pstate
  716 
  717         /*
  718          * Reload the tag access register.
  719          */
  720         ldxa    [%g0 + AA_IMMU_TAR] %asi, %g2
  721 
  722         /*
  723          * Save the tag access register, and call common trap code.
  724          */
  725         tl0_split
  726         clr     %o1
  727         set     trap, %o2
  728         mov     %g2, %o3
  729         ba      %xcc, tl0_utrap
  730          mov    T_INSTRUCTION_MISS, %o0
  731 END(tl0_immu_miss_trap)
  732 
  733         .macro  tl0_dmmu_miss
  734         /*
  735          * Load the virtual page number and context from the tag access
  736          * register.  We ignore the context.
  737          */
  738         wr      %g0, ASI_DMMU, %asi
  739         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g1
  740 
  741         /*
  742          * Initialize the page size walker.
  743          */
  744 tl1_dmmu_miss_user:
  745         mov     TS_MIN, %g2
  746 
  747         /*
  748          * Loop over all supported page sizes.
  749          */
  750 
  751         /*
  752          * Compute the page shift for the page size we are currently looking
  753          * for.
  754          */
  755 1:      add     %g2, %g2, %g3
  756         add     %g3, %g2, %g3
  757         add     %g3, PAGE_SHIFT, %g3
  758 
  759         /*
  760          * Extract the virtual page number from the contents of the tag
  761          * access register.
  762          */
  763         srlx    %g1, %g3, %g3
  764 
  765         /*
  766          * Compute the tte bucket address.
  767          */
  768         ldxa    [%g0 + AA_DMMU_TSB] %asi, %g5
  769         and     %g3, TSB_BUCKET_MASK, %g4
  770         sllx    %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
  771         add     %g4, %g5, %g4
  772 
  773         /*
  774          * Compute the tte tag target.
  775          */
  776         sllx    %g3, TV_SIZE_BITS, %g3
  777         or      %g3, %g2, %g3
  778 
  779         /*
  780          * Loop over the ttes in this bucket
  781          */
  782 
  783         /*
  784          * Load the tte.  Note that this instruction may fault, clobbering
  785          * the contents of the tag access register, %g5, %g6, and %g7.  We
  786          * do not use %g5, and %g6 and %g7 are not used until this instruction
  787          * completes successfully.
  788          */
  789 2:      ldda    [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
  790 
  791         /*
   792          * Check that it's valid and that the virtual page numbers match.
  793          */
  794         brgez,pn %g7, 3f
  795          cmp    %g3, %g6
  796         bne,pn  %xcc, 3f
  797          EMPTY
  798 
  799         /*
  800          * We matched a tte, load the tlb.
  801          */
  802 
  803         /*
  804          * Set the reference bit, if it's currently clear.
  805          */
  806          andcc  %g7, TD_REF, %g0
  807         bz,a,pn %xcc, tl0_dmmu_miss_set_ref
  808          nop
  809 
  810         /*
  811          * Load the tte tag and data into the tlb and retry the instruction.
  812          */
  813         stxa    %g1, [%g0 + AA_DMMU_TAR] %asi
  814         stxa    %g7, [%g0] ASI_DTLB_DATA_IN_REG
  815         retry
  816 
  817         /*
  818          * Advance to the next tte in this bucket, and check the low bits
  819          * of the bucket pointer to see if we've finished the bucket.
  820          */
  821 3:      add     %g4, 1 << TTE_SHIFT, %g4
  822         andcc   %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
  823         bnz,pt  %xcc, 2b
  824          EMPTY
  825 
  826         /*
  827          * See if we just checked the largest page size, and advance to the
  828          * next one if not.
  829          */
  830          cmp    %g2, TS_MAX
  831         bne,pt  %xcc, 1b
  832          add    %g2, 1, %g2
  833 
  834         /*
  835          * Not in user tsb, call c code.
  836          */
  837         ba,a    %xcc, tl0_dmmu_miss_trap
  838         .align  128
  839         .endm
  840 
  841 ENTRY(tl0_dmmu_miss_set_ref)
  842         /*
  843          * Set the reference bit.
  844          */
  845         TTE_SET_REF(%g4, %g2, %g3)
  846 
  847         /*
  848          * May have become invalid during casxa, in which case start over.
  849          */
  850         brgez,pn %g2, 1f
  851          nop
  852 
  853         /*
  854          * Load the tte tag and data into the tlb and retry the instruction.
  855          */
  856         stxa    %g1, [%g0 + AA_DMMU_TAR] %asi
  857         stxa    %g2, [%g0] ASI_DTLB_DATA_IN_REG
  858 1:      retry
  859 END(tl0_dmmu_miss_set_ref)
  860 
  861 ENTRY(tl0_dmmu_miss_trap)
  862         /*
  863          * Put back the contents of the tag access register, in case we
  864          * faulted.
  865          */
  866         stxa    %g1, [%g0 + AA_DMMU_TAR] %asi
  867         membar  #Sync
  868 
  869         /*
  870          * Switch to alternate globals.
  871          */
  872         wrpr    %g0, PSTATE_ALT, %pstate
  873 
  874         /*
  875          * Check if we actually came from the kernel.
  876          */
  877         rdpr    %tl, %g1
  878         cmp     %g1, 1
  879         bgt,a,pn %xcc, 1f
  880          nop
  881 
  882         /*
  883          * Reload the tag access register.
  884          */
  885         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
  886 
  887         /*
  888          * Save the tag access register and call common trap code.
  889          */
  890         tl0_split
  891         clr     %o1
  892         set     trap, %o2
  893         mov     %g2, %o3
  894         ba      %xcc, tl0_utrap
  895          mov    T_DATA_MISS, %o0
  896 
  897         /*
  898          * Handle faults during window spill/fill.
  899          */
  900 1:      RESUME_SPILLFILL_MMU
  901 
  902         /*
  903          * Reload the tag access register.
  904          */
  905         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
  906 
  907         tl1_split
  908         clr     %o1
  909         set     trap, %o2
  910         mov     %g2, %o3
  911         ba      %xcc, tl1_trap
  912          mov    T_DATA_MISS | T_KERNEL, %o0
  913 END(tl0_dmmu_miss_trap)
  914 
  915         .macro  tl0_dmmu_prot
  916         ba,a    %xcc, tl0_dmmu_prot_1
  917          nop
  918         .align  128
  919         .endm
  920 
  921 ENTRY(tl0_dmmu_prot_1)
  922         /*
  923          * Load the virtual page number and context from the tag access
  924          * register.  We ignore the context.
  925          */
  926         wr      %g0, ASI_DMMU, %asi
  927         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g1
  928 
  929         /*
  930          * Initialize the page size walker.
  931          */
  932 tl1_dmmu_prot_user:
  933         mov     TS_MIN, %g2
  934 
  935         /*
  936          * Loop over all supported page sizes.
  937          */
  938 
  939         /*
  940          * Compute the page shift for the page size we are currently looking
  941          * for.
  942          */
  943 1:      add     %g2, %g2, %g3
  944         add     %g3, %g2, %g3
  945         add     %g3, PAGE_SHIFT, %g3
  946 
  947         /*
  948          * Extract the virtual page number from the contents of the tag
  949          * access register.
  950          */
  951         srlx    %g1, %g3, %g3
  952 
  953         /*
  954          * Compute the tte bucket address.
  955          */
  956         ldxa    [%g0 + AA_DMMU_TSB] %asi, %g5
  957         and     %g3, TSB_BUCKET_MASK, %g4
  958         sllx    %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
  959         add     %g4, %g5, %g4
  960 
  961         /*
  962          * Compute the tte tag target.
  963          */
  964         sllx    %g3, TV_SIZE_BITS, %g3
  965         or      %g3, %g2, %g3
  966 
  967         /*
  968          * Loop over the ttes in this bucket
  969          */
  970 
  971         /*
  972          * Load the tte.  Note that this instruction may fault, clobbering
  973          * the contents of the tag access register, %g5, %g6, and %g7.  We
  974          * do not use %g5, and %g6 and %g7 are not used until this instruction
  975          * completes successfully.
  976          */
  977 2:      ldda    [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
  978 
  979         /*
   980          * Check that it's valid and writable and that the virtual page
  981          * numbers match.
  982          */
  983         brgez,pn %g7, 4f
  984          andcc  %g7, TD_SW, %g0
  985         bz,pn   %xcc, 4f
  986          cmp    %g3, %g6
  987         bne,pn  %xcc, 4f
  988          nop
  989 
  990         /*
  991          * Set the hardware write bit.
  992          */
  993         TTE_SET_W(%g4, %g2, %g3)
  994 
  995         /*
  996          * Delete the old TLB entry and clear the sfsr.
  997          */
  998         srlx    %g1, PAGE_SHIFT, %g3
  999         sllx    %g3, PAGE_SHIFT, %g3
 1000         stxa    %g0, [%g3] ASI_DMMU_DEMAP
 1001         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1002         membar  #Sync
 1003 
 1004         /*
 1005          * May have become invalid during casxa, in which case start over.
 1006          */
 1007         brgez,pn %g2, 3f
 1008          or     %g2, TD_W, %g2
 1009 
 1010         /*
 1011          * Load the tte data into the tlb and retry the instruction.
 1012          */
 1013         stxa    %g1, [%g0 + AA_DMMU_TAR] %asi
 1014         stxa    %g2, [%g0] ASI_DTLB_DATA_IN_REG
 1015 3:      retry
 1016 
 1017         /*
 1018          * Check the low bits to see if we've finished the bucket.
 1019          */
 1020 4:      add     %g4, 1 << TTE_SHIFT, %g4
 1021         andcc   %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
 1022         bnz,pt  %xcc, 2b
 1023          EMPTY
 1024 
 1025         /*
 1026          * See if we just checked the largest page size, and advance to the
 1027          * next one if not.
 1028          */
 1029          cmp    %g2, TS_MAX
 1030         bne,pt  %xcc, 1b
 1031          add    %g2, 1, %g2
 1032 
 1033         /*
 1034          * Not in user tsb, call c code.
 1035          */
 1036         ba,a    %xcc, tl0_dmmu_prot_trap
 1037          nop
 1038 END(tl0_dmmu_prot_1)
 1039 
 1040 ENTRY(tl0_dmmu_prot_trap)
 1041         /*
 1042          * Put back the contents of the tag access register, in case we
 1043          * faulted.
 1044          */
 1045         stxa    %g1, [%g0 + AA_DMMU_TAR] %asi
 1046         membar  #Sync
 1047 
 1048         /*
 1049          * Switch to alternate globals.
 1050          */
 1051         wrpr    %g0, PSTATE_ALT, %pstate
 1052 
 1053         /*
 1054          * Check if we actually came from the kernel.
 1055          */
 1056         rdpr    %tl, %g1
 1057         cmp     %g1, 1
 1058         bgt,a,pn %xcc, 1f
 1059          nop
 1060 
 1061         /*
 1062          * Load the tar, sfar and sfsr.
 1063          */
 1064         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
 1065         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
 1066         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
 1067         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1068         membar  #Sync
 1069 
 1070         /*
 1071          * Save the mmu registers and call common trap code.
 1072          */
 1073         tl0_split
 1074         clr     %o1
 1075         set     trap, %o2
 1076         mov     %g2, %o3
 1077         mov     %g3, %o4
 1078         mov     %g4, %o5
 1079         ba      %xcc, tl0_utrap
 1080          mov    T_DATA_PROTECTION, %o0
 1081 
 1082         /*
 1083          * Handle faults during window spill/fill.
 1084          */
 1085 1:      RESUME_SPILLFILL_MMU_CLR_SFSR
 1086 
 1087         /*
 1088          * Load the sfar, sfsr and tar.  Clear the sfsr.
 1089          */
 1090         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
 1091         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
 1092         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
 1093         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1094         membar  #Sync
 1095 
 1096         tl1_split
 1097         clr     %o1
 1098         set     trap, %o2
 1099         mov     %g2, %o3
 1100         mov     %g3, %o4
 1101         mov     %g4, %o5
 1102         ba      %xcc, tl1_trap
 1103          mov    T_DATA_PROTECTION | T_KERNEL, %o0
 1104 END(tl0_dmmu_prot_trap)
 1105 
 1106         .macro  tl0_spill_0_n
 1107         wr      %g0, ASI_AIUP, %asi
 1108         SPILL(stxa, %sp + SPOFF, 8, %asi)
 1109         saved
 1110         retry
 1111         .align  32
 1112         RSF_TRAP(T_SPILL)
 1113         RSF_TRAP(T_SPILL)
 1114         .endm
 1115 
 1116         .macro  tl0_spill_1_n
 1117         wr      %g0, ASI_AIUP, %asi
 1118         SPILL(stwa, %sp, 4, %asi)
 1119         saved
 1120         retry
 1121         .align  32
 1122         RSF_TRAP(T_SPILL)
 1123         RSF_TRAP(T_SPILL)
 1124         .endm
 1125 
 1126         .macro  tl0_fill_0_n
 1127         wr      %g0, ASI_AIUP, %asi
 1128         FILL(ldxa, %sp + SPOFF, 8, %asi)
 1129         restored
 1130         retry
 1131         .align  32
 1132         RSF_TRAP(T_FILL)
 1133         RSF_TRAP(T_FILL)
 1134         .endm
 1135 
 1136         .macro  tl0_fill_1_n
 1137         wr      %g0, ASI_AIUP, %asi
 1138         FILL(lduwa, %sp, 4, %asi)
 1139         restored
 1140         retry
 1141         .align  32
 1142         RSF_TRAP(T_FILL)
 1143         RSF_TRAP(T_FILL)
 1144         .endm
 1145 
 1146 ENTRY(tl0_sftrap)
 1147         rdpr    %tstate, %g1
 1148         and     %g1, TSTATE_CWP_MASK, %g1
 1149         wrpr    %g1, 0, %cwp
 1150         tl0_split
 1151         clr     %o1
 1152         set     trap, %o2
 1153         ba      %xcc, tl0_trap
 1154          mov    %g2, %o0
 1155 END(tl0_sftrap)
 1156 
 1157         .macro  tl0_spill_bad   count
 1158         .rept   \count
 1159         sir
 1160         .align  128
 1161         .endr
 1162         .endm
 1163 
 1164         .macro  tl0_fill_bad    count
 1165         .rept   \count
 1166         sir
 1167         .align  128
 1168         .endr
 1169         .endm
 1170 
 1171         .macro  tl0_syscall
 1172         tl0_split
 1173         clr     %o1
 1174         set     syscall, %o2
 1175         ba      %xcc, tl0_trap
 1176          mov    T_SYSCALL, %o0
 1177         .align  32
 1178         .endm
 1179 
 1180         .macro  tl0_fp_restore
 1181         ba,a    %xcc, tl0_fp_restore
 1182          nop
 1183         .align  32
 1184         .endm
 1185 
 1186 ENTRY(tl0_fp_restore)
 1187         ldx     [PCB_REG + PCB_FLAGS], %g1
 1188         andn    %g1, PCB_FEF, %g1
 1189         stx     %g1, [PCB_REG + PCB_FLAGS]
 1190 
 1191         wr      %g0, FPRS_FEF, %fprs
 1192         wr      %g0, ASI_BLK_S, %asi
 1193         ldda    [PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
 1194         ldda    [PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
 1195         ldda    [PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
 1196         ldda    [PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
 1197         membar  #Sync
 1198         done
 1199 END(tl0_fp_restore)
 1200 
 1201         .macro  tl1_insn_excptn
 1202         wrpr    %g0, PSTATE_ALT, %pstate
 1203         wr      %g0, ASI_IMMU, %asi
 1204         rdpr    %tpc, %g3
 1205         ldxa    [%g0 + AA_IMMU_SFSR] %asi, %g4
 1206         stxa    %g0, [%g0 + AA_IMMU_SFSR] %asi
 1207         membar  #Sync
 1208         ba      %xcc, tl1_insn_exceptn_trap
 1209          mov    T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
 1210         .align  32
 1211         .endm
 1212 
 1213 ENTRY(tl1_insn_exceptn_trap)
 1214         tl1_split
 1215         clr     %o1
 1216         set     trap, %o2
 1217         mov     %g3, %o4
 1218         mov     %g4, %o5
 1219         ba      %xcc, tl1_trap
 1220          mov    %g2, %o0
 1221 END(tl1_insn_exceptn_trap)
 1222 
 1223         .macro  tl1_fp_disabled
 1224         ba,a    %xcc, tl1_fp_disabled_1
 1225          nop
 1226         .align  32
 1227         .endm
 1228 
 1229 ENTRY(tl1_fp_disabled_1)
 1230         rdpr    %tpc, %g1
 1231         set     fpu_fault_begin, %g2
 1232         sub     %g1, %g2, %g1
 1233         cmp     %g1, fpu_fault_size
 1234         bgeu,a,pn %xcc, 1f
 1235          nop
 1236 
 1237         wr      %g0, FPRS_FEF, %fprs
 1238         wr      %g0, ASI_BLK_S, %asi
 1239         ldda    [PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
 1240         ldda    [PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
 1241         ldda    [PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
 1242         ldda    [PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
 1243         membar  #Sync
 1244         retry
 1245 
 1246 1:      tl1_split
 1247         clr     %o1
 1248         set     trap, %o2
 1249         ba      %xcc, tl1_trap
 1250          mov    T_FP_DISABLED | T_KERNEL, %o0
 1251 END(tl1_fp_disabled_1)
 1252 
 1253         .macro  tl1_data_excptn
 1254         wrpr    %g0, PSTATE_ALT, %pstate
 1255         ba,a    %xcc, tl1_data_excptn_trap
 1256          nop
 1257         .align  32
 1258         .endm
 1259 
 1260 ENTRY(tl1_data_excptn_trap)
 1261         RESUME_SPILLFILL_MMU_CLR_SFSR
 1262         ba      %xcc, tl1_sfsr_trap
 1263          mov    T_DATA_EXCEPTION | T_KERNEL, %g2
 1264 END(tl1_data_excptn_trap)
 1265 
 1266         .macro  tl1_align
 1267         ba,a    %xcc, tl1_align_trap
 1268          nop
 1269         .align  32
 1270         .endm
 1271 
 1272 ENTRY(tl1_align_trap)
 1273         RESUME_SPILLFILL_ALIGN
 1274         ba      %xcc, tl1_sfsr_trap
 1275          mov    T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
  1276 END(tl1_align_trap)
 1277 
 1278 ENTRY(tl1_sfsr_trap)
 1279         wr      %g0, ASI_DMMU, %asi
 1280         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
 1281         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
 1282         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1283         membar  #Sync
 1284 
 1285         tl1_split
 1286         clr     %o1
 1287         set     trap, %o2
 1288         mov     %g3, %o4
 1289         mov     %g4, %o5
 1290         ba      %xcc, tl1_trap
 1291          mov    %g2, %o0
 1292 END(tl1_sfsr_trap)
 1293 
 1294         .macro  tl1_intr level, mask
 1295         tl1_split
 1296         set     \mask, %o1
 1297         ba      %xcc, tl1_intr
 1298          mov    \level, %o0
 1299         .align  32
 1300         .endm
 1301 
 1302         .macro  tl1_intr_level
 1303         INTR_LEVEL(1)
 1304         .endm
 1305 
 1306         .macro  tl1_immu_miss
 1307         /*
 1308          * Load the context and the virtual page number from the tag access
 1309          * register.  We ignore the context.
 1310          */
 1311         wr      %g0, ASI_IMMU, %asi
 1312         ldxa    [%g0 + AA_IMMU_TAR] %asi, %g5
 1313 
 1314         /*
 1315          * Compute the address of the tte.  The tsb mask and address of the
 1316          * tsb are patched at startup.
 1317          */
 1318         .globl  tl1_immu_miss_patch_1
 1319 tl1_immu_miss_patch_1:
 1320         sethi   %hi(TSB_KERNEL_MASK), %g6
 1321         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1322         sethi   %hi(TSB_KERNEL), %g7
 1323 
 1324         srlx    %g5, TAR_VPN_SHIFT, %g5
 1325         and     %g5, %g6, %g6
 1326         sllx    %g6, TTE_SHIFT, %g6
 1327         add     %g6, %g7, %g6
 1328 
 1329         /*
 1330          * Load the tte.
 1331          */
 1332         ldda    [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 1333 
 1334         /*
  1335          * Check that it's valid and executable and that the virtual page
 1336          * numbers match.
 1337          */
 1338         brgez,pn %g7, tl1_immu_miss_trap
 1339          andcc  %g7, TD_EXEC, %g0
 1340         bz,pn   %xcc, tl1_immu_miss_trap
 1341          srlx   %g6, TV_SIZE_BITS, %g6
 1342         cmp     %g5, %g6
 1343         bne,pn  %xcc, tl1_immu_miss_trap
 1344          EMPTY
 1345 
 1346         /*
  1347          * Set the reference bit if it's currently clear.
 1348          */
 1349          andcc  %g7, TD_REF, %g0
 1350         bz,a,pn %xcc, tl1_immu_miss_set_ref
 1351          nop
 1352 
 1353         /*
 1354          * Load the tte data into the TLB and retry the instruction.
 1355          */
 1356         stxa    %g7, [%g0] ASI_ITLB_DATA_IN_REG
 1357         retry
 1358         .align  128
 1359         .endm
 1360 
 1361 ENTRY(tl1_immu_miss_set_ref)
 1362         /*
 1363          * Recompute the tte address, which we clobbered loading the tte.  The
 1364          * tsb mask and address of the tsb are patched at startup.
 1365          */
 1366         .globl  tl1_immu_miss_patch_2
 1367 tl1_immu_miss_patch_2:
 1368         sethi   %hi(TSB_KERNEL_MASK), %g6
 1369         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1370         sethi   %hi(TSB_KERNEL), %g7
 1371 
 1372         and     %g5, %g6, %g5
 1373         sllx    %g5, TTE_SHIFT, %g5
 1374         add     %g5, %g7, %g5
 1375 
 1376         /*
 1377          * Set the reference bit.
 1378          */
 1379         TTE_SET_REF(%g5, %g6, %g7)
 1380 
 1381         /*
 1382          * May have become invalid during casxa, in which case start over.
 1383          */
 1384         brgez,pn %g6, 1f
 1385          nop
 1386 
 1387         /*
 1388          * Load the tte data into the TLB and retry the instruction.
 1389          */
 1390         stxa    %g6, [%g0] ASI_ITLB_DATA_IN_REG
 1391 1:      retry
 1392 END(tl1_immu_miss_set_ref)
 1393 
 1394 ENTRY(tl1_immu_miss_trap)
 1395         /*
 1396          * Switch to alternate globals.
 1397          */
 1398         wrpr    %g0, PSTATE_ALT, %pstate
 1399 
 1400         ldxa    [%g0 + AA_IMMU_TAR] %asi, %g2
 1401 
 1402         tl1_split
 1403         clr     %o1
 1404         set     trap, %o2
 1405         mov     %g2, %o3
 1406         ba      %xcc, tl1_trap
 1407          mov    T_INSTRUCTION_MISS | T_KERNEL, %o0
 1408 END(tl1_immu_miss_trap)
 1409 
 1410         .macro  tl1_dmmu_miss
 1411         /*
 1412          * Load the context and the virtual page number from the tag access
 1413          * register.
 1414          */
 1415         wr      %g0, ASI_DMMU, %asi
 1416         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g5
 1417 
 1418         /*
 1419          * Extract the context from the contents of the tag access register.
  1420          * If it's non-zero this is a fault on a user address.  Note that the
 1421          * faulting address is passed in %g1.
 1422          */
 1423         sllx    %g5, 64 - TAR_VPN_SHIFT, %g6
 1424         brnz,a,pn %g6, tl1_dmmu_miss_user
 1425          mov    %g5, %g1
 1426 
 1427         /*
 1428          * Check for the direct mapped physical region.  These addresses have
 1429          * the high bit set so they are negative.
 1430          */
 1431         brlz,pn %g5, tl1_dmmu_miss_direct
 1432          EMPTY
 1433 
 1434         /*
 1435          * Compute the address of the tte.  The tsb mask and address of the
 1436          * tsb are patched at startup.
 1437          */
 1438         .globl  tl1_dmmu_miss_patch_1
 1439 tl1_dmmu_miss_patch_1:
 1440         sethi   %hi(TSB_KERNEL_MASK), %g6
 1441         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1442         sethi   %hi(TSB_KERNEL), %g7
 1443 
 1444         srlx    %g5, TAR_VPN_SHIFT, %g5
 1445         and     %g5, %g6, %g6
 1446         sllx    %g6, TTE_SHIFT, %g6
 1447         add     %g6, %g7, %g6
 1448 
 1449         /*
 1450          * Load the tte.
 1451          */
 1452         ldda    [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 1453 
 1454         /*
  1455          * Check that it's valid and that the virtual page numbers match.
 1456          */
 1457         brgez,pn %g7, tl1_dmmu_miss_trap
 1458          srlx   %g6, TV_SIZE_BITS, %g6
 1459         cmp     %g5, %g6
 1460         bne,pn %xcc, tl1_dmmu_miss_trap
 1461          EMPTY
 1462 
 1463         /*
  1464          * Set the reference bit if it's currently clear.
 1465          */
 1466          andcc  %g7, TD_REF, %g0
 1467         bz,a,pt %xcc, tl1_dmmu_miss_set_ref
 1468          nop
 1469 
 1470         /*
 1471          * Load the tte data into the TLB and retry the instruction.
 1472          */
 1473         stxa    %g7, [%g0] ASI_DTLB_DATA_IN_REG
 1474         retry
 1475         .align  128
 1476         .endm
 1477 
 1478 ENTRY(tl1_dmmu_miss_set_ref)
 1479         /*
 1480          * Recompute the tte address, which we clobbered loading the tte.  The
 1481          * tsb mask and address of the tsb are patched at startup.
 1482          */
 1483         .globl  tl1_dmmu_miss_patch_2
 1484 tl1_dmmu_miss_patch_2:
 1485         sethi   %hi(TSB_KERNEL_MASK), %g6
 1486         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1487         sethi   %hi(TSB_KERNEL), %g7
 1488 
 1489         and     %g5, %g6, %g5
 1490         sllx    %g5, TTE_SHIFT, %g5
 1491         add     %g5, %g7, %g5
 1492 
 1493         /*
 1494          * Set the reference bit.
 1495          */
 1496         TTE_SET_REF(%g5, %g6, %g7)
 1497 
 1498         /*
 1499          * May have become invalid during casxa, in which case start over.
 1500          */
 1501         brgez,pn %g6, 1f
 1502          nop
 1503 
 1504         /*
 1505          * Load the tte data into the TLB and retry the instruction.
 1506          */
 1507         stxa    %g6, [%g0] ASI_DTLB_DATA_IN_REG
 1508 1:      retry
 1509 END(tl1_dmmu_miss_set_ref)
 1510 
 1511 ENTRY(tl1_dmmu_miss_trap)
 1512         /*
 1513          * Switch to alternate globals.
 1514          */
 1515         wrpr    %g0, PSTATE_ALT, %pstate
 1516 
 1517         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
 1518 
 1519         KSTACK_CHECK
 1520 
 1521         tl1_split
 1522         clr     %o1
 1523         set     trap, %o2
 1524         mov     %g2, %o3
 1525         ba      %xcc, tl1_trap
 1526          mov    T_DATA_MISS | T_KERNEL, %o0
 1527 END(tl1_dmmu_miss_trap)
 1528 
 1529 ENTRY(tl1_dmmu_miss_direct)
 1530         /*
 1531          * Mask off the high bits of the virtual address to get the physical
  1532          * address, and OR in the tte bits.  The virtual address bits that
 1533          * correspond to the tte valid and page size bits are left set, so
 1534          * they don't have to be included in the tte bits below.  We know they
 1535          * are set because the virtual address is in the upper va hole.
 1536          */
 1537         setx    TLB_DIRECT_TO_TTE_MASK, %g7, %g6
 1538         and     %g5, %g6, %g5
 1539         or      %g5, TD_CP | TD_CV | TD_W, %g5
 1540 
 1541         /*
 1542          * Load the tte data into the TLB and retry the instruction.
 1543          */
 1544         stxa    %g5, [%g0] ASI_DTLB_DATA_IN_REG
 1545         retry
 1546 END(tl1_dmmu_miss_direct)
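
In C terms the direct-map translation is a mask and an OR; a hedged
sketch using the symbols from the code above:

#include <stdint.h>

/*
 * Sketch of tl1_dmmu_miss_direct: mask the va-hole bits off the
 * virtual address to recover the physical address, then mark the
 * mapping cacheable (TD_CP | TD_CV) and writable (TD_W).  The address
 * bits the mask leaves set double as the tte valid and page size
 * fields.
 */
static inline uint64_t
direct_tte(uint64_t va)
{
        return ((va & TLB_DIRECT_TO_TTE_MASK) | TD_CP | TD_CV | TD_W);
}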
 1547 
 1548         .macro  tl1_dmmu_prot
 1549         ba,a    %xcc, tl1_dmmu_prot_1
 1550          nop
 1551         .align  128
 1552         .endm
 1553 
 1554 ENTRY(tl1_dmmu_prot_1)
 1555         /*
 1556          * Load the context and the virtual page number from the tag access
 1557          * register.
 1558          */
 1559         wr      %g0, ASI_DMMU, %asi
 1560         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g5
 1561 
 1562         /*
 1563          * Extract the context from the contents of the tag access register.
  1564          * If it's non-zero this is a fault on a user address.  Note that the
 1565          * faulting address is passed in %g1.
 1566          */
 1567         sllx    %g5, 64 - TAR_VPN_SHIFT, %g6
 1568         brnz,a,pn %g6, tl1_dmmu_prot_user
 1569          mov    %g5, %g1
 1570 
 1571         /*
 1572          * Compute the address of the tte.  The tsb mask and address of the
 1573          * tsb are patched at startup.
 1574          */
 1575         .globl  tl1_dmmu_prot_patch_1
 1576 tl1_dmmu_prot_patch_1:
 1577         sethi   %hi(TSB_KERNEL_MASK), %g6
 1578         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1579         sethi   %hi(TSB_KERNEL), %g7
 1580 
 1581         srlx    %g5, TAR_VPN_SHIFT, %g5
 1582         and     %g5, %g6, %g6
 1583         sllx    %g6, TTE_SHIFT, %g6
 1584         add     %g6, %g7, %g6
 1585 
 1586         /*
 1587          * Load the tte.
 1588          */
 1589         ldda    [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
 1590 
 1591         /*
  1592          * Check that it's valid and writable and that the virtual page
 1593          * numbers match.
 1594          */
 1595         brgez,pn %g7, tl1_dmmu_prot_trap
 1596          andcc  %g7, TD_SW, %g0
 1597         bz,pn   %xcc, tl1_dmmu_prot_trap
 1598          srlx   %g6, TV_SIZE_BITS, %g6
 1599         cmp     %g5, %g6
 1600         bne,pn  %xcc, tl1_dmmu_prot_trap
 1601          EMPTY
 1602 
 1603         /*
 1604          * Delete the old TLB entry and clear the sfsr.
 1605          */
 1606          sllx   %g5, TAR_VPN_SHIFT, %g6
 1607         or      %g6, TLB_DEMAP_NUCLEUS, %g6
 1608         stxa    %g0, [%g6] ASI_DMMU_DEMAP
 1609         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1610         membar  #Sync
 1611 
 1612         /*
 1613          * Recompute the tte address, which we clobbered loading the tte.  The
 1614          * tsb mask and address of the tsb are patched at startup.
 1615          */
 1616         .globl  tl1_dmmu_prot_patch_2
 1617 tl1_dmmu_prot_patch_2:
 1618         sethi   %hi(TSB_KERNEL_MASK), %g6
 1619         or      %g6, %lo(TSB_KERNEL_MASK), %g6
 1620         sethi   %hi(TSB_KERNEL), %g7
 1621 
 1622         and     %g5, %g6, %g5
 1623         sllx    %g5, TTE_SHIFT, %g5
 1624         add     %g5, %g7, %g5
 1625 
 1626         /*
 1627          * Set the hardware write bit.
 1628          */
 1629         TTE_SET_W(%g5, %g6, %g7)
 1630 
 1631         /*
 1632          * The tte may have become invalid during the casxa, in which case start over.
 1633          */
 1634         brgez,pn %g6, 1f
 1635          or     %g6, TD_W, %g6
 1636 
 1637         /*
 1638          * Load the tte data into the TLB and retry the instruction.
 1639          */
 1640         stxa    %g6, [%g0] ASI_DTLB_DATA_IN_REG
 1641 1:      retry
 1642 END(tl1_dmmu_prot_1)
 1643 
 1644 ENTRY(tl1_dmmu_prot_trap)
 1645         /*
 1646          * Switch to alternate globals.
 1647          */
 1648         wrpr    %g0, PSTATE_ALT, %pstate
 1649 
 1650         /*
 1651          * Load the sfar, sfsr and tar.  Clear the sfsr.
 1652          */
 1653         ldxa    [%g0 + AA_DMMU_TAR] %asi, %g2
 1654         ldxa    [%g0 + AA_DMMU_SFAR] %asi, %g3
 1655         ldxa    [%g0 + AA_DMMU_SFSR] %asi, %g4
 1656         stxa    %g0, [%g0 + AA_DMMU_SFSR] %asi
 1657         membar  #Sync
 1658 
 1659         tl1_split
 1660         clr     %o1
 1661         set     trap, %o2
 1662         mov     %g2, %o3
 1663         mov     %g3, %o4
 1664         mov     %g4, %o5
 1665         ba      %xcc, tl1_trap
 1666          mov    T_DATA_PROTECTION | T_KERNEL, %o0
 1667 END(tl1_dmmu_prot_trap)
 1668 
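      /*
       * Window spill and fill handlers used while at TL1.  The wstate in
       * effect at the time of the spill or fill selects the handler:
       * kernel windows are stored with plain stx, while 64-bit and 32-bit
       * user windows are stored with stxa/stwa through ASI_AIUP (the user
       * primary address space).  The RSF_* entries that pad each handler
       * are used if the spill or fill itself faults.
       */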
 1669         .macro  tl1_spill_0_n
 1670         SPILL(stx, %sp + SPOFF, 8, EMPTY)
 1671         saved
 1672         retry
 1673         .align  32
 1674         RSF_FATAL(T_SPILL)
 1675         RSF_FATAL(T_SPILL)
 1676         .endm
 1677 
 1678         .macro  tl1_spill_2_n
 1679         wr      %g0, ASI_AIUP, %asi
 1680         SPILL(stxa, %sp + SPOFF, 8, %asi)
 1681         saved
 1682         retry
 1683         .align  32
 1684         RSF_SPILL_TOPCB
 1685         RSF_SPILL_TOPCB
 1686         .endm
 1687 
 1688         .macro  tl1_spill_3_n
 1689         wr      %g0, ASI_AIUP, %asi
 1690         SPILL(stwa, %sp, 4, %asi)
 1691         saved
 1692         retry
 1693         .align  32
 1694         RSF_SPILL_TOPCB
 1695         RSF_SPILL_TOPCB
 1696         .endm
 1697 
 1698         .macro  tl1_spill_0_o
 1699         wr      %g0, ASI_AIUP, %asi
 1700         SPILL(stxa, %sp + SPOFF, 8, %asi)
 1701         saved
 1702         retry
 1703         .align  32
 1704         RSF_SPILL_TOPCB
 1705         RSF_SPILL_TOPCB
 1706         .endm
 1707 
 1708         .macro  tl1_spill_1_o
 1709         wr      %g0, ASI_AIUP, %asi
 1710         SPILL(stwa, %sp, 4, %asi)
 1711         saved
 1712         retry
 1713         .align  32
 1714         RSF_SPILL_TOPCB
 1715         RSF_SPILL_TOPCB
 1716         .endm
 1717 
 1718         .macro  tl1_spill_2_o
 1719         RSF_SPILL_TOPCB
 1720         .align  128
 1721         .endm
 1722 
 1723         .macro  tl1_fill_0_n
 1724         FILL(ldx, %sp + SPOFF, 8, EMPTY)
 1725         restored
 1726         retry
 1727         .align  32
 1728         RSF_FATAL(T_FILL)
 1729         RSF_FATAL(T_FILL)
 1730         .endm
 1731 
 1732         .macro  tl1_fill_2_n
 1733         wr      %g0, ASI_AIUP, %asi
 1734         FILL(ldxa, %sp + SPOFF, 8, %asi)
 1735         restored
 1736         retry
 1737         .align 32
 1738         RSF_FILL_MAGIC
 1739         RSF_FILL_MAGIC
 1740         .endm
 1741 
 1742         .macro  tl1_fill_3_n
 1743         wr      %g0, ASI_AIUP, %asi
 1744         FILL(lduwa, %sp, 4, %asi)
 1745         restored
 1746         retry
 1747         .align 32
 1748         RSF_FILL_MAGIC
 1749         RSF_FILL_MAGIC
 1750         .endm
 1751 
 1752 /*
 1753  * This is used on kernel entry to spill windows that are still
 1754  * occupied with user data to the pcb.
 1755  */
 1756 ENTRY(tl1_spill_topcb)
 1757         wrpr    %g0, PSTATE_ALT, %pstate
 1758 
 1759         /* Free some globals for our use. */
 1760         dec     24, ASP_REG
 1761         stx     %g1, [ASP_REG + 0]
 1762         stx     %g2, [ASP_REG + 8]
 1763         stx     %g3, [ASP_REG + 16]
 1764 
 1765         ldx     [PCB_REG + PCB_NSAVED], %g1
 1766 
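              /*
               * Save the user's stack pointer and spill the window into the
               * next free pcb_rw slot, both indexed by pcb_nsaved.
               */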
 1767         sllx    %g1, PTR_SHIFT, %g2
 1768         add     %g2, PCB_REG, %g2
 1769         stx     %sp, [%g2 + PCB_RWSP]
 1770 
 1771         sllx    %g1, RW_SHIFT, %g2
 1772         add     %g2, PCB_REG, %g2
 1773         SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
 1774 
 1775         inc     %g1
 1776         stx     %g1, [PCB_REG + PCB_NSAVED]
 1777 
 1778 #if KTR_COMPILE & KTR_TRAP
 1779         CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
 1780            , %g1, %g2, %g3, 7, 8, 9)
 1781         rdpr    %tpc, %g2
 1782         stx     %g2, [%g1 + KTR_PARM1]
 1783         rdpr    %tnpc, %g2
 1784         stx     %g2, [%g1 + KTR_PARM2]
 1785         stx     %sp, [%g1 + KTR_PARM3]
 1786         ldx     [PCB_REG + PCB_NSAVED], %g2
 1787         stx     %g2, [%g1 + KTR_PARM4]
 1788 9:
 1789 #endif
 1790 
 1791         saved
 1792 
 1793         ldx     [ASP_REG + 16], %g3
 1794         ldx     [ASP_REG + 8], %g2
 1795         ldx     [ASP_REG + 0], %g1
 1796         inc     24, ASP_REG
 1797         retry
 1798 END(tl1_spill_topcb)
 1799 
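      /*
       * Unused spill and fill vectors.  A trap through one of these is
       * always fatal, so force a software initiated reset (sir).
       */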
 1800         .macro  tl1_spill_bad   count
 1801         .rept   \count
 1802         sir
 1803         .align  128
 1804         .endr
 1805         .endm
 1806 
 1807         .macro  tl1_fill_bad    count
 1808         .rept   \count
 1809         sir
 1810         .align  128
 1811         .endr
 1812         .endm
 1813 
 1814         .macro  tl1_soft        count
 1815         .rept   \count
 1816         tl1_gen T_SOFT | T_KERNEL
 1817         .endr
 1818         .endm
 1819 
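      /*
       * The trap table itself, aligned to 32K.  Each normal vector is
       * 32 bytes (8 instructions); the spill/fill and fast MMU miss
       * vectors occupy 4 slots (128 bytes) each.  Traps taken at TL > 0
       * vector into the second half of the table, starting at tl1_base.
       */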
 1820         .sect   .trap
 1821         .align  0x8000
 1822         .globl  tl0_base
 1823 
 1824 tl0_base:
 1825         tl0_reserved    8                               ! 0x0-0x7
 1826 tl0_insn_excptn:
 1827         tl0_insn_excptn                                 ! 0x8
 1828         tl0_reserved    1                               ! 0x9
 1829 tl0_insn_error:
 1830         tl0_gen         T_INSTRUCTION_ERROR             ! 0xa
 1831         tl0_reserved    5                               ! 0xb-0xf
 1832 tl0_insn_illegal:
 1833         tl0_gen         T_ILLEGAL_INSTRUCTION           ! 0x10
 1834 tl0_priv_opcode:
 1835         tl0_gen         T_PRIVILEGED_OPCODE             ! 0x11
 1836         tl0_reserved    14                              ! 0x12-0x1f
 1837 tl0_fp_disabled:
 1838         tl0_gen         T_FP_DISABLED                   ! 0x20
 1839 tl0_fp_ieee:
 1840         tl0_gen         T_FP_EXCEPTION_IEEE_754         ! 0x21
 1841 tl0_fp_other:
 1842         tl0_gen         T_FP_EXCEPTION_OTHER            ! 0x22
 1843 tl0_tag_ovflw:
 1844         tl0_gen         T_TAG_OFERFLOW                  ! 0x23
 1845 tl0_clean_window:
 1846         clean_window                                    ! 0x24
 1847 tl0_divide:
 1848         tl0_gen         T_DIVISION_BY_ZERO              ! 0x28
 1849         tl0_reserved    7                               ! 0x29-0x2f
 1850 tl0_data_excptn:
 1851         tl0_data_excptn                                 ! 0x30
 1852         tl0_reserved    1                               ! 0x31
 1853 tl0_data_error:
 1854         tl0_gen         T_DATA_ERROR                    ! 0x32
 1855         tl0_reserved    1                               ! 0x33
 1856 tl0_align:
 1857         tl0_align                                       ! 0x34
 1858 tl0_align_lddf:
 1859         tl0_gen         T_RESERVED                      ! 0x35
 1860 tl0_align_stdf:
 1861         tl0_gen         T_RESERVED                      ! 0x36
 1862 tl0_priv_action:
 1863         tl0_gen         T_PRIVILEGED_ACTION             ! 0x37
 1864         tl0_reserved    9                               ! 0x38-0x40
 1865 tl0_intr_level:
 1866         tl0_intr_level                                  ! 0x41-0x4f
 1867         tl0_reserved    16                              ! 0x50-0x5f
 1868 tl0_intr_vector:
 1869         intr_vector                                     ! 0x60
 1870 tl0_watch_phys:
 1871         tl0_gen         T_PA_WATCHPOINT                 ! 0x61
 1872 tl0_watch_virt:
 1873         tl0_gen         T_VA_WATCHPOINT                 ! 0x62
 1874 tl0_ecc:
 1875         tl0_gen         T_CORRECTED_ECC_ERROR           ! 0x63
 1876 tl0_immu_miss:
 1877         tl0_immu_miss                                   ! 0x64
 1878 tl0_dmmu_miss:
 1879         tl0_dmmu_miss                                   ! 0x68
 1880 tl0_dmmu_prot:
 1881         tl0_dmmu_prot                                   ! 0x6c
 1882         tl0_reserved    16                              ! 0x70-0x7f
 1883 tl0_spill_0_n:
 1884         tl0_spill_0_n                                   ! 0x80
 1885 tl0_spill_1_n:
 1886         tl0_spill_1_n                                   ! 0x84
 1887         tl0_spill_bad   14                              ! 0x88-0xbf
 1888 tl0_fill_0_n:
 1889         tl0_fill_0_n                                    ! 0xc0
 1890 tl0_fill_1_n:
 1891         tl0_fill_1_n                                    ! 0xc4
 1892         tl0_fill_bad    14                              ! 0xc8-0xff
 1893 tl0_soft:
 1894         tl0_gen         T_SYSCALL                       ! 0x100
 1895         tl0_gen         T_BREAKPOINT                    ! 0x101
 1896         tl0_gen         T_DIVISION_BY_ZERO              ! 0x102
 1897         tl0_reserved    1                               ! 0x103
 1898         tl0_gen         T_CLEAN_WINDOW                  ! 0x104
 1899         tl0_gen         T_RANGE_CHECK                   ! 0x105
 1900         tl0_gen         T_FIX_ALIGNMENT                 ! 0x106
 1901         tl0_gen         T_INTEGER_OVERFLOW              ! 0x107
 1902         tl0_gen         T_SYSCALL                       ! 0x108
 1903 #ifdef COMPAT_FREEBSD4
 1904         tl0_syscall                                     ! 0x109
 1905 #else
 1906         tl0_gen         T_SYSCALL                       ! 0x109
 1907 #endif
 1908         tl0_fp_restore                                  ! 0x10a
 1909         tl0_reserved    5                               ! 0x10b-0x10f
 1910         tl0_gen         T_TRAP_INSTRUCTION_16           ! 0x110
 1911         tl0_gen         T_TRAP_INSTRUCTION_17           ! 0x111
 1912         tl0_gen         T_TRAP_INSTRUCTION_18           ! 0x112
 1913         tl0_gen         T_TRAP_INSTRUCTION_19           ! 0x113
 1914         tl0_gen         T_TRAP_INSTRUCTION_20           ! 0x114
 1915         tl0_gen         T_TRAP_INSTRUCTION_21           ! 0x115
 1916         tl0_gen         T_TRAP_INSTRUCTION_22           ! 0x116
 1917         tl0_gen         T_TRAP_INSTRUCTION_23           ! 0x117
 1918         tl0_gen         T_TRAP_INSTRUCTION_24           ! 0x118
 1919         tl0_gen         T_TRAP_INSTRUCTION_25           ! 0x119
 1920         tl0_gen         T_TRAP_INSTRUCTION_26           ! 0x11a
 1921         tl0_gen         T_TRAP_INSTRUCTION_27           ! 0x11b
 1922         tl0_gen         T_TRAP_INSTRUCTION_28           ! 0x11c
 1923         tl0_gen         T_TRAP_INSTRUCTION_29           ! 0x11d
 1924         tl0_gen         T_TRAP_INSTRUCTION_30           ! 0x11e
 1925         tl0_gen         T_TRAP_INSTRUCTION_31           ! 0x11f
 1926         tl0_reserved    32                              ! 0x120-0x13f
 1927         tl0_gen         T_SYSCALL                       ! 0x140
 1928         tl0_syscall                                     ! 0x141
 1929         tl0_gen         T_SYSCALL                       ! 0x142
 1930         tl0_gen         T_SYSCALL                       ! 0x143
 1931         tl0_reserved    188                             ! 0x144-0x1ff
 1932 
 1933 tl1_base:
 1934         tl1_reserved    8                               ! 0x200-0x207
 1935 tl1_insn_excptn:
 1936         tl1_insn_excptn                                 ! 0x208
 1937         tl1_reserved    1                               ! 0x209
 1938 tl1_insn_error:
 1939         tl1_gen         T_INSTRUCTION_ERROR             ! 0x20a
 1940         tl1_reserved    5                               ! 0x20b-0x20f
 1941 tl1_insn_illegal:
 1942         tl1_gen         T_ILLEGAL_INSTRUCTION           ! 0x210
 1943 tl1_priv_opcode:
 1944         tl1_gen         T_PRIVILEGED_OPCODE             ! 0x211
 1945         tl1_reserved    14                              ! 0x212-0x21f
 1946 tl1_fp_disabled:
 1947         tl1_fp_disabled                                 ! 0x220
 1948 tl1_fp_ieee:
 1949         tl1_gen         T_FP_EXCEPTION_IEEE_754         ! 0x221
 1950 tl1_fp_other:
 1951         tl1_gen         T_FP_EXCEPTION_OTHER            ! 0x222
 1952 tl1_tag_ovflw:
 1953         tl1_gen         T_TAG_OFERFLOW                  ! 0x223
 1954 tl1_clean_window:
 1955         clean_window                                    ! 0x224
 1956 tl1_divide:
 1957         tl1_gen         T_DIVISION_BY_ZERO              ! 0x228
 1958         tl1_reserved    7                               ! 0x229-0x22f
 1959 tl1_data_excptn:
 1960         tl1_data_excptn                                 ! 0x230
 1961         tl1_reserved    1                               ! 0x231
 1962 tl1_data_error:
 1963         tl1_gen         T_DATA_ERROR                    ! 0x232
 1964         tl1_reserved    1                               ! 0x233
 1965 tl1_align:
 1966         tl1_align                                       ! 0x234
 1967 tl1_align_lddf:
 1968         tl1_gen         T_RESERVED                      ! 0x235
 1969 tl1_align_stdf:
 1970         tl1_gen         T_RESERVED                      ! 0x236
 1971 tl1_priv_action:
 1972         tl1_gen         T_PRIVILEGED_ACTION             ! 0x237
 1973         tl1_reserved    9                               ! 0x238-0x240
 1974 tl1_intr_level:
 1975         tl1_intr_level                                  ! 0x241-0x24f
 1976         tl1_reserved    16                              ! 0x250-0x25f
 1977 tl1_intr_vector:
 1978         intr_vector                                     ! 0x260
 1979 tl1_watch_phys:
 1980         tl1_gen         T_PA_WATCHPOINT                 ! 0x261
 1981 tl1_watch_virt:
 1982         tl1_gen         T_VA_WATCHPOINT                 ! 0x262
 1983 tl1_ecc:
 1984         tl1_gen         T_CORRECTED_ECC_ERROR           ! 0x263
 1985 tl1_immu_miss:
 1986         tl1_immu_miss                                   ! 0x264
 1987 tl1_dmmu_miss:
 1988         tl1_dmmu_miss                                   ! 0x268
 1989 tl1_dmmu_prot:
 1990         tl1_dmmu_prot                                   ! 0x26c
 1991         tl1_reserved    16                              ! 0x270-0x27f
 1992 tl1_spill_0_n:
 1993         tl1_spill_0_n                                   ! 0x280
 1994         tl1_spill_bad   1                               ! 0x284
 1995 tl1_spill_2_n:
 1996         tl1_spill_2_n                                   ! 0x288
 1997 tl1_spill_3_n:
 1998         tl1_spill_3_n                                   ! 0x28c
 1999         tl1_spill_bad   4                               ! 0x290-0x29f
 2000 tl1_spill_0_o:
 2001         tl1_spill_0_o                                   ! 0x2a0
 2002 tl1_spill_1_o:
 2003         tl1_spill_1_o                                   ! 0x2a4
 2004 tl1_spill_2_o:
 2005         tl1_spill_2_o                                   ! 0x2a8
 2006         tl1_spill_bad   5                               ! 0x2ac-0x2bf
 2007 tl1_fill_0_n:
 2008         tl1_fill_0_n                                    ! 0x2c0
 2009         tl1_fill_bad    1                               ! 0x2c4
 2010 tl1_fill_2_n:
 2011         tl1_fill_2_n                                    ! 0x2c8
 2012 tl1_fill_3_n:
 2013         tl1_fill_3_n                                    ! 0x2cc
 2014         tl1_fill_bad    12                              ! 0x2d0-0x2ff
 2015         tl1_reserved    1                               ! 0x300
 2016 tl1_breakpoint:
 2017         tl1_gen         T_BREAKPOINT                    ! 0x301
 2018         tl1_gen         T_RSTRWP_PHYS                   ! 0x302
 2019         tl1_gen         T_RSTRWP_VIRT                   ! 0x303
 2020         tl1_reserved    252                             ! 0x304-0x3ff
 2021 
 2022 /*
 2023  * User trap entry point.
 2024  *
 2025  * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
 2026  *                u_long sfsr)
 2027  *
 2028  * This handles redirecting a trap back to usermode as a user trap.  The user
 2029  * program must have first registered a trap handler with the kernel using
 2030  * sysarch(SPARC_UTRAP_INSTALL).  The trap handler is passed enough state
 2031  * for it to return to the trapping code directly; it will not return through
 2032  * the kernel.  The trap type is passed in %o0; all out registers must be
 2033  * passed through to tl0_trap or to usermode untouched.  Note that the
 2034  * parameters passed in out registers may be used by the user trap handler.
 2035  * Do not change the registers they are passed in or you will break the ABI.
 2036  *
 2037  * If the trap type allows user traps, set up state to execute the user trap
 2038  * handler and bounce back to usermode; otherwise branch to tl0_trap.
 2039  */
 2040 ENTRY(tl0_utrap)
 2041         /*
 2042          * Check if the trap type allows user traps.
 2043          */
 2044         cmp     %o0, UT_MAX
 2045         bge,a,pt %xcc, tl0_trap
 2046          nop
 2047 
 2048         /*
 2049          * Load the user trap handler from the utrap table.
 2050          */
 2051         ldx     [PCPU(CURTHREAD)], %l0
 2052         ldx     [%l0 + TD_PROC], %l0
 2053         ldx     [%l0 + P_MD + MD_UTRAP], %l0
 2054         brz,pt  %l0, tl0_trap
 2055          sllx   %o0, PTR_SHIFT, %l1
 2056         ldx     [%l0 + %l1], %l0
 2057         brz,a,pt %l0, tl0_trap
 2058          nop
 2059 
 2060         /*
 2061          * If the save we did on entry to the kernel had to spill a window
 2062          * to the pcb, pretend we took a spill trap instead.  Any windows
 2063          * that are in the pcb must be copied out or the fill handler will
 2064          * not be able to find them, since the user trap handler returns
 2065          * directly to the trapping code.  Note that we only support precise
 2066          * user traps, which implies that the condition that caused the trap
 2067          * in the first place is still valid, so it will occur again when we
 2068          * re-execute the trapping instruction.
 2069          */     
 2070         ldx     [PCB_REG + PCB_NSAVED], %l1
 2071         brnz,a,pn %l1, tl0_trap
 2072          mov    T_SPILL, %o0
 2073 
 2074         /*
 2075          * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
 2076          * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
 2077          * it may be clobbered by an interrupt before the user trap code
 2078          * can read it, and we must pass %tstate in order to restore %ccr
 2079          * and %asi.  The %fsr must be stored to memory, so we use the
 2080          * temporary stack for that.
 2081          */
 2082         rd      %fprs, %l1
 2083         or      %l1, FPRS_FEF, %l2
 2084         wr      %l2, 0, %fprs
 2085         dec     8, ASP_REG
 2086         stx     %fsr, [ASP_REG]
 2087         ldx     [ASP_REG], %l4
 2088         inc     8, ASP_REG
 2089         wr      %l1, 0, %fprs
 2090 
 2091         rdpr    %tstate, %l5
 2092         rdpr    %tpc, %l6
 2093         rdpr    %tnpc, %l7
 2094 
 2095         /*
 2096          * Set up %tnpc to return to.
 2097          */
 2098         wrpr    %l0, 0, %tnpc
 2099 
 2100         /*
 2101          * Set up %wstate for return, clearing WSTATE_TRANSITION.
 2102          */
 2103         rdpr    %wstate, %l1
 2104         and     %l1, WSTATE_NORMAL_MASK, %l1
 2105         wrpr    %l1, 0, %wstate
 2106 
 2107         /*
 2108          * Set up %tstate for return, changing the saved cwp to point to the
 2109          * current window instead of the window at the time of the trap.
 2110          */
 2111         andn    %l5, TSTATE_CWP_MASK, %l1
 2112         rdpr    %cwp, %l2
 2113         wrpr    %l1, %l2, %tstate
 2114 
 2115         /*
 2116          * Set up %sp.  Userland processes will crash if this is not set up.
 2117          */
 2118         sub     %fp, CCFSZ, %sp
 2119 
 2120         /*
 2121          * Execute the user trap handler.
 2122          */
 2123         done
 2124 END(tl0_utrap)
 2125 
 2126 /*
 2127  * (Real) User trap entry point.
 2128  *
 2129  * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 2130  *               u_int sfsr)
 2131  *
 2132  * The following setup has been performed:
 2133  *      - the windows have been split and the active user window has been saved
 2134  *        (maybe just to the pcb)
 2135  *      - we are on alternate globals and interrupts are disabled
 2136  *
 2137  * We switch to the kernel stack, build a trapframe, switch to normal
 2138  * globals, enable interrupts and call trap.
 2139  *
 2140  * NOTE: We must be very careful setting up the per-cpu pointer.  We know that
 2141  * it has been pre-set in alternate globals, so we read it from there and setup
 2142  * it has been pre-set in alternate globals, so we read it from there and set up
 2143  * of cpu migration and using the wrong pcpup.
 2144  */
 2145 ENTRY(tl0_trap)
 2146         /*
 2147          * Force kernel store order.
 2148          */
 2149         wrpr    %g0, PSTATE_ALT, %pstate
 2150 
 2151         rdpr    %tstate, %l0
 2152         rdpr    %tpc, %l1
 2153         rdpr    %tnpc, %l2
 2154         rd      %y, %l3
 2155         rd      %fprs, %l4
 2156         rdpr    %wstate, %l5
 2157 
 2158 #if KTR_COMPILE & KTR_TRAP
 2159         CATR(KTR_TRAP,
 2160             "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
 2161             , %g1, %g2, %g3, 7, 8, 9)
 2162         ldx     [PCPU(CURTHREAD)], %g2
 2163         stx     %g2, [%g1 + KTR_PARM1]
 2164         stx     %o0, [%g1 + KTR_PARM2]
 2165         rdpr    %pil, %g2
 2166         stx     %g2, [%g1 + KTR_PARM3]
 2167         stx     %l1, [%g1 + KTR_PARM4]
 2168         stx     %l2, [%g1 + KTR_PARM5]
 2169         stx     %i6, [%g1 + KTR_PARM6]
 2170 9:
 2171 #endif
 2172 
 2173 1:      and     %l5, WSTATE_NORMAL_MASK, %l5
 2174         sllx    %l5, WSTATE_OTHER_SHIFT, %l5
 2175         wrpr    %l5, WSTATE_KERNEL, %wstate
 2176         rdpr    %canrestore, %l6
 2177         wrpr    %l6, 0, %otherwin
 2178         wrpr    %g0, 0, %canrestore
 2179 
 2180         sub     PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
 2181 
 2182         stx     %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2183         stx     %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
 2184         stx     %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
 2185         stx     %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
 2186         stx     %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
 2187 
 2188         stx     %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
 2189         stx     %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
 2190         stx     %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
 2191         stx     %l3, [%sp + SPOFF + CCFSZ + TF_Y]
 2192         stx     %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
 2193         stx     %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
 2194 
 2195         wr      %g0, FPRS_FEF, %fprs
 2196         stx     %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
 2197         rd      %gsr, %l6
 2198         stx     %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
 2199         wr      %g0, 0, %fprs
 2200 
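              /*
               * Save the user's normal %g6 and %g7 and replace them with the
               * pcb and pcpu pointers that were pre-set in the alternate
               * globals, before interrupts can be enabled (see the NOTE
               * above).
               */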
 2201         mov     PCB_REG, %l0
 2202         mov     PCPU_REG, %l1
 2203         wrpr    %g0, PSTATE_NORMAL, %pstate
 2204 
 2205         stx     %g6, [%sp + SPOFF + CCFSZ + TF_G6]
 2206         stx     %g7, [%sp + SPOFF + CCFSZ + TF_G7]
 2207 
 2208         mov     %l0, PCB_REG
 2209         mov     %l1, PCPU_REG
 2210         wrpr    %g0, PSTATE_KERNEL, %pstate
 2211 
 2212         stx     %i0, [%sp + SPOFF + CCFSZ + TF_O0]
 2213         stx     %i1, [%sp + SPOFF + CCFSZ + TF_O1]
 2214         stx     %i2, [%sp + SPOFF + CCFSZ + TF_O2]
 2215         stx     %i3, [%sp + SPOFF + CCFSZ + TF_O3]
 2216         stx     %i4, [%sp + SPOFF + CCFSZ + TF_O4]
 2217         stx     %i5, [%sp + SPOFF + CCFSZ + TF_O5]
 2218         stx     %i6, [%sp + SPOFF + CCFSZ + TF_O6]
 2219         stx     %i7, [%sp + SPOFF + CCFSZ + TF_O7]
 2220 
 2221         stx     %g1, [%sp + SPOFF + CCFSZ + TF_G1]
 2222         stx     %g2, [%sp + SPOFF + CCFSZ + TF_G2]
 2223         stx     %g3, [%sp + SPOFF + CCFSZ + TF_G3]
 2224         stx     %g4, [%sp + SPOFF + CCFSZ + TF_G4]
 2225         stx     %g5, [%sp + SPOFF + CCFSZ + TF_G5]
 2226 
 2227         set     tl0_ret - 8, %o7
 2228         jmpl    %o2, %g0
 2229          add    %sp, CCFSZ + SPOFF, %o0
 2230 END(tl0_trap)
 2231 
 2232 /*
 2233  * void tl0_intr(u_int level, u_int mask)
 2234  */
 2235 ENTRY(tl0_intr)
 2236         /*
 2237          * Force kernel store order.
 2238          */
 2239         wrpr    %g0, PSTATE_ALT, %pstate
 2240 
 2241         rdpr    %tstate, %l0
 2242         rdpr    %tpc, %l1
 2243         rdpr    %tnpc, %l2
 2244         rd      %y, %l3
 2245         rd      %fprs, %l4
 2246         rdpr    %wstate, %l5
 2247 
 2248 #if KTR_COMPILE & KTR_INTR
 2249         CATR(KTR_INTR,
 2250             "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
 2251             , %g1, %g2, %g3, 7, 8, 9)
 2252         ldx     [PCPU(CURTHREAD)], %g2
 2253         stx     %g2, [%g1 + KTR_PARM1]
 2254         stx     %o0, [%g1 + KTR_PARM2]
 2255         rdpr    %pil, %g2
 2256         stx     %g2, [%g1 + KTR_PARM3]
 2257         stx     %l1, [%g1 + KTR_PARM4]
 2258         stx     %l2, [%g1 + KTR_PARM5]
 2259         stx     %i6, [%g1 + KTR_PARM6]
 2260 9:
 2261 #endif
 2262 
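              /*
               * Raise %pil to the level of this interrupt and clear its
               * pending softint bit.
               */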
 2263         wrpr    %o0, 0, %pil
 2264         wr      %o1, 0, %clear_softint
 2265 
 2266         and     %l5, WSTATE_NORMAL_MASK, %l5
 2267         sllx    %l5, WSTATE_OTHER_SHIFT, %l5
 2268         wrpr    %l5, WSTATE_KERNEL, %wstate
 2269         rdpr    %canrestore, %l6
 2270         wrpr    %l6, 0, %otherwin
 2271         wrpr    %g0, 0, %canrestore
 2272 
 2273         sub     PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
 2274 
 2275         stx     %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
 2276         stx     %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
 2277         stx     %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
 2278         stx     %l3, [%sp + SPOFF + CCFSZ + TF_Y]
 2279         stx     %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
 2280         stx     %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
 2281 
 2282         wr      %g0, FPRS_FEF, %fprs
 2283         stx     %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
 2284         rd      %gsr, %l6
 2285         stx     %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
 2286         wr      %g0, 0, %fprs
 2287 
 2288         mov     %o0, %l3
 2289         mov     T_INTERRUPT, %o1
 2290 
 2291         stx     %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
 2292         stx     %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2293 
 2294         mov     PCB_REG, %l0
 2295         mov     PCPU_REG, %l1
 2296         wrpr    %g0, PSTATE_NORMAL, %pstate
 2297 
 2298         stx     %g1, [%sp + SPOFF + CCFSZ + TF_G1]
 2299         stx     %g2, [%sp + SPOFF + CCFSZ + TF_G2]
 2300         stx     %g3, [%sp + SPOFF + CCFSZ + TF_G3]
 2301         stx     %g4, [%sp + SPOFF + CCFSZ + TF_G4]
 2302         stx     %g5, [%sp + SPOFF + CCFSZ + TF_G5]
 2303         stx     %g6, [%sp + SPOFF + CCFSZ + TF_G6]
 2304         stx     %g7, [%sp + SPOFF + CCFSZ + TF_G7]
 2305 
 2306         mov     %l0, PCB_REG
 2307         mov     %l1, PCPU_REG
 2308         wrpr    %g0, PSTATE_KERNEL, %pstate
 2309 
 2310         stx     %i0, [%sp + SPOFF + CCFSZ + TF_O0]
 2311         stx     %i1, [%sp + SPOFF + CCFSZ + TF_O1]
 2312         stx     %i2, [%sp + SPOFF + CCFSZ + TF_O2]
 2313         stx     %i3, [%sp + SPOFF + CCFSZ + TF_O3]
 2314         stx     %i4, [%sp + SPOFF + CCFSZ + TF_O4]
 2315         stx     %i5, [%sp + SPOFF + CCFSZ + TF_O5]
 2316         stx     %i6, [%sp + SPOFF + CCFSZ + TF_O6]
 2317         stx     %i7, [%sp + SPOFF + CCFSZ + TF_O7]
 2318 
 2319         /*
               * Count this interrupt: %l3 contains the PIL, which pil_countp
               * maps to an index into the intrcnt array.
               */
 2320         SET(intrcnt, %l1, %l2)
 2321         prefetcha [%l2] ASI_N, 1
 2322         SET(pil_countp, %l1, %l0)
 2323         sllx    %l3, 1, %l1
 2324         lduh    [%l0 + %l1], %l0
 2325         sllx    %l0, 3, %l0
 2326         add     %l0, %l2, %l0
 2327 
 2328         ATOMIC_INC_ULONG(%l0, %l1, %l2)
 2329 
 2330         call    critical_enter
 2331          nop
 2332 
 2333         SET(cnt+V_INTR, %l1, %l0)
 2334         ATOMIC_INC_INT(%l0, %l1, %l2)
 2335 
 2336         SET(intr_handlers, %l1, %l0)
 2337         sllx    %l3, IH_SHIFT, %l1
 2338         ldx     [%l0 + %l1], %l1
 2339         KASSERT(%l1, "tl0_intr: ih null")
 2340         call    %l1
 2341          add    %sp, CCFSZ + SPOFF, %o0
 2342 
 2343         call    critical_exit
 2344          nop
 2345 
 2346         ba,a    %xcc, tl0_ret
 2347          nop
 2348 END(tl0_intr)
 2349 
 2350 /*
 2351  * Initiate return to usermode.
 2352  *
 2353  * Called with a trapframe on the stack.  The window that was set up in
 2354  * tl0_trap may have been used by "fast" trap handlers that pretend to be
 2355  * leaf functions, so all ins and locals may have been clobbered since
 2356  * then.
 2357  *
 2358  * This code is rather long and complicated.
 2359  */
 2360 ENTRY(tl0_ret)
 2361         /*
 2362          * Check for pending asts atomically with returning.  We must raise
 2363          * the pil before checking, and if no asts are found the pil must
 2364          * remain raised until the retry is executed, or we risk missing asts
 2365          * caused by interrupts occurring after the test.  If the pil is lowered,
 2366          * as it is when we call ast, the check must be re-executed.
 2367          */
 2368         wrpr    %g0, PIL_TICK, %pil
 2369         ldx     [PCPU(CURTHREAD)], %l0
 2370         lduw    [%l0 + TD_FLAGS], %l1
 2371         set     TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
 2372         and     %l1, %l2, %l1
 2373         brz,a,pt %l1, 1f
 2374          nop
 2375 
 2376         /*
 2377          * We have an ast.  Re-enable interrupts and handle it, then restart
 2378          * the return sequence.
 2379          */
 2380         wrpr    %g0, 0, %pil
 2381         call    ast
 2382          add    %sp, CCFSZ + SPOFF, %o0
 2383         ba,a    %xcc, tl0_ret
 2384          nop
 2385 
 2386         /*
 2387          * Check for windows that were spilled to the pcb and need to be
 2388          * copied out.  This must be the last thing that is done before the
 2389          * return to usermode.  If there are still user windows in the cpu
 2390          * and we call a nested function after this, which causes them to be
 2391          * spilled to the pcb, they will not be copied out and the stack will
 2392          * be inconsistent.
 2393          */
 2394 1:      ldx     [PCB_REG + PCB_NSAVED], %l1
 2395         brz,a,pt %l1, 2f
 2396          nop
 2397         wrpr    %g0, 0, %pil
 2398         mov     T_SPILL, %o0
 2399         stx     %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2400         call    trap
 2401          add    %sp, SPOFF + CCFSZ, %o0
 2402         ba,a    %xcc, tl0_ret
 2403          nop
 2404 
 2405         /*
 2406          * Restore the out and most global registers from the trapframe.
 2407          * The ins will become the outs when we restore below.
 2408          */
 2409 2:      ldx     [%sp + SPOFF + CCFSZ + TF_O0], %i0
 2410         ldx     [%sp + SPOFF + CCFSZ + TF_O1], %i1
 2411         ldx     [%sp + SPOFF + CCFSZ + TF_O2], %i2
 2412         ldx     [%sp + SPOFF + CCFSZ + TF_O3], %i3
 2413         ldx     [%sp + SPOFF + CCFSZ + TF_O4], %i4
 2414         ldx     [%sp + SPOFF + CCFSZ + TF_O5], %i5
 2415         ldx     [%sp + SPOFF + CCFSZ + TF_O6], %i6
 2416         ldx     [%sp + SPOFF + CCFSZ + TF_O7], %i7
 2417 
 2418         ldx     [%sp + SPOFF + CCFSZ + TF_G1], %g1
 2419         ldx     [%sp + SPOFF + CCFSZ + TF_G2], %g2
 2420         ldx     [%sp + SPOFF + CCFSZ + TF_G3], %g3
 2421         ldx     [%sp + SPOFF + CCFSZ + TF_G4], %g4
 2422         ldx     [%sp + SPOFF + CCFSZ + TF_G5], %g5
 2423 
 2424         /*
 2425          * Load everything we need to restore below before disabling
 2426          * interrupts.
 2427          */
 2428         ldx     [%sp + SPOFF + CCFSZ + TF_FPRS], %l0
 2429         ldx     [%sp + SPOFF + CCFSZ + TF_GSR], %l1
 2430         ldx     [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
 2431         ldx     [%sp + SPOFF + CCFSZ + TF_TPC], %l3
 2432         ldx     [%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
 2433         ldx     [%sp + SPOFF + CCFSZ + TF_Y], %l5
 2434         ldx     [%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
 2435 
 2436         /*
 2437          * Disable interrupts to restore the special globals.  They are not
 2438          * saved and restored for all kernel traps, so an interrupt at the
 2439          * wrong time would clobber them.
 2440          */
 2441         wrpr    %g0, PSTATE_NORMAL, %pstate
 2442 
 2443         ldx     [%sp + SPOFF + CCFSZ + TF_G6], %g6
 2444         ldx     [%sp + SPOFF + CCFSZ + TF_G7], %g7
 2445 
 2446         /*
 2447          * Switch to alternate globals.  This frees up some registers we
 2448          * can use after the restore changes our window.
 2449          */
 2450         wrpr    %g0, PSTATE_ALT, %pstate
 2451 
 2452         /*
 2453          * Drop %pil to zero.  It must have been zero at the time of the
 2454          * trap, since we were in usermode, but it was raised above in
 2455          * order to check for asts atomically.  We have interrupts disabled
 2456          * so any interrupts will not be serviced until we complete the
 2457          * return to usermode.
 2458          */
 2459         wrpr    %g0, 0, %pil
 2460 
 2461         /*
 2462          * Save %fprs in an alternate global so it can be restored after the
 2463          * restore instruction below.  If we restore it before the restore
 2464          * and the restore traps, we may run for a while with floating point
 2465          * enabled in the kernel, which we want to avoid.
 2466          */
 2467         mov     %l0, %g1
 2468 
 2469         /*
 2470          * Restore %fsr and %gsr.  These need floating point enabled in %fprs,
 2471          * so we set it temporarily and then clear it.
 2472          */
 2473         wr      %g0, FPRS_FEF, %fprs
 2474         ldx     [%sp + SPOFF + CCFSZ + TF_FSR], %fsr
 2475         wr      %l1, 0, %gsr
 2476         wr      %g0, 0, %fprs
 2477 
 2478         /*
 2479          * Restore program counters.  This could be done after the restore
 2480          * but we're out of alternate globals to store them in...
 2481          */
 2482         wrpr    %l2, 0, %tnpc
 2483         wrpr    %l3, 0, %tpc
 2484 
 2485         /*
 2486          * Save %tstate in an alternate global and clear the %cwp field.  %cwp
 2487          * will be affected by the restore below and we need to make sure it
 2488          * points to the current window at that time, not the window that was
 2489          * active at the time of the trap.
 2490          */
 2491         andn    %l4, TSTATE_CWP_MASK, %g2
 2492 
 2493         /*
 2494          * Restore %y.  Could also be done below if we had more alternate globals.
 2495          */
 2496         wr      %l5, 0, %y
 2497 
 2498         /*
 2499          * Set up %wstate for return.  We need to restore the user window state,
 2500          * which we saved in wstate.other when we trapped.  We also need to
 2501          * set the transition bit so the restore will be handled specially
 2502          * if it traps; use the xor feature of wrpr to do that.
 2503          */
 2504         srlx    %l6, WSTATE_OTHER_SHIFT, %g3
 2505         wrpr    %g3, WSTATE_TRANSITION, %wstate
 2506 
 2507         /*
 2508          * Set up window management registers for return.  If not all user
 2509          * windows were spilled in the kernel, %otherwin will be non-zero,
 2510          * so we need to transfer it to %canrestore to correctly restore
 2511          * those windows.  Otherwise everything gets set to zero and the
 2512          * restore below will fill a window directly from the user stack.
 2513          */
 2514         rdpr    %otherwin, %o0
 2515         wrpr    %o0, 0, %canrestore
 2516         wrpr    %g0, 0, %otherwin
 2517         wrpr    %o0, 0, %cleanwin
 2518 
 2519         /*
 2520          * Now do the restore.  If this instruction causes a fill trap which
 2521          * fails to fill a window from the user stack, we will resume at
 2522          * tl0_ret_fill_end and call back into the kernel.
 2523          */
 2524         restore
 2525 tl0_ret_fill:
 2526 
 2527         /*
 2528          * We made it.  We're back in the window that was active at the time
 2529          * of the trap, and ready to return to usermode.
 2530          */
 2531 
 2532         /*
 2533          * Restore %fprs.  This was saved in an alternate global above.
 2534          */
 2535         wr      %g1, 0, %fprs
 2536 
 2537         /*
 2538          * Fix up %tstate so the saved %cwp points to the current window and
 2539          * restore it.
 2540          */
 2541         rdpr    %cwp, %g4
 2542         wrpr    %g2, %g4, %tstate
 2543 
 2544         /*
 2545          * Restore the user window state.  The transition bit was set above
 2546          * for special handling of the restore; this clears it.
 2547          */
 2548         wrpr    %g3, 0, %wstate
 2549 
 2550 #if KTR_COMPILE & KTR_TRAP
 2551         CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
 2552             , %g2, %g3, %g4, 7, 8, 9)
 2553         ldx     [PCPU(CURTHREAD)], %g3
 2554         stx     %g3, [%g2 + KTR_PARM1]
 2555         rdpr    %pil, %g3
 2556         stx     %g3, [%g2 + KTR_PARM2]
 2557         rdpr    %tpc, %g3
 2558         stx     %g3, [%g2 + KTR_PARM3]
 2559         rdpr    %tnpc, %g3
 2560         stx     %g3, [%g2 + KTR_PARM4]
 2561         stx     %sp, [%g2 + KTR_PARM5]
 2562 9:
 2563 #endif
 2564 
 2565         /*
 2566          * Return to usermode.
 2567          */
 2568         retry
 2569 tl0_ret_fill_end:
 2570 
 2571 #if KTR_COMPILE & KTR_TRAP
 2572         CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
 2573             , %l0, %l1, %l2, 7, 8, 9)
 2574         rdpr    %pstate, %l1
 2575         stx     %l1, [%l0 + KTR_PARM1]
 2576         stx     %l5, [%l0 + KTR_PARM2]
 2577         stx     %sp, [%l0 + KTR_PARM3]
 2578 9:
 2579 #endif
 2580 
 2581         /*
 2582          * The restore above caused a fill trap and the fill handler was
 2583          * unable to fill a window from the user stack.  The special fill
 2584          * handler recognized this and punted, sending us here.  We need
 2585          * to carefully undo any state that was restored before the restore
 2586          * was executed and call trap again.  Trap will copyin a window
 2587          * from the user stack, which will fault in the page we need, so the
 2588          * restore above will succeed when we try again.  If this fails,
 2589          * the process has trashed its stack, so we kill it.
 2590          */
 2591 
 2592         /*
 2593          * Restore the kernel window state.  This was saved in %l6 above, and
 2594          * since the restore failed, we're back in the same window.
 2595          */
 2596         wrpr    %l6, 0, %wstate
 2597 
 2598         /*
 2599          * Restore the normal globals, which have predefined values in the
 2600          * kernel.  We clobbered them above while restoring the user's globals,
 2601          * so this is very important.
 2602          * XXX PSTATE_ALT must already be set.
 2603          */
 2604         wrpr    %g0, PSTATE_ALT, %pstate
 2605         mov     PCB_REG, %o0
 2606         mov     PCPU_REG, %o1
 2607         wrpr    %g0, PSTATE_NORMAL, %pstate
 2608         mov     %o0, PCB_REG
 2609         mov     %o1, PCPU_REG
 2610         wrpr    %g0, PSTATE_KERNEL, %pstate
 2611 
 2612         /*
 2613          * Simulate a fill trap and then start the whole return sequence over
 2614          * again.  This is special because it only copies in 1 window, not 2
 2615          * as we would for a normal failed fill.  This may be the first time
 2616          * the process has been run, so there may not be 2 windows worth of
 2617          * stack to copyin.
 2618          */
 2619         mov     T_FILL_RET, %o0
 2620         stx     %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2621         call    trap
 2622          add    %sp, SPOFF + CCFSZ, %o0
 2623         ba,a    %xcc, tl0_ret
 2624          nop
 2625 END(tl0_ret)
 2626 
 2627 /*
 2628  * Kernel trap entry point
 2629  *
 2630  * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
 2631  *               u_int sfsr)
 2632  *
 2633  * This is easy because the stack is already set up and the windows don't need
 2634  * to be split.  We build a trapframe and call trap(), the same as above, but
 2635  * the outs don't need to be saved.
 2636  */
 2637 ENTRY(tl1_trap)
 2638         rdpr    %tstate, %l0
 2639         rdpr    %tpc, %l1
 2640         rdpr    %tnpc, %l2
 2641         rdpr    %pil, %l3
 2642         rd      %y, %l4
 2643         rdpr    %wstate, %l5
 2644 
 2645 #if KTR_COMPILE & KTR_TRAP
 2646         CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
 2647             , %g1, %g2, %g3, 7, 8, 9)
 2648         ldx     [PCPU(CURTHREAD)], %g2
 2649         stx     %g2, [%g1 + KTR_PARM1]
 2650         stx     %o0, [%g1 + KTR_PARM2]
 2651         stx     %l3, [%g1 + KTR_PARM3]
 2652         stx     %l1, [%g1 + KTR_PARM4]
 2653         stx     %i6, [%g1 + KTR_PARM5]
 2654 9:
 2655 #endif
 2656 
 2657         wrpr    %g0, 1, %tl
 2658 
 2659         and     %l5, WSTATE_OTHER_MASK, %l5
 2660         wrpr    %l5, WSTATE_KERNEL, %wstate
 2661 
 2662         stx     %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2663         stx     %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
 2664         stx     %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
 2665         stx     %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
 2666         stx     %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
 2667 
 2668         stx     %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
 2669         stx     %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
 2670         stx     %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
 2671         stx     %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
 2672         stx     %l4, [%sp + SPOFF + CCFSZ + TF_Y]
 2673 
 2674         mov     PCB_REG, %l0
 2675         mov     PCPU_REG, %l1
 2676         wrpr    %g0, PSTATE_NORMAL, %pstate
 2677 
 2678         stx     %g6, [%sp + SPOFF + CCFSZ + TF_G6]
 2679         stx     %g7, [%sp + SPOFF + CCFSZ + TF_G7]
 2680 
 2681         mov     %l0, PCB_REG
 2682         mov     %l1, PCPU_REG
 2683         wrpr    %g0, PSTATE_KERNEL, %pstate
 2684 
 2685         stx     %i0, [%sp + SPOFF + CCFSZ + TF_O0]
 2686         stx     %i1, [%sp + SPOFF + CCFSZ + TF_O1]
 2687         stx     %i2, [%sp + SPOFF + CCFSZ + TF_O2]
 2688         stx     %i3, [%sp + SPOFF + CCFSZ + TF_O3]
 2689         stx     %i4, [%sp + SPOFF + CCFSZ + TF_O4]
 2690         stx     %i5, [%sp + SPOFF + CCFSZ + TF_O5]
 2691         stx     %i6, [%sp + SPOFF + CCFSZ + TF_O6]
 2692         stx     %i7, [%sp + SPOFF + CCFSZ + TF_O7]
 2693 
 2694         stx     %g1, [%sp + SPOFF + CCFSZ + TF_G1]
 2695         stx     %g2, [%sp + SPOFF + CCFSZ + TF_G2]
 2696         stx     %g3, [%sp + SPOFF + CCFSZ + TF_G3]
 2697         stx     %g4, [%sp + SPOFF + CCFSZ + TF_G4]
 2698         stx     %g5, [%sp + SPOFF + CCFSZ + TF_G5]
 2699 
 2700         set     tl1_ret - 8, %o7
 2701         jmpl    %o2, %g0
 2702          add    %sp, CCFSZ + SPOFF, %o0
 2703 END(tl1_trap)
 2704 
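      /*
       * Return from a nested kernel trap.  The trapframe built by
       * tl1_trap is unwound much as in tl0_ret, but there are no user
       * windows or asts to deal with.
       */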
 2705 ENTRY(tl1_ret)
 2706         ldx     [%sp + SPOFF + CCFSZ + TF_O0], %i0
 2707         ldx     [%sp + SPOFF + CCFSZ + TF_O1], %i1
 2708         ldx     [%sp + SPOFF + CCFSZ + TF_O2], %i2
 2709         ldx     [%sp + SPOFF + CCFSZ + TF_O3], %i3
 2710         ldx     [%sp + SPOFF + CCFSZ + TF_O4], %i4
 2711         ldx     [%sp + SPOFF + CCFSZ + TF_O5], %i5
 2712         ldx     [%sp + SPOFF + CCFSZ + TF_O6], %i6
 2713         ldx     [%sp + SPOFF + CCFSZ + TF_O7], %i7
 2714 
 2715         ldx     [%sp + SPOFF + CCFSZ + TF_G1], %g1
 2716         ldx     [%sp + SPOFF + CCFSZ + TF_G2], %g2
 2717         ldx     [%sp + SPOFF + CCFSZ + TF_G3], %g3
 2718         ldx     [%sp + SPOFF + CCFSZ + TF_G4], %g4
 2719         ldx     [%sp + SPOFF + CCFSZ + TF_G5], %g5
 2720 
 2721         ldx     [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
 2722         ldx     [%sp + SPOFF + CCFSZ + TF_TPC], %l1
 2723         ldx     [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
 2724         ldx     [%sp + SPOFF + CCFSZ + TF_PIL], %l3
 2725         ldx     [%sp + SPOFF + CCFSZ + TF_Y], %l4
 2726 
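              /*
               * Only restore %g6 and %g7 from the trapframe if we are
               * returning to a PROM address; otherwise keep the pcb and
               * pcpu pointers that they hold in the kernel.
               */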
 2727         set     VM_MIN_PROM_ADDRESS, %l5
 2728         cmp     %l1, %l5
 2729         bl,a,pt %xcc, 1f
 2730          nop
 2731 
 2732         wrpr    %g0, PSTATE_NORMAL, %pstate
 2733 
 2734         ldx     [%sp + SPOFF + CCFSZ + TF_G6], %g6
 2735         ldx     [%sp + SPOFF + CCFSZ + TF_G7], %g7
 2736 
 2737 1:      wrpr    %g0, PSTATE_ALT, %pstate
 2738 
 2739         andn    %l0, TSTATE_CWP_MASK, %g1
 2740         mov     %l1, %g2
 2741         mov     %l2, %g3
 2742 
 2743         wrpr    %l3, 0, %pil
 2744         wr      %l4, 0, %y
 2745 
 2746         restore
 2747 
 2748         wrpr    %g0, 2, %tl
 2749 
 2750         rdpr    %cwp, %g4
 2751         wrpr    %g1, %g4, %tstate
 2752         wrpr    %g2, 0, %tpc
 2753         wrpr    %g3, 0, %tnpc
 2754 
 2755 #if KTR_COMPILE & KTR_TRAP
 2756         CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
 2757             , %g2, %g3, %g4, 7, 8, 9)
 2758         ldx     [PCPU(CURTHREAD)], %g3
 2759         stx     %g3, [%g2 + KTR_PARM1]
 2760         rdpr    %pil, %g3
 2761         stx     %g3, [%g2 + KTR_PARM2]
 2762         rdpr    %tstate, %g3
 2763         stx     %g3, [%g2 + KTR_PARM3]
 2764         rdpr    %tpc, %g3
 2765         stx     %g3, [%g2 + KTR_PARM4]
 2766         stx     %sp, [%g2 + KTR_PARM5]
 2767 9:
 2768 #endif
 2769 
 2770         retry
 2771 END(tl1_ret)
 2772 
 2773 /*
 2774  * void tl1_intr(u_int level, u_int mask)
 2775  */
 2776 ENTRY(tl1_intr)
 2777         rdpr    %tstate, %l0
 2778         rdpr    %tpc, %l1
 2779         rdpr    %tnpc, %l2
 2780         rdpr    %pil, %l3
 2781         rd      %y, %l4
 2782         rdpr    %wstate, %l5
 2783 
 2784 #if KTR_COMPILE & KTR_INTR
 2785         CATR(KTR_INTR,
 2786             "tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
 2787             , %g1, %g2, %g3, 7, 8, 9)
 2788         ldx     [PCPU(CURTHREAD)], %g2
 2789         stx     %g2, [%g1 + KTR_PARM1]
 2790         stx     %o0, [%g1 + KTR_PARM2]
 2791         stx     %l3, [%g1 + KTR_PARM3]
 2792         stx     %l1, [%g1 + KTR_PARM4]
 2793         stx     %i6, [%g1 + KTR_PARM5]
 2794 9:
 2795 #endif
 2796 
 2797         wrpr    %o0, 0, %pil
 2798         wr      %o1, 0, %clear_softint
 2799 
 2800         wrpr    %g0, 1, %tl
 2801 
 2802         and     %l5, WSTATE_OTHER_MASK, %l5
 2803         wrpr    %l5, WSTATE_KERNEL, %wstate
 2804 
 2805         stx     %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
 2806         stx     %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
 2807         stx     %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
 2808         stx     %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
 2809         stx     %l4, [%sp + SPOFF + CCFSZ + TF_Y]
 2810 
 2811         mov     %o0, %l7
 2812         mov     T_INTERRUPT | T_KERNEL, %o1
 2813 
 2814         stx     %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
 2815         stx     %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
 2816 
 2817         stx     %i6, [%sp + SPOFF + CCFSZ + TF_O6]
 2818         stx     %i7, [%sp + SPOFF + CCFSZ + TF_O7]
 2819 
 2820         mov     PCB_REG, %l4
 2821         mov     PCPU_REG, %l5
 2822         wrpr    %g0, PSTATE_NORMAL, %pstate
 2823 
 2824         stx     %g1, [%sp + SPOFF + CCFSZ + TF_G1]
 2825         stx     %g2, [%sp + SPOFF + CCFSZ + TF_G2]
 2826         stx     %g3, [%sp + SPOFF + CCFSZ + TF_G3]
 2827         stx     %g4, [%sp + SPOFF + CCFSZ + TF_G4]
 2828         stx     %g5, [%sp + SPOFF + CCFSZ + TF_G5]
 2829 
 2830         mov     %l4, PCB_REG
 2831         mov     %l5, PCPU_REG
 2832         wrpr    %g0, PSTATE_KERNEL, %pstate
 2833 
 2834         /* %l7 contains PIL */
 2835         SET(intrcnt, %l5, %l4)
 2836         prefetcha [%l4] ASI_N, 1
 2837         SET(pil_countp, %l5, %l6)
 2838         sllx    %l7, 1, %l5
 2839         lduh    [%l5 + %l6], %l5
 2840         sllx    %l5, 3, %l5
 2841         add     %l5, %l4, %l4
 2842 
 2843         ATOMIC_INC_ULONG(%l4, %l5, %l6)
 2844 
 2845         call    critical_enter
 2846          nop
 2847 
 2848         SET(cnt+V_INTR, %l5, %l4)
 2849         ATOMIC_INC_INT(%l4, %l5, %l6)
 2850 
 2851         SET(intr_handlers, %l5, %l4)
 2852         sllx    %l7, IH_SHIFT, %l5
 2853         ldx     [%l4 + %l5], %l5
 2854         KASSERT(%l5, "tl1_intr: ih null")
 2855         call    %l5
 2856          add    %sp, CCFSZ + SPOFF, %o0
 2857 
 2858         call    critical_exit
 2859          nop
 2860 
 2861         ldx     [%sp + SPOFF + CCFSZ + TF_Y], %l4
 2862 
 2863         ldx     [%sp + SPOFF + CCFSZ + TF_G1], %g1
 2864         ldx     [%sp + SPOFF + CCFSZ + TF_G2], %g2
 2865         ldx     [%sp + SPOFF + CCFSZ + TF_G3], %g3
 2866         ldx     [%sp + SPOFF + CCFSZ + TF_G4], %g4
 2867         ldx     [%sp + SPOFF + CCFSZ + TF_G5], %g5
 2868 
 2869         wrpr    %g0, PSTATE_ALT, %pstate
 2870 
 2871         andn    %l0, TSTATE_CWP_MASK, %g1
 2872         mov     %l1, %g2
 2873         mov     %l2, %g3
 2874         wrpr    %l3, 0, %pil
 2875         wr      %l4, 0, %y
 2876 
 2877         restore
 2878 
 2879         wrpr    %g0, 2, %tl
 2880 
 2881         rdpr    %cwp, %g4
 2882         wrpr    %g1, %g4, %tstate
 2883         wrpr    %g2, 0, %tpc
 2884         wrpr    %g3, 0, %tnpc
 2885 
 2886 #if KTR_COMPILE & KTR_INTR
 2887         CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
 2888             , %g2, %g3, %g4, 7, 8, 9)
 2889         ldx     [PCPU(CURTHREAD)], %g3
 2890         stx     %g3, [%g2 + KTR_PARM1]
 2891         rdpr    %pil, %g3
 2892         stx     %g3, [%g2 + KTR_PARM2]
 2893         rdpr    %tstate, %g3
 2894         stx     %g3, [%g2 + KTR_PARM3]
 2895         rdpr    %tpc, %g3
 2896         stx     %g3, [%g2 + KTR_PARM4]
 2897         stx     %sp, [%g2 + KTR_PARM5]
 2898 9:
 2899 #endif
 2900 
 2901         retry
 2902 END(tl1_intr)
 2903 
 2904 /*
 2905  * Freshly forked processes come here when switched to for the first time.
 2906  * The arguments to fork_exit() have been set up in the locals; we must move
 2907  * them to the outs.
 2908  */
 2909 ENTRY(fork_trampoline)
 2910 #if KTR_COMPILE & KTR_PROC
 2911         CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
 2912             , %g1, %g2, %g3, 7, 8, 9)
 2913         ldx     [PCPU(CURTHREAD)], %g2
 2914         stx     %g2, [%g1 + KTR_PARM1]
 2915         ldx     [%g2 + TD_PROC], %g2
 2916         add     %g2, P_COMM, %g2
 2917         stx     %g2, [%g1 + KTR_PARM2]
 2918         rdpr    %cwp, %g2
 2919         stx     %g2, [%g1 + KTR_PARM3]
 2920 9:
 2921 #endif
 2922         mov     %l0, %o0
 2923         mov     %l1, %o1
 2924         call    fork_exit
 2925          mov    %l2, %o2
 2926         ba,a    %xcc, tl0_ret
 2927          nop
 2928 END(fork_trampoline)
