The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/Emulate64.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */                                                                                                                                                                                     
   25 
   26 /* Emulate64.s
   27  *
   28  * Software emulation of instructions not handled in hw, on 64-bit machines.
   29  */
   30  
   31 #include <sys/appleapiopts.h>
   32 #include <cpus.h>
   33 #include <ppc/asm.h>
   34 #include <ppc/proc_reg.h>
   35 #include <ppc/exception.h>
   36 #include <mach/machine/vm_param.h>
   37 #include <ppc/cpu_capabilities.h>
   38 #include <assym.s>
   39 
   40 // CR bit set if the instruction is an "update" form (LFDU, STWU, etc):
   41 #define kUpdate 25
   42 
   44 // CR bit set if interrupt occurred in trace mode (ie, MSR_SE_BIT):
   44 #define kTrace  8
   45 
   46 // CR bit set if notification on alignment interrupts is requested (notifyUnalignbit in spcFlags):
   47 #define kNotify 9
   48 
   49 // CR bit distinguishes between alignment and program exceptions:
   50 #define kAlignment      10
   51 
   52 
   53 
   54 // *************************************
   55 // * P R O G R A M   I N T E R R U P T *
   56 // *************************************
   57 //
   58 // These are floating pt exceptions, illegal instructions, privileged mode violations,
   59 // and traps.  All we're interested in at this low level is illegal instructions.
   60 // The ones we "emulate" are:
   61 //              DCBA,  which is not implemented in the IBM 970.  The emulation is to ignore it,
   62 //                         as it is just a hint.
   63 //              MCRXR, which is not implemented on the IBM 970, but is in the PPC ISA.
   64 //
   65 // Additionally, to facilitate debugging the alignment handler, we recognize a special
   66 // diagnostic mode that is used to simulate alignment exceptions.  When in this mode,
   67 // if the instruction has opcode==0 and the extended opcode is one of the X-form
   68 // instructions that can take an alignment interrupt, then we change the opcode to
   69 // 31 and pretend it got an alignment interrupt.  This exercises paths that
   70 // are hard to drive or perhaps never driven on this particular CPU.
   71 
   72         .text
   73         .globl  EXT(Emulate64)
   74         .align  5
   75 LEXT(Emulate64)
   76         crclr   kAlignment                                              // not an alignment exception
   77         b               a64AlignAssistJoin                              // join alignment handler
   78         
   79         
   80 // Return from alignment handler with all the regs loaded for opcode emulation.
   81         
   82 a64HandleProgramInt:
   83         rlwinm. r0,r29,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT      // illegal opcode?
   84         beq             a64PassAlong                                    // No, must have been trap or priv violation etc
   85         rlwinm  r3,r20,6,26,31                                  // right justify opcode field (bits 0-5)
   86         rlwinm  r4,r20,31,22,31                                 // right justify extended opcode field (bits 21-30)
   87         cmpwi   cr0,r3,31                                               // X-form?
   88         cmpwi   cr1,r4,758                                              // DCBA?
   89         cmpwi   cr4,r4,512                                              // MCRXR?
   90         crand   cr1_eq,cr0_eq,cr1_eq                    // merge the two tests for DCBA
   91         crand   cr4_eq,cr0_eq,cr4_eq                    // and for MCRXR
   92         beq++   cr1_eq,a64ExitEm                                // was DCBA, so ignore
   93         bne--   cr4_eq,a64NotEmulated                   // skip if not MCRXR
   94         
   95 // Was MCRXR, so emulate.
   96 
   97         ld              r3,savexer(r13)                                 // get the XER
   98         lwz             r4,savecr(r13)                                  // and the CR
   99         rlwinm  r5,r20,11,27,29                                 // get (CR# * 4) from instruction
  100         rlwinm  r6,r3,0,4,31                                    // zero XER[32-35] (also XER[0-31])
  101         sld             r4,r4,r5                                                // move target CR field to bits 32-35
  102         rlwimi  r4,r3,0,0,3                                             // move XER[32-35] into CR field
  103         stw             r6,savexer+4(r13)                               // update XER
  104         srd             r4,r4,r5                                                // re-position CR
  105         stw             r4,savecr(r13)                                  // update CR
  106         b               a64ExitEm                                               // done
  107 
  108 // Not an opcode we normally emulate.  If in special diagnostic mode and opcode=0,
  109 // emulate as an alignment exception.  This special case is for test software.
  110 
  111 a64NotEmulated:
  112         lwz             r30,dgFlags(0)                                  // Get the flags
  113         rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb  // Do we want to try to emulate something?
  114         beq++   a64PassAlong                                    // No emulation allowed
  115         cmpwi   r3,0                                                    // opcode==0 ?
  116         bne             a64PassAlong                                    // not the special case
  117         oris    r20,r20,0x7C00                                  // change opcode to 31
  118         crset   kAlignment                                              // say we took alignment exception
  119         rlwinm  r5,r4,0,26+1,26-1                               // mask Update bit (32) out of extended opcode
  120         rlwinm  r5,r5,0,0,31                                    // Clean out leftover junk from rlwinm
  121 
  122         cmpwi   r4,1014                                                 // dcbz/dcbz128 ?
  123         crmove  cr1_eq,cr0_eq
  124         cmpwi   r5,21                                                   // ldx/ldux ?
  125         cror    cr1_eq,cr0_eq,cr1_eq
  126         cmpwi   r5,599                                                  // lfdx/lfdux ?
  127         cror    cr1_eq,cr0_eq,cr1_eq
  128         cmpwi   r5,535                                                  // lfsx/lfsux ?
  129         cror    cr1_eq,cr0_eq,cr1_eq
  130         cmpwi   r5,343                                                  // lhax/lhaux ?
  131         cror    cr1_eq,cr0_eq,cr1_eq
  132         cmpwi   r4,790                                                  // lhbrx ?
  133         cror    cr1_eq,cr0_eq,cr1_eq
  134         cmpwi   r5,279                                                  // lhzx/lhzux ?
  135         cror    cr1_eq,cr0_eq,cr1_eq
  136         cmpwi   r4,597                                                  // lswi ?
  137         cror    cr1_eq,cr0_eq,cr1_eq
  138         cmpwi   r4,533                                                  // lswx ?
  139         cror    cr1_eq,cr0_eq,cr1_eq
  140         cmpwi   r5,341                                                  // lwax/lwaux ?
  141         cror    cr1_eq,cr0_eq,cr1_eq
  142         cmpwi   r4,534                                                  // lwbrx ?
  143         cror    cr1_eq,cr0_eq,cr1_eq
  144         cmpwi   r5,23                                                   // lwz/lwzx ?
  145         cror    cr1_eq,cr0_eq,cr1_eq
  146         cmpwi   r5,149                                                  // stdx/stdux ?
  147         cror    cr1_eq,cr0_eq,cr1_eq
  148         cmpwi   r5,727                                                  // stfdx/stfdux ?
  149         cror    cr1_eq,cr0_eq,cr1_eq
  150         cmpwi   r4,983                                                  // stfiwx ?
  151         cror    cr1_eq,cr0_eq,cr1_eq
  152         cmpwi   r5,663                                                  // stfsx/stfsux ?
  153         cror    cr1_eq,cr0_eq,cr1_eq
  154         cmpwi   r4,918                                                  // sthbrx ?
  155         cror    cr1_eq,cr0_eq,cr1_eq
  156         cmpwi   r5,407                                                  // sthx/sthux ?
  157         cror    cr1_eq,cr0_eq,cr1_eq
  158         cmpwi   r4,725                                                  // stswi ?
  159         cror    cr1_eq,cr0_eq,cr1_eq
  160         cmpwi   r4,661                                                  // stswx ?
  161         cror    cr1_eq,cr0_eq,cr1_eq
  162         cmpwi   r4,662                                                  // stwbrx ?
  163         cror    cr1_eq,cr0_eq,cr1_eq
  164         cmpwi   r5,151                                                  // stwx/stwux ?
  165         cror    cr1_eq,cr0_eq,cr1_eq
  166         
  167         beq++   cr1,a64GotInstruction                   // it was one of the X-forms we handle
  168         crclr   kAlignment                                              // revert to program interrupt
  169         b               a64PassAlong                                    // not recognized extended opcode
  170         
  171 
  172 // *****************************************
  173 // * A L I G N M E N T   I N T E R R U P T *
  174 // *****************************************
  175 //
  176 // We get here in exception context, ie with interrupts disabled, translation off, and
  177 // in 64-bit mode, with:
  178 //              r13 = save-area pointer, with general context already saved in it
  179 //              cr6 = feature flags
  180 // We preserve r13 and cr6.  Other GPRs and CRs, the LR and CTR are used.
  181 //
  182 // Current 64-bit processors (GPUL) handle almost all misaligned operations in hardware,
  183 // so this routine usually isn't called very often.  Only floating pt ops that cross a page
  184 // boundary and are not word aligned, and LMW/STMW can take exceptions to cacheable memory.
  185 // However, in contrast to G3 and G4, any misaligned load/store will get an alignment
  186 // interrupt on uncached memory.
  187 //
  188 // We always emulate scalar ops with a series of byte load/stores.  Doing so is no slower
  189 // than LWZ/STW in cases where a scalar op gets an alignment exception.
  190 //
  191 // This routine supports all legal permutations of alignment interrupts occurring in user or
  192 // supervisor mode, 32 or 64-bit addressing, and translation on or off.  We do not emulate
  193 // instructions that go past the end of an address space, such as "LHZ -1(0)"; we just pass
  194 // along the alignment exception rather than wrap around to byte 0.  (Treatment of address
  195 // space wrap is a moot point in Mac OS X, since we do not map either the last page or
  196 // page 0.)
  197 //
  198 // First, check for a few special cases such as virtual machines, etc.
  199 
  200         .globl  EXT(AlignAssist64)
  201         .align  5
  202 LEXT(AlignAssist64)
  203         crset   kAlignment                                                              // mark as alignment interrupt
  204 
  205 a64AlignAssistJoin:                                                                             // join here from program interrupt handler
  206         mfsprg  r31,0                                                                   // get the per_proc data ptr
  207         mcrf    cr3,cr6                                                                 // save feature flags here...
  208         lwz             r21,spcFlags(r31)                                               // grab the special flags
  209         ld              r29,savesrr1(r13)                                               // get the MSR etc at the fault
  210         ld              r28,savesrr0(r13)                                               // get the EA of faulting instruction
  211         mfmsr   r26                                                                             // save MSR at entry
  212         rlwinm. r0,r21,0,runningVMbit,runningVMbit              // Are we running a VM?
  213         lwz             r19,dgFlags(0)                                                  // Get the diagnostics flags
  214         bne--   a64PassAlong                                                    // yes, let the virtual machine monitor handle
  215 
  216 
  217 // Set up the MSR shadow regs.  We turn on FP in this routine, and usually set DR and RI
  218 // when accessing user space (the SLB is still set up with all the user space translations.)
  219 // However, if the interrupt occurred in the kernel with DR off, we keep it off while
  220 // accessing the "target" address space.  If we set DR to access the target space, we also
  221 // set RI.  The RI bit tells the exception handlers to clear cr0 beq and return if we get an
  222 // exception accessing the user address space.  We are careful to test cr0 beq after every such
  223 // access.  We keep the following "shadows" of the MSR in global regs across this code:
  224 //              r25 = MSR at entry, plus FP and probably DR and RI (used to access target space)
  225 //              r26 = MSR at entry
  226 //              r27 = free
  227 //              r29 = SRR1 (ie, MSR at interrupt)
  228 // Note that EE and IR are always off, and SF is always on in this code.
  229 
  230                 rlwinm  r3,r29,0,MSR_DR_BIT,MSR_DR_BIT                  // was translation on at fault?
  231         rlwimi  r3,r3,32-MSR_RI_BIT+MSR_DR_BIT,MSR_RI_BIT,MSR_RI_BIT    // if DR was set, set RI too
  232         or              r25,r26,r3                                                              // assemble MSR to use accessing target space
  233         
  234 
  235 // Because the DSISR and DAR are either not set or are not to be trusted on some 64-bit
  236 // processors on an alignment interrupt, we must fetch the faulting instruction ourselves,
  237 // then decode/hash the opcode and reconstruct the EA manually.
  238 
  239         mtmsr   r25                                     // turn on FP and (if it was on at fault) DR and RI
  240         isync                                           // wait for it to happen
  241                 cmpw    r0,r0                           // turn on beq so we can check for DSIs
  242         lwz             r20,0(r28)                      // fetch faulting instruction, probably with DR on
  243         bne--   a64RedriveAsISI         // got a DSI trying to fetch it, pretend it was an ISI
  244         mtmsr   r26                                     // turn DR back off
  245         isync                                           // wait for it to happen
  246 
  247 
  248 // Set a few flags while we wait for the faulting instruction to arrive from cache.
  249 
  250         rlwinm. r0,r29,0,MSR_SE_BIT,MSR_SE_BIT                          // Were we single stepping?
  251                 stw             r20,savemisc2(r13)      // Save the instruction image in case we notify
  252         crnot   kTrace,cr0_eq
  253         rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb                      // Should we notify?
  254         crnot   kNotify,cr0_eq        
  255 
  256 
  257 // Hash the instruction into a 5-bit value "AAAAB" used to index the branch table, and a
  258 // 1-bit kUpdate flag, as follows:
  259 //  • for X-form instructions (with primary opcode 31):
  260 //       the "AAAA" bits are bits 21-24 of the instruction
  261 //       the "B" bit is the XOR of bits 29 and 30
  262 //       the update bit is instruction bit 25
  263 //      • for D and DS-form instructions (actually, any primary opcode except 31):
  264 //       the "AAAA" bits are bits 1-4 of the instruction
  265 //       the "B" bit is 0
  266 //       the update bit is instruction bit 5
  267 //
  268 // Just for fun (and perhaps a little speed on deep-pipe machines), we compute the hash,
  269 // update flag, and EA without branches and with ipc >= 2.
  270 //
  271 // When we "bctr" to the opcode-specific routine, the following are all set up:
  272 //              MSR = EE and IR off, SF and FP on
  273 //              r13 = save-area pointer (physical)
  274 //              r14 = ptr to saver0 in save-area (ie, to base of GPRs)
  275 //              r15 = 0x00000000FFFFFFFF if 32-bit mode fault, 0xFFFFFFFFFFFFFFFF if 64
  276 //              r16 = RA * 8 (ie, reg# not reg value)
  277 //              r17 = EA
  278 //              r18 = (RA|0) (reg value)
  279 //              r19 = -1 if X-form, 0 if D-form
  280 //              r20 = faulting instruction
  281 //              r21 = RT * 8 (ie, reg# not reg value)
  282 //              r22 = addr(aaFPopTable)+(RT*32), ie ptr to floating pt table for target register
  283 //              r25 = MSR at entrance, probably with DR and RI set (for access to target space)
  284 //              r26 = MSR at entrance
  285 //              r27 = free
  286 //              r28 = SRR0 (ie, EA of faulting instruction)
  287 //              r29 = SRR1 (ie, MSR at fault)
  288 //              r30 = scratch, usually user data
  289 //              r31 = per-proc pointer
  290 //              cr2 = kTrace, kNotify, and kAlignment flags
  291 //      cr3 = saved copy of feature flags used in lowmem vector code
  292 //              cr6 = bits 24-27 of CR are bits 24-27 of opcode if X-form, or bits 4-5 and 00 if D-form
  293 //                        bit 25 is the kUpdate flag, set for update form instructions
  294 //              cr7 = bits 28-31 of CR are bits 28-31 of opcode if X-form, or 0 if D-form
  295 
  296 a64GotInstruction:                                      // here from program interrupt with instruction in r20
  297         rlwinm  r21,r20,6+6,20,25       // move the primary opcode (bits 0-6) to bits 20-25
  298         la              r14,saver0(r13)         // r14 <- base address of GPR registers
  299         xori    r19,r21,0x07C0          // iff primary opcode is 31, set r19 to 0
  300         rlwinm  r16,r20,16+3,24,28      // r16 <- RA*8
  301         subi    r19,r19,1                       // set bit 0 iff X-form (ie, if primary opcode is 31)
  302         rlwinm  r17,r20,21+3,24,28      // r17 <- RB*8 (if X-form)
  303         sradi   r19,r19,63                      // r19 <- -1 if X-form, 0 if D-form
  304         extsh   r22,r20                         // r22 <- displacement (if D-form)
  305 
  306         ldx             r23,r14,r17                     // get (RB), if any
  307         and             r15,r20,r19                     // instruction if X, 0 if D
  308         andc    r17,r21,r19                     // primary opcode in bits 20-25 if D, 0 if X
  309         ldx             r18,r14,r16                     // get (RA)
  310         subi    r24,r16,1                       // set bit 0 iff RA==0
  311         or              r21,r15,r17                     // r21 <- instruction if X, or bits 0-5 in bits 20-25 if D
  312         sradi   r24,r24,63                      // r24 <- -1 if RA==0, 0 otherwise
  313         rlwinm  r17,r21,32-4,25,28      // shift opcode bits 21-24 to 25-28 (hash "AAAA" bits)
  314         lis             r10,ha16(a64BranchTable)        // start to build up branch table address
  315         rlwimi  r17,r21,0,29,29         // move opcode bit 29 into hash as start of "B" bit
  316         rlwinm  r30,r21,1,29,29         // position opcode bit 30 in position 29
  317         and             r12,r23,r19                     // RB if X-form, 0 if D-form
  318         andc    r11,r22,r19                     // 0 if X-form, sign extended displacement if D-form
  319         xor             r17,r17,r30                     // bit 29 ("B") of hash is xor(bit29,bit30)
  320         addi    r10,r10,lo16(a64BranchTable)
  321         or              r12,r12,r11                     // r12 <- (RB) or displacement, as appropriate
  322         lwzx    r30,r10,r17                     // get address from branch table
  323         mtcrf   0x01,r21                        // move opcode bits 28-31 to CR7
  324         sradi   r15,r29,32                      // propogate SF bit from SRR1 (MSR_SF, which is bit 0)
  325         andc    r18,r18,r24                     // r18 <- (RA|0)
  326         mtcrf   0x02,r21                        // move opcode bits 24-27 to CR6 (kUpdate is bit 25)
  327         add             r17,r18,r12                     // r17 <- EA, which might need to be clamped to 32 bits
  328         mtctr   r30                                     // set up branch address
  329         
  330         oris    r15,r15,0xFFFF          // start to fill low word of r15 with 1s
  331         rlwinm  r21,r20,11+3,24,28      // r21 <- RT * 8
  332         lis             r22,ha16(EXT(aaFPopTable))      // start to compute address of floating pt table
  333         ori             r15,r15,0xFFFF          // now bits 32-63 of r15 are 1s
  334         addi    r22,r22,lo16(EXT(aaFPopTable))
  335         and             r17,r17,r15                     // clamp EA to 32 bits if necessary
  336         rlwimi  r22,r21,2,22,26         // move RT into aaFPopTable address (which is 1KB aligned)
  337         
  338         bf--    kAlignment,a64HandleProgramInt  // return to Program Interrupt handler
  339         bctr                                            // if alignment interrupt, jump to opcode-specific routine
  340         
  341         
  342 // Floating-pt load single (lfs[u], lfsx[u])
  343 
  344 a64LfsLfsx:
  345         bl              a64Load4Bytes           // get data in r30
  346         mtctr   r22                                     // set up address of "lfs fRT,emfp0(r31)"
  347         stw             r30,emfp0(r31)          // put word here for aaFPopTable routine
  348         bctrl                                           // do the lfs
  349         b               a64UpdateCheck          // update RA if necessary and exit
  350         
  351         
  352 // Floating-pt store single (stfs[u], stfsx[u])
  353 
  354 a64StfsStfsx:
  355         ori             r22,r22,8                       // set dir==1 (ie, single store) in aaFPopTable
  356         mtctr   r22                                     // set up address of "stfs fRT,emfp0(r31)"
  357         bctrl                                           // execute the store into emfp0
  358         lwz             r30,emfp0(r31)          // get the word
  359         bl              a64Store4Bytes          // store r30 into user space
  360         b               a64UpdateCheck          // update RA if necessary and exit
  361         
  362 
  363 // Floating-pt store as integer word (stfiwx)
  364 
  365 a64Stfiwx:
  366         ori             r22,r22,16+8            // set size=1, dir==1 (ie, double store) in aaFPopTable
  367         mtctr   r22                                     // set up FP register table address
  368         bctrl                                           // double precision store into emfp0
  369         lwz             r30,emfp0+4(r31)        // get the low-order word
  370         bl              a64Store4Bytes          // store r30 into user space
  371         b               a64Exit                         // successfully emulated
  372         
  373 
  374 // Floating-pt load double (lfd[u], lfdx[u])
  375 
  376 a64LfdLfdx:
  377         ori             r22,r22,16                      // set Double bit in aaFPopTable address
  378         bl              a64Load8Bytes           // get data in r30
  379         mtctr   r22                                     // set up address of "lfd fRT,emfp0(r31)"
  380         std             r30,emfp0(r31)          // put doubleword here for aaFPopTable routine
  381         bctrl                                           // execute the load
  382         b               a64UpdateCheck          // update RA if necessary and exit
  383 
  384 
  385 // Floating-pt store double (stfd[u], stfdx[u])
  386 
  387 a64StfdStfdx:
  388         ori             r22,r22,16+8            // set size=1, dir==1 (ie, double store) in aaFPopTable address
  389         mtctr   r22                                     // address of routine to stfd RT
  390         bctrl                                           // store into emfp0
  391         ld              r30,emfp0(r31)          // get the doubleword
  392         bl              a64Store8Bytes          // store r30 into user space
  393         b               a64UpdateCheck          // update RA if necessary and exit
  394 
  395 
  396 // Load halfword w 0-fill (lhz[u], lhzx[u])
  397 
  398 a64LhzLhzx:
  399         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  400         stdx    r30,r14,r21                     // store into RT slot in register file
  401         b               a64UpdateCheck          // update RA if necessary and exit
  402 
  403 
  404 // Load halfword w sign fill (lha[u], lhax[u])
  405 
  406 a64LhaLhax:
  407         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  408         extsh   r30,r30                         // sign-extend
  409         stdx    r30,r14,r21                     // store into RT slot in register file
  410         b               a64UpdateCheck          // update RA if necessary and exit
  411 
  412 
  413 // Load halfword byte reversed (lhbrx)
  414 
  415 a64Lhbrx:
  416         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  417         rlwinm  r3,r30,8,16,23          // reverse bytes into r3
  418         rlwimi  r3,r30,24,24,31
  419         stdx    r3,r14,r21                      // store into RT slot in register file
  420         b               a64Exit                         // successfully emulated
  421 
  422 
  423 // Store halfword (sth[u], sthx[u])
  424 
  425 a64SthSthx:
  426         ldx             r30,r14,r21                     // get RT
  427         bl              a64Store2Bytes          // store r30 into user space
  428         b               a64UpdateCheck          // update RA if necessary and exit
  429 
  430 
  431 // Store halfword byte reversed (sthbrx)
  432 
  433 a64Sthbrx:
  434         addi    r21,r21,6                       // point to low two bytes of RT
  435         lhbrx   r30,r14,r21                     // load and reverse
  436         bl              a64Store2Bytes          // store r30 into user space
  437         b               a64Exit                         // successfully emulated
  438 
  439 
  440 // Load word w 0-fill (lwz[u], lwzx[u]), also lwarx.
  441 
  442 a64LwzLwzxLwarx:
  443         andc    r3,r19,r20                      // light bit 30 of r3 iff lwarx
  444         andi.   r0,r3,2                         // is it lwarx?
  445         bne--   a64PassAlong            // yes, never try to emulate a lwarx
  446         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  447         stdx    r30,r14,r21                     // update register file
  448         b               a64UpdateCheck          // update RA if necessary and exit
  449         
  450         
  451 // Load word w sign fill (lwa, lwax[u])
  452 
  453 a64Lwa:
  454         crclr   kUpdate                         // no update form of lwa (its a reserved encoding)
  455 a64Lwax:
  456         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  457         extsw   r30,r30                         // sign extend
  458         stdx    r30,r14,r21                     // update register file
  459         b               a64UpdateCheck          // update RA if necessary and exit
  460 
  461 
  462 // Load word byte reversed (lwbrx)
  463 
  464 a64Lwbrx:
  465         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  466         rlwinm  r3,r30,24,0,31          // flip bytes 1234 to 4123
  467         rlwimi  r3,r30,8,8,15           // r3 is now 4323
  468         rlwimi  r3,r30,8,24,31          // r3 is now 4321
  469         stdx    r3,r14,r21                      // update register file
  470         b               a64Exit                         // successfully emulated
  471 
  472         
  473 // Store word (stw[u], stwx[u])
  474 
  475 a64StwStwx:
  476         ldx             r30,r14,r21                     // get RT
  477         bl              a64Store4Bytes          // store r30 into user space
  478         b               a64UpdateCheck          // update RA if necessary and exit
  479 
  480 
  481 // Store word byte reversed (stwbrx)
  482 
  483 a64Stwbrx:
  484         addi    r21,r21,4                       // point to low word of RT
  485         lwbrx   r30,r14,r21                     // load and reverse
  486         bl              a64Store4Bytes          // store r30 into user space
  487         b               a64Exit                         // successfully emulated
  488 
  489 
  490 // Load doubleword (ld[u], ldx[u]), also lwa.
  491 
  492 a64LdLwa:                                                       // these are DS form: ld=0, ldu=1, and lwa=2
  493         andi.   r0,r20,2                        // ld[u] or lwa? (test bit 30 of DS field)
  494         rlwinm  r3,r20,0,30,31          // must adjust EA by subtracting DS field
  495         sub             r17,r17,r3              // back EA up: DS-form low 2 bits encode the opcode, not displacement
  496         and             r17,r17,r15                     // re-clamp to 32 bits if necessary (r15 = addr-space mask)
  497         bne             a64Lwa                          // handle lwa
  498 a64Ldx:
  499         bl              a64Load8Bytes           // load 8 bytes from user space into r30
  500         stdx    r30,r14,r21                     // update register file
  501         b               a64UpdateCheck          // update RA if necessary and exit
  502 
  503 
  504 // Store doubleword (stdx[u], std[u])
  505 
  506 a64StdxStwcx:
  507         bf--    30,a64PassAlong         // stwcx, so pass along alignment exception (never emulate atomics)
  508         b               a64Stdx                         // was stdx
  509 a64StdStfiwx:
  510         bt              30,a64Stfiwx            // handle stfiwx
  511         rlwinm. r3,r20,0,30,31          // must adjust EA by subtracting DS field
  512         sub             r17,r17,r3              // back EA up: DS-form low 2 bits encode the opcode, not displacement
  513         and             r17,r17,r15                     // re-clamp to 32 bits if necessary
  514 a64Stdx:
  515         ldx             r30,r14,r21                     // get RT from saved register file
  516         bl              a64Store8Bytes          // store RT into user space
  517         b               a64UpdateCheck          // update RA if necessary and exit
  518 
  519 
  520 // Dcbz and Dcbz128 (bit 10 distinguishes the two forms)
  521 
  522 a64DcbzDcbz128:
  523         andis.  r0,r20,0x0020           // bit 10 set? (dcbz128 vs dcbz)
  524         li              r3,0                            // get a 0 to store
  525         li              r0,4                            // assume 32-bit version, store 8 bytes 4x
  526         li              r4,_COMM_PAGE_BASE_ADDRESS
  527         rldicr  r17,r17,0,63-5          // 32-byte align EA
  528         beq             a64DcbzSetup            // it was the 32-byte version
  529         rldicr  r17,r17,0,63-7          // zero low 7 bits of EA (128-byte align)
  530         li              r0,16                           // store 8 bytes 16x
  531 a64DcbzSetup:
  532         xor             r4,r4,r28                       // was dcbz in the commpage(s)? (r28 = SRR0, the faulting PC)
  533         and             r4,r4,r15                       // mask off high-order bits if 32-bit mode
  534         srdi.   r4,r4,12                        // check SRR0: zero iff fault PC is in the commpage's page
  535         bne             a64NotCommpage          // not in commpage
  536         rlwinm. r4,r29,0,MSR_PR_BIT,MSR_PR_BIT  // did fault occur in user mode? (r29 = SRR1)
  537         beq--   a64NotCommpage          // do not zero cr7 if kernel got alignment exception
  538         lwz             r4,savecr(r13)          // if we take a dcbz{128} in the commpage...
  539         rlwinm  r4,r4,0,0,27            // ...clear user's cr7...
  540         stw             r4,savecr(r13)          // ...as a flag for _COMM_PAGE_BIGCOPY
  541 a64NotCommpage:
  542         mtctr   r0                              // loop count: 4 or 16 doubleword stores
  543         cmpw    r0,r0                           // turn cr0 beq on so we can check for DSIs
  544         mtmsr   r25                                     // turn on DR and RI so we can address user space
  545         isync                                           // wait for it to happen
  546 a64DcbzLoop:
  547         std             r3,0(r17)                       // store into user space
  548         bne--   a64RedriveAsDSI         // cr0 flipped to ne if the store took a DSI (RI protocol)
  549         addi    r17,r17,8                       // bump EA by one doubleword
  550         bdnz    a64DcbzLoop
  551         
  552         mtmsr   r26                                     // restore MSR (DR back off)
  553         isync                                           // wait for it to happen
  554         b               a64Exit
  555 
  556 
  557 // Load and store multiple (lmw, stmw), distinguished by bit 25
  558 
  559 a64LmwStmw:
  560         subfic  r22,r21,32*8            // how many regs to load or store? (r21 = 8*RT)
  561         srwi    r22,r22,1                       // get bytes to load/store: (256-8*RT)/2 = 4*(32-RT)
  562         bf              25,a64LoadMultiple      // handle lmw
  563         b               a64StoreMultiple        // it was stmw
  564         
  565         
  566 // Load string word immediate (lswi)
  567 
  568 a64Lswi:
  569         rlwinm  r22,r20,21,27,31        // get #bytes in r22 (NB field of opcode)
  570         and             r17,r18,r15                     // recompute EA as (RA|0), and clamp
  571         subi    r3,r22,1                        // r22==0?  (r3 = -1 iff so)
  572         rlwimi  r22,r3,6,26,26          // map count of 0 to 32
  573         b               a64LoadMultiple
  574         
  575         
  576 // Store string word immediate (stswi)
  577 
  578 a64Stswi:
  579         rlwinm  r22,r20,21,27,31        // get #bytes in r22 (NB field of opcode)
  580         and             r17,r18,r15                     // recompute EA as (RA|0), and clamp
  581         subi    r3,r22,1                        // r22==0?  (r3 = -1 iff so)
  582         rlwimi  r22,r3,6,26,26          // map count of 0 to 32
  583         b               a64StoreMultiple
  584         
  585         
  586 // Load string word indexed (lswx), also lwbrx
  587 
  588 a64LswxLwbrx:
  589         bf              30,a64Lwbrx                     // was lwbrx
  590         ld              r22,savexer(r13)        // get the xer (r13 = save area)
  591         rlwinm  r22,r22,0,25,31         // isolate the byte count (low 7 bits of XER)
  592         b               a64LoadMultiple         // join common code
  593         
  594         
  595 // Store string word indexed (stswx), also stwbrx
  596 
  597 a64StswxStwbrx:
  598         bf              30,a64Stwbrx            // was stwbrx
  599         ld              r22,savexer(r13)        // get the xer (r13 = save area)
  600         rlwinm  r22,r22,0,25,31         // isolate the byte count (low 7 bits of XER)
  601         b               a64StoreMultiple        // join common code
  602 
  603 
  604 // Load multiple words.  This handles lmw, lswi, and lswx.
  605 
  606 a64LoadMultiple:                                        // r22 = byte count (may be 0), r17 = EA, r21 = 1st reg offset
  607         subic.  r3,r22,1                        // get (#bytes-1)
  608         blt             a64Exit                         // done if 0
  609         add             r4,r17,r3                       // get EA of last operand byte
  610         and             r4,r4,r15                       // clamp
  611         cmpld   r4,r17                          // address space wrap?
  612         blt--   a64PassAlong            // pass along exception if so
  613         srwi.   r4,r22,2                        // get # full words to load
  614         rlwinm  r22,r22,0,30,31         // r22 <- leftover byte count
  615         cmpwi   cr1,r22,0                       // leftover bytes?
  616         beq             a64Lm3                          // no words
  617         mtctr   r4                                      // set up word count
  618         cmpw    r0,r0                           // set beq for DSI test
  619 a64Lm2:                                         // per-word loop: DR on to fetch user bytes, DR off to store regs
  620         mtmsr   r25                                     // turn on DR and RI
  621         isync                                           // wait for it to happen
  622         lbz             r3,0(r17)                       // fetch word a byte at a time (EA may be misaligned)
  623         bne--   a64RedriveAsDSI         // got a DSI
  624         lbz             r4,1(r17)
  625         bne--   a64RedriveAsDSI         // got a DSI
  626         lbz             r5,2(r17)
  627         bne--   a64RedriveAsDSI         // got a DSI
  628         lbz             r6,3(r17)
  629         bne--   a64RedriveAsDSI         // got a DSI
  630         rlwinm  r30,r3,24,0,7           // pack bytes into r30
  631         rldimi  r30,r4,16,40
  632         rldimi  r30,r5,8,48
  633         rldimi  r30,r6,0,56
  634         mtmsr   r26                                     // turn DR back off so we can store into register file
  635         isync
  636         addi    r17,r17,4                       // bump EA
  637         stdx    r30,r14,r21                     // pack into register file
  638         addi    r21,r21,8                       // bump register file offset
  639         rlwinm  r21,r21,0,24,28         // wrap around to 0 (register r31 wraps to r0)
  640         bdnz    a64Lm2
  641 a64Lm3:                                                         // cr1/r22 = leftover bytes (0-3), cr0 beq set
  642         beq             cr1,a64Exit                     // no leftover bytes
  643         mtctr   r22
  644         mtmsr   r25                                     // turn on DR so we can access user space
  645         isync
  646         lbz             r3,0(r17)                       // get 1st leftover byte
  647         bne--   a64RedriveAsDSI         // got a DSI
  648         rlwinm  r30,r3,24,0,7           // position in byte 4 of r30 (and clear rest of r30)
  649         bdz             a64Lm4                          // only 1 byte leftover
  650         lbz             r3,1(r17)                       // get 2nd byte
  651         bne--   a64RedriveAsDSI         // got a DSI
  652         rldimi  r30,r3,16,40            // insert into byte 5 of r30
  653         bdz             a64Lm4                          // only 2 bytes leftover
  654         lbz             r3,2(r17)                       // get 3rd byte
  655         bne--   a64RedriveAsDSI         // got a DSI
  656         rldimi  r30,r3,8,48                     // insert into byte 6
  657 a64Lm4:
  658         mtmsr   r26                                     // turn DR back off so we can store into register file
  659         isync
  660         stdx    r30,r14,r21                     // pack partially-filled word into register file
  661         b               a64Exit
  662 
  663 
  664 // Store multiple words.  This handles stmw, stswi, and stswx.
  665 
  666 a64StoreMultiple:                                       // r22 = byte count (may be 0), r17 = EA, r21 = 1st reg offset
  667         subic.  r3,r22,1                        // get (#bytes-1)
  668         blt             a64Exit                         // done if 0
  669         add             r4,r17,r3                       // get EA of last operand byte
  670         and             r4,r4,r15                       // clamp
  671         cmpld   r4,r17                          // address space wrap?
  672         blt--   a64PassAlong            // pass along exception if so
  673         srwi.   r4,r22,2                        // get # full words to load
  674         rlwinm  r22,r22,0,30,31         // r22 <- leftover byte count
  675         cmpwi   cr1,r22,0                       // leftover bytes?
  676         beq             a64Sm3                          // no words
  677         mtctr   r4                                      // set up word count
  678         cmpw    r0,r0                           // turn on beq so we can check for DSIs
  679 a64Sm2:                                         // per-word loop: DR off to read regs, DR on to store user bytes
  680         ldx             r30,r14,r21                     // get next register
  681         addi    r21,r21,8                       // bump register file offset
  682         rlwinm  r21,r21,0,24,28         // wrap around to 0 (register r31 wraps to r0)
  683         srwi    r3,r30,24                       // shift the four bytes into position
  684         srwi    r4,r30,16
  685         srwi    r5,r30,8
  686         mtmsr   r25                                     // turn on DR so we can access user space
  687         isync                                           // wait for it to happen
  688         stb             r3,0(r17)                       // store word a byte at a time (EA may be misaligned)
  689         bne--   a64RedriveAsDSI         // got a DSI
  690         stb             r4,1(r17)
  691         bne--   a64RedriveAsDSI         // got a DSI
  692         stb             r5,2(r17)
  693         bne--   a64RedriveAsDSI         // got a DSI
  694         stb             r30,3(r17)
  695         bne--   a64RedriveAsDSI         // got a DSI
  696         mtmsr   r26                                     // turn DR back off
  697         isync
  698         addi    r17,r17,4                       // bump EA
  699         bdnz    a64Sm2
  700 a64Sm3:                                                         // r22 = 0-3, cr1 set on r22, cr0 beq set
  701         beq             cr1,a64Exit                     // no leftover bytes
  702         ldx             r30,r14,r21                     // get last register
  703         mtctr   r22
  704         mtmsr   r25                                     // turn on DR so we can access user space
  705         isync                                           // wait for it to happen
  706 a64Sm4:
  707         rlwinm  r30,r30,8,0,31          // position next byte
  708         stb             r30,0(r17)                      // pack into user space
  709         addi    r17,r17,1                       // bump user space ptr
  710         bne--   a64RedriveAsDSI         // got a DSI
  711         bdnz    a64Sm4
  712         mtmsr   r26                                     // turn DR back off
  713         isync
  714         b               a64Exit
  715 
  716 
  717 // Subroutines to load bytes from user space.  Enter with r17=EA; return with DR off
  718 // and data in r30.  Clobbers r3-r7.  May branch away (a64PassAlong/a64RedriveAsDSI).
  719 a64Load2Bytes:                                          // load 2 bytes right-justified into r30
  720         addi    r7,r17,1                        // get EA of last byte
  721         and             r7,r7,r15                       // clamp
  722         cmpld   r7,r17                          // address wrap?
  723         blt--   a64PassAlong            // yes
  724         mtmsr   r25                                     // turn on DR so we can access user space
  725         isync                                           // wait for it to happen
  726         sub.    r30,r30,r30                     // 0-fill dest and set beq
  727         b               a64Load2                        // jump into routine
  728 a64Load4Bytes:                                          // load 4 bytes right-justified into r30 (ie, low order word)
  729         addi    r7,r17,3                        // get EA of last byte
  730         and             r7,r7,r15                       // clamp
  731         cmpld   r7,r17                          // address wrap?
  732         blt--   a64PassAlong            // yes
  733         mtmsr   r25                                     // turn on DR so we can access user space
  734         isync                                           // wait for it to happen
  735         sub.    r30,r30,r30                     // 0-fill dest and set beq
  736         b               a64Load4                        // jump into routine
  737 a64Load8Bytes:                                          // load 8 bytes into r30
  738         addi    r7,r17,7                        // get EA of last byte
  739         and             r7,r7,r15                       // clamp
  740         cmpld   r7,r17                          // address wrap?
  741         blt--   a64PassAlong            // yes
  742         mtmsr   r25                                     // turn on DR so we can access user space
  743         isync                                           // wait for it to happen
  744         sub.    r30,r30,r30                     // 0-fill dest and set beq
  745         lbz             r3,-7(r7)                       // get byte 0 (addressing relative to last byte, r7)
  746         bne--   a64RedriveAsDSI         // got a DSI
  747         lbz             r4,-6(r7)                       // and byte 1, etc
  748         bne--   a64RedriveAsDSI         // got a DSI
  749         lbz             r5,-5(r7)
  750         bne--   a64RedriveAsDSI         // got a DSI
  751         lbz             r6,-4(r7)
  752         bne--   a64RedriveAsDSI         // got a DSI
  753         rldimi  r30,r3,56,0                     // position bytes in upper word
  754         rldimi  r30,r4,48,8
  755         rldimi  r30,r5,40,16
  756         rldimi  r30,r6,32,24
  757 a64Load4:                                       // common tail: fetch bytes 4-5
  758         lbz             r3,-3(r7)
  759         bne--   a64RedriveAsDSI         // got a DSI
  760         lbz             r4,-2(r7)
  761         bne--   a64RedriveAsDSI         // got a DSI
  762         rldimi  r30,r3,24,32            // insert bytes 4 and 5 into r30
  763         rldimi  r30,r4,16,40
  764 a64Load2:                                       // common tail: fetch bytes 6-7
  765         lbz             r3,-1(r7)
  766         bne--   a64RedriveAsDSI         // got a DSI
  767         lbz             r4,0(r7)
  768         bne--   a64RedriveAsDSI         // got a DSI
  769         mtmsr   r26                                     // turn DR back off
  770         isync
  771         rldimi  r30,r3,8,48                     // insert bytes 6 and 7 into r30
  772         rldimi  r30,r4,0,56
  773         blr
  774         
  775         
  776 // Subroutines to store bytes into user space.  Enter with r17=EA, data in r30;
  777 // return with DR off.  Clobbers r3-r7.  May branch away (a64PassAlong/a64RedriveAsDSI).
  778 a64Store2Bytes:                                         // store bytes 6 and 7 of r30
  779         addi    r7,r17,1                        // get EA of last byte
  780         and             r7,r7,r15                       // clamp
  781         cmpld   r7,r17                          // address wrap?
  782         blt--   a64PassAlong            // yes
  783         mtmsr   r25                                     // turn on DR so we can access user space
  784         isync                                           // wait for it to happen
  785         cmpw    r0,r0                           // set beq so we can check for DSI
  786         b               a64Store2                       // jump into routine
  787 a64Store4Bytes:                                         // store bytes 4-7 of r30 (ie, low order word)
  788         addi    r7,r17,3                        // get EA of last byte
  789         and             r7,r7,r15                       // clamp
  790         cmpld   r7,r17                          // address wrap?
  791         blt--   a64PassAlong            // yes
  792         mtmsr   r25                                     // turn on DR so we can access user space
  793         isync                                           // wait for it to happen
  794         cmpw    r0,r0                           // set beq so we can check for DSI
  795         b               a64Store4                       // jump into routine
  796 a64Store8Bytes:                                         // r30 = bytes
  797         addi    r7,r17,7                        // get EA of last byte
  798         and             r7,r7,r15                       // clamp
  799         cmpld   r7,r17                          // address wrap?
  800         blt--   a64PassAlong            // yes
  801         mtmsr   r25                                     // turn on DR so we can access user space
  802         isync                                           // wait for it to happen
  803         cmpw    r0,r0                           // set beq so we can check for DSI
  804         rotldi  r3,r30,8                        // shift byte 0 into position (low 8 bits)
  805         rotldi  r4,r30,16                       // and byte 1
  806         rotldi  r5,r30,24                       // and byte 2
  807         rotldi  r6,r30,32                       // and byte 3
  808         stb             r3,-7(r7)                       // store byte 0 (addressing relative to last byte, r7)
  809         bne--   a64RedriveAsDSI         // got a DSI
  810         stb             r4,-6(r7)                       // and byte 1 etc...
  811         bne--   a64RedriveAsDSI         // got a DSI
  812         stb             r5,-5(r7)
  813         bne--   a64RedriveAsDSI         // got a DSI
  814         stb             r6,-4(r7)
  815         bne--   a64RedriveAsDSI         // got a DSI
  816 a64Store4:                                      // common tail: store bytes 4-5
  817         rotldi  r3,r30,40                       // shift byte 4 into position
  818         rotldi  r4,r30,48                       // and byte 5
  819         stb             r3,-3(r7)
  820         bne--   a64RedriveAsDSI         // got a DSI
  821         stb             r4,-2(r7)
  822         bne--   a64RedriveAsDSI         // got a DSI
  823 a64Store2:                                      // common tail: store bytes 6-7
  824         rotldi  r3,r30,56                       // shift byte 6 into position
  825         stb             r3,-1(r7)                       // store byte 6
  826         bne--   a64RedriveAsDSI         // got a DSI
  827         stb             r30,0(r7)                       // store byte 7, which is already positioned
  828         bne--   a64RedriveAsDSI         // got a DSI
  829         mtmsr   r26                                     // turn off DR
  830         isync
  831         blr
  832         
  833                 
  834 // Exit routines.  All paths merge here; r30 carries the exception code returned in r11.
  835 
  836 a64ExitEm:
  837                 li              r30,T_EMULATE                   // Change exception code to emulate
  838                 stw             r30,saveexception(r13)  // Save it
  839                 b               a64Exit                                 // Join standard exit routine...
  840 
  841 a64PassAlong:                                                   // unhandled exception, just pass it along
  842         crset   kNotify                                 // return T_ALIGNMENT or T_PROGRAM
  843         crclr   kTrace                                  // not a trace interrupt
  844         b               a64Exit1
  845 a64UpdateCheck:                                                 // successfully emulated, may be update form
  846         bf              kUpdate,a64Exit                 // update?
  847         stdx    r17,r14,r16                             // yes, store EA into RA (r16 = RA offset in reg file)
  848 a64Exit:                                                                // instruction successfully emulated
  849         addi    r28,r28,4                               // bump SRR0 past the emulated instruction
  850         li              r30,T_IN_VAIN                   // eat the interrupt since we emulated it
  851         and             r28,r28,r15                             // clamp to address space size (32 vs 64)
  852         std             r28,savesrr0(r13)               // save, so we return to next instruction
  853 a64Exit1:
  854         bt--    kTrace,a64Trace                 // were we in single-step at fault?
  855         bt--    kNotify,a64Notify               // should we say T_ALIGNMENT anyway?
  856 a64Exit2:
  857         mcrf    cr6,cr3                                 // restore feature flags
  858         mr              r11,r30                                 // pass back exception code (T_IN_VAIN etc) in r11
  859         b               EXT(EmulExit)                   // return to exception processing
  860 
  861 
  862 // Notification requested: pass exception upstairs even though it might have been emulated.
  863 
  864 a64Notify:
  865         li              r30,T_ALIGNMENT                 // somebody wants to know about it (but don't redrive)
  866         bt              kAlignment,a64Exit2             // was an alignment exception
  867         li              r30,T_PROGRAM                   // was an emulated instruction
  868         b               a64Exit2
  869 
  870 
  871 // Emulate a trace interrupt after handling alignment interrupt.
  872 
  873 a64Trace:
  874         lwz             r9,SAVflags(r13)                // get the save-area flags
  875         li              r30,T_TRACE                     // report a trace interrupt instead
  876         oris    r9,r9,hi16(SAVredrive)  // Set the redrive bit
  877         stw             r30,saveexception(r13)  // Set the exception code
  878         stw             r9,SAVflags(r13)                // Set the flags
  879         b               a64Exit2                                // Exit and do trace interrupt...
  880 
  881 
  882 // Got a DSI accessing user space.  Redrive.  One way this can happen is if another
  883 // processor removes a mapping while we are emulating.
  884 
  885 a64RedriveAsISI:                                                // this DSI happened fetching the opcode (r1==DSISR  r4==DAR)
  886         mtmsr   r26                                             // turn DR back off
  887         isync                                                   // wait for it to happen
  888         li              r30,T_INSTRUCTION_ACCESS
  889         rlwimi  r29,r1,0,0,4                    // insert the fault type from DSI's DSISR
  890         std             r29,savesrr1(r13)               // update SRR1 to look like an ISI
  891         b               a64Redrive
  892 
  893 a64RedriveAsDSI:                                                // r0==DAR  r1==DSISR
  894         mtmsr   r26                                             // turn DR back off
  895         isync                                                   // wait for it to happen
  896         stw             r1,savedsisr(r13)               // Set the DSISR of failed access
  897         std             r0,savedar(r13)                 // Set the address of the failed access
  898         li              r30,T_DATA_ACCESS               // Set failing data access code
  899 a64Redrive:                                     // common: ask for the fault to be redriven
  900         lwz             r9,SAVflags(r13)                // Pick up the flags
  901         stw             r30,saveexception(r13)  // Set the replacement code
  902         oris    r9,r9,hi16(SAVredrive)  // Set the redrive bit
  903         stw             r9,SAVflags(r13)                // Set redrive request
  904         crclr   kTrace                                  // don't take a trace interrupt
  905         crclr   kNotify                                 // don't pass alignment exception
  906         b               a64Exit2                                // done
  907         
  908 
  909 // This is the branch table, indexed by the "AAAAB" opcode hash (5 bits, 32 entries).
  910 
  911 a64BranchTable:
  912         .long   a64LwzLwzxLwarx         // 00000  lwz[u], lwzx[u], lwarx
  913         .long   a64Ldx                          // 00001  ldx[u]
  914         .long   a64PassAlong            // 00010  ldarx         (never emulate these)
  915         .long   a64PassAlong            // 00011
  916         .long   a64StwStwx                      // 00100  stw[u], stwx[u]
  917         .long   a64StdxStwcx            // 00101  stdx[u], stwcx
  918         .long   a64PassAlong            // 00110
  919         .long   a64PassAlong            // 00111  stdcx         (never emulate these)
  920         .long   a64LhzLhzx                      // 01000  lhz[u], lhzx[u]
  921         .long   a64PassAlong            // 01001
  922         .long   a64LhaLhax                      // 01010  lha[u], lhax[u]
  923         .long   a64Lwax                         // 01011  lwax[u]
  924         .long   a64SthSthx                      // 01100  sth[u], sthx[u]
  925         .long   a64PassAlong            // 01101
  926         .long   a64LmwStmw                      // 01110  lmw, stmw
  927         .long   a64PassAlong            // 01111
  928         .long   a64LfsLfsx                      // 10000  lfs[u], lfsx[u]
  929         .long   a64LswxLwbrx            // 10001  lswx, lwbrx
  930         .long   a64LfdLfdx                      // 10010  lfd[u], lfdx[u]
  931         .long   a64Lswi                         // 10011  lswi
  932         .long   a64StfsStfsx            // 10100  stfs[u], stfsx[u]
  933         .long   a64StswxStwbrx          // 10101  stswx, stwbrx
  934         .long   a64StfdStfdx            // 10110  stfd[u], stfdx[u]
  935         .long   a64Stswi                        // 10111  stswi
  936         .long   a64PassAlong            // 11000
  937         .long   a64Lhbrx                        // 11001  lhbrx
  938         .long   a64LdLwa                        // 11010  ld[u], lwa
  939         .long   a64PassAlong            // 11011
  940         .long   a64PassAlong            // 11100
  941         .long   a64Sthbrx                       // 11101  sthbrx
  942         .long   a64StdStfiwx            // 11110  std[u], stfiwx
  943         .long   a64DcbzDcbz128          // 11111  dcbz, dcbz128
  944 
  945 

Cache object: e53c24f8619368c0bd53f87221bcc085


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.