FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/Emulate64.s

    1 /*
    2  * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
    21  */
   22 
   23 /* Emulate64.s
   24  *
   25  * Software emulation of instructions not handled in hw, on 64-bit machines.
   26  */
   27  
   28 #include <sys/appleapiopts.h>
   29 #include <ppc/asm.h>
   30 #include <ppc/proc_reg.h>
   31 #include <ppc/exception.h>
   32 #include <mach/machine/vm_param.h>
   33 #include <ppc/cpu_capabilities.h>
   34 #include <assym.s>
   35 
   36 // CR bit set if the instruction is an "update" form (LFDU, STWU, etc):
   37 #define kUpdate 25
   38 
    40 // CR bit set if interrupt occurred in trace mode (ie, MSR_SE_BIT):
   40 #define kTrace  8
   41 
   42 // CR bit set if notification on alignment interrupts is requested (notifyUnalignbit in spcFlags):
   43 #define kNotify 9
   44 
   45 // CR bit distinguishes between alignment and program exceptions:
   46 #define kAlignment      10
   47 
   48 
   49 
   50 // *************************************
   51 // * P R O G R A M   I N T E R R U P T *
   52 // *************************************
   53 //
   54 // These are floating pt exceptions, illegal instructions, privileged mode violations,
   55 // and traps.  All we're interested in at this low level is illegal instructions.
   56 // The ones we "emulate" are:
   57 //              DCBA,  which is not implemented in the IBM 970.  The emulation is to ignore it,
   58 //                         as it is just a hint.
   59 //              MCRXR, which is not implemented on the IBM 970, but is in the PPC ISA.
   60 //
   61 // Additionally, to facilitate debugging the alignment handler, we recognize a special
   62 // diagnostic mode that is used to simulate alignment exceptions.  When in this mode,
   63 // if the instruction has opcode==0 and the extended opcode is one of the X-form
   64 // instructions that can take an alignment interrupt, then we change the opcode to
   65 // 31 and pretend it got an alignment interrupt.  This exercises paths that
   66 // are hard to drive or perhaps never driven on this particular CPU.
   67 
   68         .text
   69         .globl  EXT(Emulate64)
   70         .align  5
   71 LEXT(Emulate64)
   72         crclr   kAlignment                                              // not an alignment exception
   73         b               a64AlignAssistJoin                              // join alignment handler
   74         
   75         
   76 // Return from alignment handler with all the regs loaded for opcode emulation.
   77         
   78 a64HandleProgramInt:
   79         rlwinm. r0,r29,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT      // illegal opcode?
   80         beq             a64PassAlong                                    // No, must have been trap or priv violation etc
   81         rlwinm  r3,r20,6,26,31                                  // right justify opcode field (bits 0-5)
   82         rlwinm  r4,r20,31,22,31                                 // right justify extended opcode field (bits 21-30)
   83         cmpwi   cr0,r3,31                                               // X-form?
   84         cmpwi   cr1,r4,758                                              // DCBA?
   85         cmpwi   cr4,r4,512                                              // MCRXR?
   86         crand   cr1_eq,cr0_eq,cr1_eq                    // merge the two tests for DCBA
   87         crand   cr4_eq,cr0_eq,cr4_eq                    // and for MCRXR
   88         beq++   cr1_eq,a64ExitEm                                // was DCBA, so ignore
   89         bne--   cr4_eq,a64NotEmulated                   // skip if not MCRXR
   90         
   91 // Was MCRXR, so emulate.
   92 
   93         ld              r3,savexer(r13)                                 // get the XER
   94         lwz             r4,savecr(r13)                                  // and the CR
   95         rlwinm  r5,r20,11,27,29                                 // get (CR# * 4) from instruction
   96         rlwinm  r6,r3,0,4,31                                    // zero XER[32-35] (also XER[0-31])
   97         sld             r4,r4,r5                                                // move target CR field to bits 32-35
   98         rlwimi  r4,r3,0,0,3                                             // move XER[32-35] into CR field
   99         stw             r6,savexer+4(r13)                               // update XER
  100         srd             r4,r4,r5                                                // re-position CR
  101         stw             r4,savecr(r13)                                  // update CR
  102         b               a64ExitEm                                               // done
  103 
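// For reference, the MCRXR sequence above behaves like this small C sketch
// (variable names here are illustrative, not identifiers from this file):
//
//      uint32_t xer = save_xer, cr = save_cr;
//      unsigned crf = (instr >> 23) & 7;                 // target CR field from the instruction
//      unsigned sh  = 28 - 4*crf;                        // bit offset of that field within the CR
//      cr  = (cr & ~(0xFu << sh)) | ((xer >> 28) << sh); // copy XER[SO,OV,CA,res] into the CR field
//      xer &= 0x0FFFFFFF;                                // and clear those bits in the XER
//      save_cr = cr;  save_xer = xer;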
  104 // Not an opcode we normally emulate.  If in special diagnostic mode and opcode=0,
  105 // emulate as an alignment exception.  This special case is for test software.
  106 
  107 a64NotEmulated:
  108         lwz             r30,dgFlags(0)                                  // Get the flags
  109         rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb  // Do we want to try to emulate something?
  110         beq++   a64PassAlong                                    // No emulation allowed
  111         cmpwi   r3,0                                                    // opcode==0 ?
  112         bne             a64PassAlong                                    // not the special case
  113         oris    r20,r20,0x7C00                                  // change opcode to 31
  114         crset   kAlignment                                              // say we took alignment exception
  115         rlwinm  r5,r4,0,26+1,26-1                               // mask Update bit (32) out of extended opcode
  116         rlwinm  r5,r5,0,0,31                                    // Clean out leftover junk from rlwinm
  117 
  118         cmpwi   r4,1014                                                 // dcbz/dcbz128 ?
  119         crmove  cr1_eq,cr0_eq
  120         cmpwi   r5,21                                                   // ldx/ldux ?
  121         cror    cr1_eq,cr0_eq,cr1_eq
  122         cmpwi   r5,599                                                  // lfdx/lfdux ?
  123         cror    cr1_eq,cr0_eq,cr1_eq
  124         cmpwi   r5,535                                                  // lfsx/lfsux ?
  125         cror    cr1_eq,cr0_eq,cr1_eq
  126         cmpwi   r5,343                                                  // lhax/lhaux ?
  127         cror    cr1_eq,cr0_eq,cr1_eq
  128         cmpwi   r4,790                                                  // lhbrx ?
  129         cror    cr1_eq,cr0_eq,cr1_eq
  130         cmpwi   r5,279                                                  // lhzx/lhzux ?
  131         cror    cr1_eq,cr0_eq,cr1_eq
  132         cmpwi   r4,597                                                  // lswi ?
  133         cror    cr1_eq,cr0_eq,cr1_eq
  134         cmpwi   r4,533                                                  // lswx ?
  135         cror    cr1_eq,cr0_eq,cr1_eq
  136         cmpwi   r5,341                                                  // lwax/lwaux ?
  137         cror    cr1_eq,cr0_eq,cr1_eq
  138         cmpwi   r4,534                                                  // lwbrx ?
  139         cror    cr1_eq,cr0_eq,cr1_eq
   140         cmpwi   r5,23                                                   // lwzx/lwzux ?
  141         cror    cr1_eq,cr0_eq,cr1_eq
  142         cmpwi   r5,149                                                  // stdx/stdux ?
  143         cror    cr1_eq,cr0_eq,cr1_eq
  144         cmpwi   r5,727                                                  // stfdx/stfdux ?
  145         cror    cr1_eq,cr0_eq,cr1_eq
  146         cmpwi   r4,983                                                  // stfiwx ?
  147         cror    cr1_eq,cr0_eq,cr1_eq
  148         cmpwi   r5,663                                                  // stfsx/stfsux ?
  149         cror    cr1_eq,cr0_eq,cr1_eq
  150         cmpwi   r4,918                                                  // sthbrx ?
  151         cror    cr1_eq,cr0_eq,cr1_eq
  152         cmpwi   r5,407                                                  // sthx/sthux ?
  153         cror    cr1_eq,cr0_eq,cr1_eq
  154         cmpwi   r4,725                                                  // stswi ?
  155         cror    cr1_eq,cr0_eq,cr1_eq
  156         cmpwi   r4,661                                                  // stswx ?
  157         cror    cr1_eq,cr0_eq,cr1_eq
  158         cmpwi   r4,662                                                  // stwbrx ?
  159         cror    cr1_eq,cr0_eq,cr1_eq
  160         cmpwi   r5,151                                                  // stwx/stwux ?
  161         cror    cr1_eq,cr0_eq,cr1_eq
  162         
  163         beq++   cr1,a64GotInstruction                   // it was one of the X-forms we handle
  164         crclr   kAlignment                                              // revert to program interrupt
  165         b               a64PassAlong                                    // not recognized extended opcode
  166         
  167 
  168 // *****************************************
  169 // * A L I G N M E N T   I N T E R R U P T *
  170 // *****************************************
  171 //
  172 // We get here in exception context, ie with interrupts disabled, translation off, and
  173 // in 64-bit mode, with:
  174 //              r13 = save-area pointer, with general context already saved in it
  175 //              cr6 = feature flags
  176 // We preserve r13 and cr6.  Other GPRs and CRs, the LR and CTR are used.
  177 //
  178 // Current 64-bit processors (GPUL) handle almost all misaligned operations in hardware,
  179 // so this routine usually isn't called very often.  Only floating pt ops that cross a page
  180 // boundary and are not word aligned, and LMW/STMW can take exceptions to cacheable memory.
  181 // However, in contrast to G3 and G4, any misaligned load/store will get an alignment
  182 // interrupt on uncached memory.
  183 //
  184 // We always emulate scalar ops with a series of byte load/stores.  Doing so is no slower
  185 // than LWZ/STW in cases where a scalar op gets an alignment exception.
  186 //
   187 // This routine supports all legal permutations of alignment interrupts occurring in user or
  188 // supervisor mode, 32 or 64-bit addressing, and translation on or off.  We do not emulate
  189 // instructions that go past the end of an address space, such as "LHZ -1(0)"; we just pass
  190 // along the alignment exception rather than wrap around to byte 0.
  191 //
  192 // First, check for a few special cases such as virtual machines, etc.
  193 
  194         .globl  EXT(AlignAssist64)
  195         .align  5
  196 LEXT(AlignAssist64)
  197         crset   kAlignment                                                              // mark as alignment interrupt
  198 
  199 a64AlignAssistJoin:                                                                             // join here from program interrupt handler
  200         li              r0,0                                                                    // Get a 0
  201         mfsprg  r31,0                                                                   // get the per_proc data ptr
  202         mcrf    cr3,cr6                                                                 // save feature flags here...
  203         lwz             r21,spcFlags(r31)                                               // grab the special flags
  204         ld              r29,savesrr1(r13)                                               // get the MSR etc at the fault
  205         ld              r28,savesrr0(r13)                                               // get the EA of faulting instruction
  206         stw             r0,savemisc3(r13)                                               // Assume we will handle this ok
  207         mfmsr   r26                                                                             // save MSR at entry
  208         rlwinm. r0,r21,0,runningVMbit,runningVMbit              // Are we running a VM?
  209         lwz             r19,dgFlags(0)                                                  // Get the diagnostics flags
  210         bne--   a64PassAlong                                                    // yes, let the virtual machine monitor handle
  211 
  212 
  213 // Set up the MSR shadow regs.  We turn on FP in this routine, and usually set DR and RI
  214 // when accessing user space (the SLB is still set up with all the user space translations.)
   215 // However, if the interrupt occurred in the kernel with DR off, we keep it off while
  216 // accessing the "target" address space.  If we set DR to access the target space, we also
  217 // set RI.  The RI bit tells the exception handlers to clear cr0 beq and return if we get an
  218 // exception accessing the user address space.  We are careful to test cr0 beq after every such
  219 // access.  We keep the following "shadows" of the MSR in global regs across this code:
  220 //              r25 = MSR at entry, plus FP and probably DR and RI (used to access target space)
  221 //              r26 = MSR at entry
  222 //              r27 = free
  223 //              r29 = SRR1 (ie, MSR at interrupt)
  224 // Note that EE and IR are always off, and SF is always on in this code.
  225 
  226                 rlwinm  r3,r29,0,MSR_DR_BIT,MSR_DR_BIT                  // was translation on at fault?
  227         rlwimi  r3,r3,32-MSR_RI_BIT+MSR_DR_BIT,MSR_RI_BIT,MSR_RI_BIT    // if DR was set, set RI too
  228         or              r25,r26,r3                                                              // assemble MSR to use accessing target space
  229         
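// In effect, the three instructions above compute (C sketch, with MSR_DR/MSR_RI
// standing for the corresponding mask bits):
//
//      r25 = r26 | ((srr1 & MSR_DR) ? (MSR_DR | MSR_RI) : 0);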
  230 
  231 // Because the DSISR and DAR are either not set or are not to be trusted on some 64-bit
  232 // processors on an alignment interrupt, we must fetch the faulting instruction ourselves,
  233 // then decode/hash the opcode and reconstruct the EA manually.
  234 
  235         mtmsr   r25                                     // turn on FP and (if it was on at fault) DR and RI
  236         isync                                           // wait for it to happen
  237                 cmpw    r0,r0                           // turn on beq so we can check for DSIs
  238         lwz             r20,0(r28)                      // fetch faulting instruction, probably with DR on
  239         bne--   a64RedriveAsISI         // got a DSI trying to fetch it, pretend it was an ISI
  240         mtmsr   r26                                     // turn DR back off
  241         isync                                           // wait for it to happen
  242 
  243 
  244 // Set a few flags while we wait for the faulting instruction to arrive from cache.
  245 
  246         rlwinm. r0,r29,0,MSR_SE_BIT,MSR_SE_BIT                          // Were we single stepping?
  247                 stw             r20,savemisc2(r13)      // Save the instruction image in case we notify
  248         crnot   kTrace,cr0_eq
  249         rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb                      // Should we notify?
  250         crnot   kNotify,cr0_eq        
  251 
  252 
   253 // Hash the instruction into a 5-bit value "AAAAB" used to index the branch table, and a
  254 // 1-bit kUpdate flag, as follows:
   255 //  • for X-form instructions (with primary opcode 31):
  256 //       the "AAAA" bits are bits 21-24 of the instruction
  257 //       the "B" bit is the XOR of bits 29 and 30
  258 //       the update bit is instruction bit 25
   259 //      • for D and DS-form instructions (actually, any primary opcode except 31):
  260 //       the "AAAA" bits are bits 1-4 of the instruction
  261 //       the "B" bit is 0
  262 //       the update bit is instruction bit 5
  263 //
  264 // Just for fun (and perhaps a little speed on deep-pipe machines), we compute the hash,
  265 // update flag, and EA without branches and with ipc >= 2.
  266 //
   267 // When we "bctr" to the opcode-specific routine, the following are all set up:
  268 //              MSR = EE and IR off, SF and FP on
  269 //              r12 = full 64-bit EA (r17 is clamped EA)
  270 //              r13 = save-area pointer (physical)
  271 //              r14 = ptr to saver0 in save-area (ie, to base of GPRs)
  272 //              r15 = 0x00000000FFFFFFFF if 32-bit mode fault, 0xFFFFFFFFFFFFFFFF if 64
  273 //              r16 = RA * 8 (ie, reg# not reg value)
  274 //              r17 = EA, clamped to 32 bits if 32-bit mode fault (see also r12)
  275 //              r18 = (RA|0) (reg value)
  276 //              r19 = -1 if X-form, 0 if D-form
  277 //              r20 = faulting instruction
  278 //              r21 = RT * 8 (ie, reg# not reg value)
  279 //              r22 = addr(aaFPopTable)+(RT*32), ie ptr to floating pt table for target register
  280 //              r25 = MSR at entrance, probably with DR and RI set (for access to target space)
  281 //              r26 = MSR at entrance
  282 //              r27 = free
  283 //              r28 = SRR0 (ie, EA of faulting instruction)
  284 //              r29 = SRR1 (ie, MSR at fault)
  285 //              r30 = scratch, usually user data
  286 //              r31 = per-proc pointer
  287 //              cr2 = kTrace, kNotify, and kAlignment flags
  288 //      cr3 = saved copy of feature flags used in lowmem vector code
  289 //              cr6 = bits 24-27 of CR are bits 24-27 of opcode if X-form, or bits 4-5 and 00 if D-form
  290 //                        bit 25 is the kUpdate flag, set for update form instructions
  291 //              cr7 = bits 28-31 of CR are bits 28-31 of opcode if X-form, or 0 if D-form
  292 
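// The hash described above, written out as a C sketch (big-endian bit numbering,
// bit 0 = MSB; instr and bit() are illustrative names, not code from this file):
//
//      unsigned bit(uint32_t w, int n) { return (w >> (31 - n)) & 1; }
//
//      if (((instr >> 26) & 0x3F) == 31) {                // X-form
//          hash   = ((instr >> 6) & 0x1E)                 // "AAAA" = instruction bits 21-24
//                 | (bit(instr, 29) ^ bit(instr, 30));    // "B" = bit 29 xor bit 30
//          update = bit(instr, 25);
//      } else {                                           // D or DS form
//          hash   = (instr >> 26) & 0x1E;                 // "AAAA" = bits 1-4, "B" = 0
//          update = bit(instr, 5);
//      }
//      handler = a64BranchTable[hash];                    // 5-bit index, 32 entries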
  293 a64GotInstruction:                                      // here from program interrupt with instruction in r20
   294         rlwinm  r21,r20,6+6,20,25       // move the primary opcode (bits 0-5) to bits 20-25
  295         la              r14,saver0(r13)         // r14 <- base address of GPR registers
  296         xori    r19,r21,0x07C0          // iff primary opcode is 31, set r19 to 0
  297         rlwinm  r16,r20,16+3,24,28      // r16 <- RA*8
  298         subi    r19,r19,1                       // set bit 0 iff X-form (ie, if primary opcode is 31)
  299         rlwinm  r17,r20,21+3,24,28      // r17 <- RB*8 (if X-form)
  300         sradi   r19,r19,63                      // r19 <- -1 if X-form, 0 if D-form
  301         extsh   r22,r20                         // r22 <- displacement (if D-form)
  302 
  303         ldx             r23,r14,r17                     // get (RB), if any
  304         and             r15,r20,r19                     // instruction if X, 0 if D
  305         andc    r17,r21,r19                     // primary opcode in bits 20-25 if D, 0 if X
  306         ldx             r18,r14,r16                     // get (RA)
  307         subi    r24,r16,1                       // set bit 0 iff RA==0
  308         or              r21,r15,r17                     // r21 <- instruction if X, or bits 0-5 in bits 20-25 if D
  309         sradi   r24,r24,63                      // r24 <- -1 if RA==0, 0 otherwise
  310         rlwinm  r17,r21,32-4,25,28      // shift opcode bits 21-24 to 25-28 (hash "AAAA" bits)
  311         lis             r10,ha16(a64BranchTable)        // start to build up branch table address
  312         rlwimi  r17,r21,0,29,29         // move opcode bit 29 into hash as start of "B" bit
  313         rlwinm  r30,r21,1,29,29         // position opcode bit 30 in position 29
  314         and             r12,r23,r19                     // RB if X-form, 0 if D-form
  315         andc    r11,r22,r19                     // 0 if X-form, sign extended displacement if D-form
  316         xor             r17,r17,r30                     // bit 29 ("B") of hash is xor(bit29,bit30)
  317         addi    r10,r10,lo16(a64BranchTable)
  318         or              r12,r12,r11                     // r12 <- (RB) or displacement, as appropriate
  319         lwzx    r30,r10,r17                     // get address from branch table
  320         mtcrf   0x01,r21                        // move opcode bits 28-31 to CR7
   321         sradi   r15,r29,32                      // propagate SF bit from SRR1 (MSR_SF, which is bit 0)
  322         andc    r18,r18,r24                     // r18 <- (RA|0)
  323         mtcrf   0x02,r21                        // move opcode bits 24-27 to CR6 (kUpdate is bit 25)
  324         add             r12,r18,r12                     // r12 <- 64-bit EA
  325         mtctr   r30                                     // set up branch address
  326         
  327         oris    r15,r15,0xFFFF          // start to fill low word of r15 with 1s
  328         rlwinm  r21,r20,11+3,24,28      // r21 <- RT * 8
  329         lis             r22,ha16(EXT(aaFPopTable))      // start to compute address of floating pt table
  330         ori             r15,r15,0xFFFF          // now bits 32-63 of r15 are 1s
  331         addi    r22,r22,lo16(EXT(aaFPopTable))
   332         and             r17,r12,r15                     // clamp EA to 32 bits if fault occurred in 32-bit mode
  333         rlwimi  r22,r21,2,22,26         // move RT into aaFPopTable address (which is 1KB aligned)
  334         
  335         bf--    kAlignment,a64HandleProgramInt  // return to Program Interrupt handler
  336         bctr                                            // if alignment interrupt, jump to opcode-specific routine
  337         
  338         
  339 // Floating-pt load single (lfs[u], lfsx[u])
  340 
  341 a64LfsLfsx:
  342         bl              a64Load4Bytes           // get data in r30
  343         mtctr   r22                                     // set up address of "lfs fRT,emfp0(r31)"
  344         stw             r30,emfp0(r31)          // put word here for aaFPopTable routine
  345         bctrl                                           // do the lfs
  346         b               a64UpdateCheck          // update RA if necessary and exit
  347         
  348         
  349 // Floating-pt store single (stfs[u], stfsx[u])
  350 
  351 a64StfsStfsx:
  352         ori             r22,r22,8                       // set dir==1 (ie, single store) in aaFPopTable
  353         mtctr   r22                                     // set up address of "stfs fRT,emfp0(r31)"
  354         bctrl                                           // execute the store into emfp0
  355         lwz             r30,emfp0(r31)          // get the word
  356         bl              a64Store4Bytes          // store r30 into user space
  357         b               a64UpdateCheck          // update RA if necessary and exit
  358         
  359 
  360 // Floating-pt store as integer word (stfiwx)
  361 
  362 a64Stfiwx:
  363         ori             r22,r22,16+8            // set size=1, dir==1 (ie, double store) in aaFPopTable
  364         mtctr   r22                                     // set up FP register table address
  365         bctrl                                           // double precision store into emfp0
  366         lwz             r30,emfp0+4(r31)        // get the low-order word
  367         bl              a64Store4Bytes          // store r30 into user space
  368         b               a64Exit                         // successfully emulated
  369         
  370 
  371 // Floating-pt load double (lfd[u], lfdx[u])
  372 
  373 a64LfdLfdx:
  374         ori             r22,r22,16                      // set Double bit in aaFPopTable address
  375         bl              a64Load8Bytes           // get data in r30
  376         mtctr   r22                                     // set up address of "lfd fRT,emfp0(r31)"
  377         std             r30,emfp0(r31)          // put doubleword here for aaFPopTable routine
  378         bctrl                                           // execute the load
  379         b               a64UpdateCheck          // update RA if necessary and exit
  380 
  381 
  382 // Floating-pt store double (stfd[u], stfdx[u])
  383 
  384 a64StfdStfdx:
  385         ori             r22,r22,16+8            // set size=1, dir==1 (ie, double store) in aaFPopTable address
  386         mtctr   r22                                     // address of routine to stfd RT
  387         bctrl                                           // store into emfp0
  388         ld              r30,emfp0(r31)          // get the doubleword
  389         bl              a64Store8Bytes          // store r30 into user space
  390         b               a64UpdateCheck          // update RA if necessary and exit
  391 
  392 
  393 // Load halfword w 0-fill (lhz[u], lhzx[u])
  394 
  395 a64LhzLhzx:
  396         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  397         stdx    r30,r14,r21                     // store into RT slot in register file
  398         b               a64UpdateCheck          // update RA if necessary and exit
  399 
  400 
  401 // Load halfword w sign fill (lha[u], lhax[u])
  402 
  403 a64LhaLhax:
  404         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  405         extsh   r30,r30                         // sign-extend
  406         stdx    r30,r14,r21                     // store into RT slot in register file
  407         b               a64UpdateCheck          // update RA if necessary and exit
  408 
  409 
  410 // Load halfword byte reversed (lhbrx)
  411 
  412 a64Lhbrx:
  413         bl              a64Load2Bytes           // load into r30 from user space (w 0-fill)
  414         rlwinm  r3,r30,8,16,23          // reverse bytes into r3
  415         rlwimi  r3,r30,24,24,31
  416         stdx    r3,r14,r21                      // store into RT slot in register file
  417         b               a64Exit                         // successfully emulated
  418 
  419 
  420 // Store halfword (sth[u], sthx[u])
  421 
  422 a64SthSthx:
  423         ldx             r30,r14,r21                     // get RT
  424         bl              a64Store2Bytes          // store r30 into user space
  425         b               a64UpdateCheck          // update RA if necessary and exit
  426 
  427 
  428 // Store halfword byte reversed (sthbrx)
  429 
  430 a64Sthbrx:
  431         addi    r21,r21,6                       // point to low two bytes of RT
  432         lhbrx   r30,r14,r21                     // load and reverse
  433         bl              a64Store2Bytes          // store r30 into user space
  434         b               a64Exit                         // successfully emulated
  435 
  436 
  437 // Load word w 0-fill (lwz[u], lwzx[u]), also lwarx.
  438 
  439 a64LwzLwzxLwarx:
  440         andc    r3,r19,r20                      // light bit 30 of r3 iff lwarx
  441         andi.   r0,r3,2                         // is it lwarx?
  442         bne--   a64PassAlong            // yes, never try to emulate a lwarx
  443         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  444         stdx    r30,r14,r21                     // update register file
  445         b               a64UpdateCheck          // update RA if necessary and exit
  446         
  447         
  448 // Load word w sign fill (lwa, lwax[u])
  449 
  450 a64Lwa:
   451         crclr   kUpdate                         // no update form of lwa (it's a reserved encoding)
  452 a64Lwax:
  453         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  454         extsw   r30,r30                         // sign extend
  455         stdx    r30,r14,r21                     // update register file
  456         b               a64UpdateCheck          // update RA if necessary and exit
  457 
  458 
  459 // Load word byte reversed (lwbrx)
  460 
  461 a64Lwbrx:
  462         bl              a64Load4Bytes           // load 4 bytes from user space into r30 (0-filled)
  463         rlwinm  r3,r30,24,0,31          // flip bytes 1234 to 4123
  464         rlwimi  r3,r30,8,8,15           // r3 is now 4323
  465         rlwimi  r3,r30,8,24,31          // r3 is now 4321
  466         stdx    r3,r14,r21                      // update register file
  467         b               a64Exit                         // successfully emulated
  468 
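// The two byte-reversed loads above amount to this C sketch (lwbrx shown;
// load_be32() stands in for the byte-by-byte user-space load done by a64Load4Bytes):
//
//      uint32_t v = load_be32(ea);
//      gpr[rt] = ((v & 0xFF) << 24) | ((v & 0xFF00) << 8)
//              | ((v >> 8) & 0xFF00) | (v >> 24);         // 0x11223344 -> 0x44332211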
  469         
  470 // Store word (stw[u], stwx[u])
  471 
  472 a64StwStwx:
  473         ldx             r30,r14,r21                     // get RT
  474         bl              a64Store4Bytes          // store r30 into user space
  475         b               a64UpdateCheck          // update RA if necessary and exit
  476 
  477 
  478 // Store word byte reversed (stwbrx)
  479 
  480 a64Stwbrx:
  481         addi    r21,r21,4                       // point to low word of RT
  482         lwbrx   r30,r14,r21                     // load and reverse
  483         bl              a64Store4Bytes          // store r30 into user space
  484         b               a64Exit                         // successfully emulated
  485 
  486 
  487 // Load doubleword (ld[u], ldx[u]), also lwa.
  488 
  489 a64LdLwa:                                                       // these are DS form: ld=0, ldu=1, and lwa=2
  490         mtcrf   0x01,r20                        // move DS field to cr7
  491         rlwinm  r3,r20,0,30,31          // must adjust EA by subtracting DS field
  492         sub             r12,r12,r3                      // subtract from full 64-bit EA
  493         and             r17,r12,r15                     // then re-clamp to 32 bits if necessary
  494         bt              30,a64Lwa                       // handle lwa
  495         crmove  kUpdate,31                      // if opcode bit 31 is set, it is ldu so set update flag
  496 a64Ldx:
  497         bl              a64Load8Bytes           // load 8 bytes from user space into r30
  498         stdx    r30,r14,r21                     // update register file
  499         b               a64UpdateCheck          // update RA if necessary and exit
  500 
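// DS-form fixup in C terms: the EA built in a64GotInstruction treated the low two
// instruction bits as displacement bits, so they are backed out here (sketch only):
//
//      ea -= instr & 3;                          // low 2 bits are the DS subopcode, not offset
//      if (fault_in_32bit_mode) ea &= 0xFFFFFFFFull;   // re-clamp, as "and r17,r12,r15" does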
  501 
  502 // Store doubleword (stdx[u], std[u], stwcx)
  503 
  504 a64StdxStwcx:
  505         bf--    30,a64PassAlong         // stwcx, so pass along alignment exception
  506         b               a64Stdx                         // was stdx
  507 a64StdStfiwx:                                           // if DS form: 0=std, 1=stdu, 2-3=undefined
  508         bt              30,a64Stfiwx            // handle stfiwx
  509         rlwinm  r3,r20,0,30,31          // must adjust EA by subtracting DS field
  510         mtcrf   0x01,r20                        // move DS field to cr7
  511         sub             r12,r12,r3                      // subtract from full 64-bit EA
  512         and             r17,r12,r15                     // then re-clamp to 32 bits if necessary
  513         crmove  kUpdate,31                      // if DS==1, then it is update form
  514 a64Stdx:
  515         ldx             r30,r14,r21                     // get RT
  516         bl              a64Store8Bytes          // store RT into user space
  517         b               a64UpdateCheck          // update RA if necessary and exit
  518 
  519 
  520 // Dcbz and Dcbz128 (bit 10 distinguishes the two forms)
  521 
  522 a64DcbzDcbz128:
  523         andis.  r0,r20,0x0020           // bit 10 set?
  524         li              r3,0                            // get a 0 to store
   525         li              r0,4                            // assume 32-byte version, store 8 bytes 4x
  526         rldicr  r17,r17,0,63-5          // 32-byte align EA
  527                 li              r4,_COMM_PAGE_BASE_ADDRESS
  528         beq             a64DcbzSetup            // it was the 32-byte version
  529         rldicr  r17,r17,0,63-7          // zero low 7 bits of EA
  530         li              r0,16                           // store 8 bytes 16x
  531 a64DcbzSetup:
  532                 sub             r4,r28,r4                       // get instruction offset from start of commpage
  533         and             r4,r4,r15                       // mask off high-order bits if 32-bit mode
  534                 cmpldi  r4,_COMM_PAGE_AREA_USED // did fault occur in commpage area?
  535         bge             a64NotCommpage          // not in commpage
  536         rlwinm. r4,r29,0,MSR_PR_BIT,MSR_PR_BIT  // did fault occur in user mode?
  537         beq--   a64NotCommpage          // do not zero cr7 if kernel got alignment exception
  538         lwz             r4,savecr(r13)          // if we take a dcbz{128} in the commpage...
  539         rlwinm  r4,r4,0,0,27            // ...clear user's cr7...
  540         stw             r4,savecr(r13)          // ...as a flag for commpage code
  541 a64NotCommpage:
  542         mtctr   r0
  543         cmpw    r0,r0                           // turn cr0 beq on so we can check for DSIs
  544         mtmsr   r25                                     // turn on DR and RI so we can address user space
  545         isync                                           // wait for it to happen
  546 a64DcbzLoop:
  547         std             r3,0(r17)                       // store into user space
  548         bne--   a64RedriveAsDSI
  549         addi    r17,r17,8
  550         bdnz    a64DcbzLoop
  551         
  552         mtmsr   r26                                     // restore MSR
  553         isync                                           // wait for it to happen
  554         b               a64Exit
  555 
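// Ignoring the commpage special case and the DSI redrive, the dcbz path reduces to
// this C sketch (store_user64() is an illustrative helper for the checked store):
//
//      size = (instr & 0x00200000) ? 128 : 32;   // bit 10 selects dcbz128 vs dcbz
//      ea  &= ~(uint64_t)(size - 1);             // align EA to the block size
//      for (i = 0; i < size; i += 8)
//          store_user64(ea + i, 0);              // zero the block, 8 bytes at a time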
  556 
  557 // Load and store multiple (lmw, stmw), distinguished by bit 25
  558 
  559 a64LmwStmw:
  560         subfic  r22,r21,32*8            // how many regs to load or store?
  561         srwi    r22,r22,1                       // get bytes to load/store
  562         bf              25,a64LoadMultiple      // handle lmw
  563         b               a64StoreMultiple        // it was stmw
  564         
  565         
  566 // Load string word immediate (lswi)
  567 
  568 a64Lswi:
  569         rlwinm  r22,r20,21,27,31        // get #bytes in r22
  570         and             r17,r18,r15                     // recompute EA as (RA|0), and clamp
  571         subi    r3,r22,1                        // r22==0?
  572         rlwimi  r22,r3,6,26,26          // map count of 0 to 32
  573         b               a64LoadMultiple
  574         
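// The NB decode above as a C sketch: a byte count field of 0 means 32 bytes.
//
//      nb = (instr >> 11) & 0x1F;                // NB field (instruction bits 16-20)
//      if (nb == 0) nb = 32;                     // the rlwimi above maps 0 to 32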
  575         
  576 // Store string word immediate (stswi)
  577 
  578 a64Stswi:
  579         rlwinm  r22,r20,21,27,31        // get #bytes in r22
  580         and             r17,r18,r15                     // recompute EA as (RA|0), and clamp
  581         subi    r3,r22,1                        // r22==0?
  582         rlwimi  r22,r3,6,26,26          // map count of 0 to 32
  583         b               a64StoreMultiple
  584         
  585         
  586 // Load string word indexed (lswx), also lwbrx
  587 
  588 a64LswxLwbrx:
  589         bf              30,a64Lwbrx                     // was lwbrx
  590         ld              r22,savexer(r13)        // get the xer
  591         rlwinm  r22,r22,0,25,31         // isolate the byte count
  592         b               a64LoadMultiple         // join common code
  593         
  594         
  595 // Store string word indexed (stswx), also stwbrx
  596 
  597 a64StswxStwbrx:
  598         bf              30,a64Stwbrx            // was stwbrx
  599         ld              r22,savexer(r13)        // get the xer
  600         rlwinm  r22,r22,0,25,31         // isolate the byte count
  601         b               a64StoreMultiple        // join common code
  602 
  603 
  604 // Load multiple words.  This handles lmw, lswi, and lswx.
  605 
  606 a64LoadMultiple:                                        // r22 = byte count, may be 0
  607         subic.  r3,r22,1                        // get (#bytes-1)
  608         blt             a64Exit                         // done if 0
  609         add             r4,r17,r3                       // get EA of last operand byte
  610         and             r4,r4,r15                       // clamp
  611         cmpld   r4,r17                          // address space wrap?
  612         blt--   a64PassAlong            // pass along exception if so
  613         srwi.   r4,r22,2                        // get # full words to load
  614         rlwinm  r22,r22,0,30,31         // r22 <- leftover byte count
  615         cmpwi   cr1,r22,0                       // leftover bytes?
  616         beq             a64Lm3                          // no words
  617         mtctr   r4                                      // set up word count
  618         cmpw    r0,r0                           // set beq for DSI test
  619 a64Lm2:
  620         mtmsr   r25                                     // turn on DR and RI
  621         isync                                           // wait for it to happen
  622         lbz             r3,0(r17)
  623         bne--   a64RedriveAsDSI         // got a DSI
  624         lbz             r4,1(r17)
  625         bne--   a64RedriveAsDSI         // got a DSI
  626         lbz             r5,2(r17)
  627         bne--   a64RedriveAsDSI         // got a DSI
  628         lbz             r6,3(r17)
  629         bne--   a64RedriveAsDSI         // got a DSI
  630         rlwinm  r30,r3,24,0,7           // pack bytes into r30
  631         rldimi  r30,r4,16,40
  632         rldimi  r30,r5,8,48
  633         rldimi  r30,r6,0,56
  634         mtmsr   r26                                     // turn DR back off so we can store into register file
  635         isync
  636         addi    r17,r17,4                       // bump EA
  637         stdx    r30,r14,r21                     // pack into register file
  638         addi    r21,r21,8                       // bump register file offset
  639         rlwinm  r21,r21,0,24,28         // wrap around to 0
  640         bdnz    a64Lm2
  641 a64Lm3:                                                         // cr1/r22 = leftover bytes (0-3), cr0 beq set
  642         beq             cr1,a64Exit                     // no leftover bytes
  643         mtctr   r22
  644         mtmsr   r25                                     // turn on DR so we can access user space
  645         isync
  646         lbz             r3,0(r17)                       // get 1st leftover byte
  647         bne--   a64RedriveAsDSI         // got a DSI
  648         rlwinm  r30,r3,24,0,7           // position in byte 4 of r30 (and clear rest of r30)
  649         bdz             a64Lm4                          // only 1 byte leftover
  650         lbz             r3,1(r17)                       // get 2nd byte
  651         bne--   a64RedriveAsDSI         // got a DSI
  652         rldimi  r30,r3,16,40            // insert into byte 5 of r30
  653         bdz             a64Lm4                          // only 2 bytes leftover
  654         lbz             r3,2(r17)                       // get 3rd byte
  655         bne--   a64RedriveAsDSI         // got a DSI
  656         rldimi  r30,r3,8,48                     // insert into byte 6
  657 a64Lm4:
  658         mtmsr   r26                                     // turn DR back off so we can store into register file
  659         isync
  660         stdx    r30,r14,r21                     // pack partially-filled word into register file
  661         b               a64Exit
  662 
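// The word/byte structure of a64LoadMultiple in C form (the DR/RI toggling, DSI
// checks, and save-area details are omitted; helper names are illustrative):
//
//      for ( ; nbytes >= 4; nbytes -= 4, ea += 4, rt = (rt + 1) & 31)
//          gpr[rt] = load_be32(ea);                       // full words, built byte by byte
//      if (nbytes)                                        // 1-3 leftover bytes,
//          gpr[rt] = load_be_bytes(ea, nbytes) << (8 * (4 - nbytes));  // left-justified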
  663 
  664 // Store multiple words.  This handles stmw, stswi, and stswx.
  665 
  666 a64StoreMultiple:                                       // r22 = byte count, may be 0
  667         subic.  r3,r22,1                        // get (#bytes-1)
  668         blt             a64Exit                         // done if 0
  669         add             r4,r17,r3                       // get EA of last operand byte
  670         and             r4,r4,r15                       // clamp
  671         cmpld   r4,r17                          // address space wrap?
  672         blt--   a64PassAlong            // pass along exception if so
  673         srwi.   r4,r22,2                        // get # full words to load
  674         rlwinm  r22,r22,0,30,31         // r22 <- leftover byte count
  675         cmpwi   cr1,r22,0                       // leftover bytes?
  676         beq             a64Sm3                          // no words
  677         mtctr   r4                                      // set up word count
  678         cmpw    r0,r0                           // turn on beq so we can check for DSIs
  679 a64Sm2:
  680         ldx             r30,r14,r21                     // get next register
  681         addi    r21,r21,8                       // bump register file offset
  682         rlwinm  r21,r21,0,24,28         // wrap around to 0
  683         srwi    r3,r30,24                       // shift the four bytes into position
  684         srwi    r4,r30,16
  685         srwi    r5,r30,8
  686         mtmsr   r25                                     // turn on DR so we can access user space
  687         isync                                           // wait for it to happen
  688         stb             r3,0(r17)
  689         bne--   a64RedriveAsDSI         // got a DSI
  690         stb             r4,1(r17)
  691         bne--   a64RedriveAsDSI         // got a DSI
  692         stb             r5,2(r17)
  693         bne--   a64RedriveAsDSI         // got a DSI
  694         stb             r30,3(r17)
  695         bne--   a64RedriveAsDSI         // got a DSI
  696         mtmsr   r26                                     // turn DR back off
  697         isync
  698         addi    r17,r17,4                       // bump EA
  699         bdnz    a64Sm2
  700 a64Sm3:                                                         // r22 = 0-3, cr1 set on r22, cr0 beq set
  701         beq             cr1,a64Exit                     // no leftover bytes
  702         ldx             r30,r14,r21                     // get last register
  703         mtctr   r22
  704         mtmsr   r25                                     // turn on DR so we can access user space
  705         isync                                           // wait for it to happen
  706 a64Sm4:
  707         rlwinm  r30,r30,8,0,31          // position next byte
  708         stb             r30,0(r17)                      // pack into user space
  709         addi    r17,r17,1                       // bump user space ptr
  710         bne--   a64RedriveAsDSI         // got a DSI
  711         bdnz    a64Sm4
  712         mtmsr   r26                                     // turn DR back off
  713         isync
  714         b               a64Exit
  715 
  716 
  717 // Subroutines to load bytes from user space.
  718 
  719 a64Load2Bytes:                                          // load 2 bytes right-justified into r30
  720         addi    r7,r17,1                        // get EA of last byte
  721         and             r7,r7,r15                       // clamp
  722         cmpld   r7,r17                          // address wrap?
  723         blt--   a64PassAlong            // yes
  724         mtmsr   r25                                     // turn on DR so we can access user space
  725         isync                                           // wait for it to happen
  726         sub.    r30,r30,r30                     // 0-fill dest and set beq
  727         b               a64Load2                        // jump into routine
  728 a64Load4Bytes:                                          // load 4 bytes right-justified into r30 (ie, low order word)
  729         addi    r7,r17,3                        // get EA of last byte
  730         and             r7,r7,r15                       // clamp
  731         cmpld   r7,r17                          // address wrap?
  732         blt--   a64PassAlong            // yes
  733         mtmsr   r25                                     // turn on DR so we can access user space
  734         isync                                           // wait for it to happen
  735         sub.    r30,r30,r30                     // 0-fill dest and set beq
  736         b               a64Load4                        // jump into routine
  737 a64Load8Bytes:                                          // load 8 bytes into r30
  738         addi    r7,r17,7                        // get EA of last byte
  739         and             r7,r7,r15                       // clamp
  740         cmpld   r7,r17                          // address wrap?
  741         blt--   a64PassAlong            // yes
  742         mtmsr   r25                                     // turn on DR so we can access user space
  743         isync                                           // wait for it to happen
  744         sub.    r30,r30,r30                     // 0-fill dest and set beq
  745         lbz             r3,-7(r7)                       // get byte 0
  746         bne--   a64RedriveAsDSI         // got a DSI
  747         lbz             r4,-6(r7)                       // and byte 1, etc
  748         bne--   a64RedriveAsDSI         // got a DSI
  749         lbz             r5,-5(r7)
  750         bne--   a64RedriveAsDSI         // got a DSI
  751         lbz             r6,-4(r7)
  752         bne--   a64RedriveAsDSI         // got a DSI
  753         rldimi  r30,r3,56,0                     // position bytes in upper word
  754         rldimi  r30,r4,48,8
  755         rldimi  r30,r5,40,16
  756         rldimi  r30,r6,32,24
  757 a64Load4:
  758         lbz             r3,-3(r7)
  759         bne--   a64RedriveAsDSI         // got a DSI
  760         lbz             r4,-2(r7)
  761         bne--   a64RedriveAsDSI         // got a DSI
  762         rldimi  r30,r3,24,32            // insert bytes 4 and 5 into r30
  763         rldimi  r30,r4,16,40
  764 a64Load2:
  765         lbz             r3,-1(r7)
  766         bne--   a64RedriveAsDSI         // got a DSI
  767         lbz             r4,0(r7)
  768         bne--   a64RedriveAsDSI         // got a DSI
  769         mtmsr   r26                                     // turn DR back off
  770         isync
  771         rldimi  r30,r3,8,48                     // insert bytes 6 and 7 into r30
  772         rldimi  r30,r4,0,56
  773         blr
  774         
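// Taken together, the entry points above build a right-justified, zero-filled value
// one byte at a time; in C terms (user_byte() is an illustrative helper):
//
//      uint64_t v = 0;
//      for (i = 0; i < n; i++)                   // n = 2, 4, or 8
//          v = (v << 8) | user_byte(ea + i);     // each access is DSI-checked in the asm
//      r30 = v;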
  775         
  776 // Subroutines to store bytes into user space.
  777 
  778 a64Store2Bytes:                                         // store bytes 6 and 7 of r30
  779         addi    r7,r17,1                        // get EA of last byte
  780         and             r7,r7,r15                       // clamp
  781         cmpld   r7,r17                          // address wrap?
  782         blt--   a64PassAlong            // yes
  783         mtmsr   r25                                     // turn on DR so we can access user space
  784         isync                                           // wait for it to happen
  785         cmpw    r0,r0                           // set beq so we can check for DSI
  786         b               a64Store2                       // jump into routine
  787 a64Store4Bytes:                                         // store bytes 4-7 of r30 (ie, low order word)
  788         addi    r7,r17,3                        // get EA of last byte
  789         and             r7,r7,r15                       // clamp
  790         cmpld   r7,r17                          // address wrap?
  791         blt--   a64PassAlong            // yes
  792         mtmsr   r25                                     // turn on DR so we can access user space
  793         isync                                           // wait for it to happen
  794         cmpw    r0,r0                           // set beq so we can check for DSI
  795         b               a64Store4                       // jump into routine
  796 a64Store8Bytes:                                         // r30 = bytes
  797         addi    r7,r17,7                        // get EA of last byte
  798         and             r7,r7,r15                       // clamp
  799         cmpld   r7,r17                          // address wrap?
  800         blt--   a64PassAlong            // yes
  801         mtmsr   r25                                     // turn on DR so we can access user space
  802         isync                                           // wait for it to happen
  803         cmpw    r0,r0                           // set beq so we can check for DSI
  804         rotldi  r3,r30,8                        // shift byte 0 into position
  805         rotldi  r4,r30,16                       // and byte 1
  806         rotldi  r5,r30,24                       // and byte 2
  807         rotldi  r6,r30,32                       // and byte 3
  808         stb             r3,-7(r7)                       // store byte 0
  809         bne--   a64RedriveAsDSI         // got a DSI
  810         stb             r4,-6(r7)                       // and byte 1 etc...
  811         bne--   a64RedriveAsDSI         // got a DSI
  812         stb             r5,-5(r7)
  813         bne--   a64RedriveAsDSI         // got a DSI
  814         stb             r6,-4(r7)
  815         bne--   a64RedriveAsDSI         // got a DSI
  816 a64Store4:
  817         rotldi  r3,r30,40                       // shift byte 4 into position
  818         rotldi  r4,r30,48                       // and byte 5
  819         stb             r3,-3(r7)
  820         bne--   a64RedriveAsDSI         // got a DSI
  821         stb             r4,-2(r7)
  822         bne--   a64RedriveAsDSI         // got a DSI
  823 a64Store2:
  824         rotldi  r3,r30,56                       // shift byte 6 into position
  825         stb             r3,-1(r7)                       // store byte 6
  826         bne--   a64RedriveAsDSI         // got a DSI
  827         stb             r30,0(r7)                       // store byte 7, which is already positioned
  828         bne--   a64RedriveAsDSI         // got a DSI
  829         mtmsr   r26                                     // turn off DR
  830         isync
  831         blr
  832         
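// And the store subroutines in the same style, most-significant byte of the n first
// (store_user_byte() is an illustrative helper; n = 2, 4, or 8):
//
//      for (i = 0; i < n; i++)
//          store_user_byte(ea + i, (r30 >> (8 * (n - 1 - i))) & 0xFF);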
  833                 
  834 // Exit routines.
  835 
  836 a64ExitEm:
  837                 li              r30,T_EMULATE                   // Change exception code to emulate
  838                 stw             r30,saveexception(r13)  // Save it
  839                 b               a64Exit                                 // Join standard exit routine...
  840 
  841 a64PassAlong:                                                   // unhandled exception, just pass it along
  842         li              r0,1                                    // Set that the alignment/program exception was not emulated
  843         crset   kNotify                                 // return T_ALIGNMENT or T_PROGRAM
  844                 stw             r0,savemisc3(r13)               // Set that emulation was not done
  845         crclr   kTrace                                  // not a trace interrupt
  846         b               a64Exit1
  847 a64UpdateCheck:                                                 // successfully emulated, may be update form
  848         bf              kUpdate,a64Exit                 // update?
  849         stdx    r12,r14,r16                             // yes, store 64-bit EA into RA
  850 a64Exit:                                                                // instruction successfully emulated
  851         addi    r28,r28,4                               // bump SRR0 past the emulated instruction
  852         li              r30,T_IN_VAIN                   // eat the interrupt since we emulated it
  853         and             r28,r28,r15                             // clamp to address space size (32 vs 64)
  854         std             r28,savesrr0(r13)               // save, so we return to next instruction
  855 a64Exit1:
  856         bt--    kTrace,a64Trace                 // were we in single-step at fault?
  857         bt--    kNotify,a64Notify               // should we say T_ALIGNMENT anyway?
  858 a64Exit2:
  859         mcrf    cr6,cr3                                 // restore feature flags
  860         mr              r11,r30                                 // pass back exception code (T_IN_VAIN etc) in r11
  861         b               EXT(EmulExit)                   // return to exception processing
  862 
  863 
  864 // Notification requested: pass exception upstairs even though it might have been emulated.
  865 
  866 a64Notify:
  867         li              r30,T_ALIGNMENT                 // somebody wants to know about it (but don't redrive)
  868         bt              kAlignment,a64Exit2             // was an alignment exception
  869         li              r30,T_PROGRAM                   // was an emulated instruction
  870         b               a64Exit2
  871 
  872 
  873 // Emulate a trace interrupt after handling alignment interrupt.
  874 
  875 a64Trace:
  876         lwz             r9,SAVflags(r13)                // get the save-area flags
  877         li              r30,T_TRACE
  878         oris    r9,r9,hi16(SAVredrive)  // Set the redrive bit
  879         stw             r30,saveexception(r13)  // Set the exception code
  880         stw             r9,SAVflags(r13)                // Set the flags
  881         b               a64Exit2                                // Exit and do trace interrupt...
  882 
  883 
  884 // Got a DSI accessing user space.  Redrive.  One way this can happen is if another
  885 // processor removes a mapping while we are emulating.
  886 
  887 a64RedriveAsISI:                                                // this DSI happened fetching the opcode (r1==DSISR  r4==DAR)
  888         mtmsr   r26                                             // turn DR back off
  889         isync                                                   // wait for it to happen
  890         li              r30,T_INSTRUCTION_ACCESS
  891         rlwimi  r29,r1,0,0,4                    // insert the fault type from DSI's DSISR
  892         std             r29,savesrr1(r13)               // update SRR1 to look like an ISI
  893         b               a64Redrive
  894 
  895 a64RedriveAsDSI:                                                // r0==DAR  r1==DSISR
  896         mtmsr   r26                                             // turn DR back off
  897         isync                                                   // wait for it to happen
  898         stw             r1,savedsisr(r13)               // Set the DSISR of failed access
  899         std             r0,savedar(r13)                 // Set the address of the failed access
  900         li              r30,T_DATA_ACCESS               // Set failing data access code
  901 a64Redrive:
  902         lwz             r9,SAVflags(r13)                // Pick up the flags
  903         stw             r30,saveexception(r13)  // Set the replacement code
  904         oris    r9,r9,hi16(SAVredrive)  // Set the redrive bit
  905         stw             r9,SAVflags(r13)                // Set redrive request
  906         crclr   kTrace                                  // don't take a trace interrupt
  907         crclr   kNotify                                 // don't pass alignment exception
  908         b               a64Exit2                                // done
  909         
  910 
  911 // This is the branch table, indexed by the "AAAAB" opcode hash.
  912 
  913 a64BranchTable:
  914         .long   a64LwzLwzxLwarx         // 00000  lwz[u], lwzx[u], lwarx
  915         .long   a64Ldx                          // 00001  ldx[u]
  916         .long   a64PassAlong            // 00010  ldarx         (never emulate these)
  917         .long   a64PassAlong            // 00011
  918         .long   a64StwStwx                      // 00100  stw[u], stwx[u]
  919         .long   a64StdxStwcx            // 00101  stdx[u], stwcx
  920         .long   a64PassAlong            // 00110
  921         .long   a64PassAlong            // 00111  stdcx         (never emulate these)
  922         .long   a64LhzLhzx                      // 01000  lhz[u], lhzx[u]
  923         .long   a64PassAlong            // 01001
  924         .long   a64LhaLhax                      // 01010  lha[u], lhax[u]
  925         .long   a64Lwax                         // 01011  lwax[u]
  926         .long   a64SthSthx                      // 01100  sth[u], sthx[u]
  927         .long   a64PassAlong            // 01101
  928         .long   a64LmwStmw                      // 01110  lmw, stmw
  929         .long   a64PassAlong            // 01111
  930         .long   a64LfsLfsx                      // 10000  lfs[u], lfsx[u]
  931         .long   a64LswxLwbrx            // 10001  lswx, lwbrx
  932         .long   a64LfdLfdx                      // 10010  lfd[u], lfdx[u]
  933         .long   a64Lswi                         // 10011  lswi
  934         .long   a64StfsStfsx            // 10100  stfs[u], stfsx[u]
  935         .long   a64StswxStwbrx          // 10101  stswx, stwbrx
  936         .long   a64StfdStfdx            // 10110  stfd[u], stfdx[u]
  937         .long   a64Stswi                        // 10111  stswi
  938         .long   a64PassAlong            // 11000
  939         .long   a64Lhbrx                        // 11001  lhbrx
  940         .long   a64LdLwa                        // 11010  ld[u], lwa
  941         .long   a64PassAlong            // 11011
  942         .long   a64PassAlong            // 11100
  943         .long   a64Sthbrx                       // 11101  sthbrx
  944         .long   a64StdStfiwx            // 11110  std[u], stfiwx
  945         .long   a64DcbzDcbz128          // 11111  dcbz, dcbz128
  946 
  947 
