FreeBSD/Linux Kernel Cross Reference
sys/osfmk/ppc/hw_lock.s


    1 /*
    2  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * The contents of this file constitute Original Code as defined in and
    7  * are subject to the Apple Public Source License Version 1.1 (the
    8  * "License").  You may not use this file except in compliance with the
    9  * License.  Please obtain a copy of the License at
   10  * http://www.apple.com/publicsource and read it before using this file.
   11  * 
   12  * This Original Code and all software distributed under the License are
   13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
   17  * License for the specific language governing rights and limitations
   18  * under the License.
   19  * 
   20  * @APPLE_LICENSE_HEADER_END@
   21  */
   22 
   23 #include <mach_assert.h>
   24 #include <mach_ldebug.h>
   25 #include <ppc/asm.h>
   26 #include <ppc/proc_reg.h>
   27 #include <assym.s>
   28 
   29 #define STRING  ascii
   30 
   31 #define ILK_LOCKED              0x01
   32 #define WAIT_FLAG               0x02
   33 #define WANT_UPGRADE    0x04
   34 #define WANT_EXCL               0x08
   35 
   36 #define TH_FN_OWNED             0x01
   37 
   38 # volatile CR bits
   39 #define hwtimeout       20
   40 #define mlckmiss        21
   41 
   42 #define RW_DATA         0
   43 
   44 #define PROLOG(space)                                                                                                           \
   45                         stwu    r1,-(FM_ALIGN(space)+FM_SIZE)(r1)                       __ASMNL__       \
   46                         mfcr    r2                                                                                      __ASMNL__       \
   47                         mflr    r0                                                                                      __ASMNL__       \
   48                         stw             r3,FM_ARG0(r1)                                                          __ASMNL__       \
   49                         stw             r11,FM_ARG0+0x04(r1)                                            __ASMNL__       \
   50                         stw             r2,(FM_ALIGN(space)+FM_SIZE+FM_CR_SAVE)(r1)     __ASMNL__       \
   51                         stw             r0,(FM_ALIGN(space)+FM_SIZE+FM_LR_SAVE)(r1)     __ASMNL__
   52         
   53 #define EPILOG                                                                                                                                  \
   54                         lwz             r1,0(r1)                                                                                __ASMNL__       \
   55                         lwz             r0,FM_LR_SAVE(r1)                                                               __ASMNL__       \
   56                         mtlr    r0                                                                                              __ASMNL__
   57 
   58 /*
   59  *              void hw_lock_init(hw_lock_t)
   60  *
   61  *                      Initialize a hardware lock.
   62  */
   63                         .align  5
   64                         .globl  EXT(hw_lock_init)
   65 
   66 LEXT(hw_lock_init)
   67 
   68                         li      r0,     0                                                               ; set lock to free == 0 
   69                         stw     r0,     0(r3)                                                   ; Initialize the lock 
   70                         blr
   71         
   72 /*
   73  *              unsigned int hw_lock_bit(hw_lock_t, unsigned int bit, unsigned int timeout)
   74  *
   75  *                      Try to acquire spin-lock. The second parameter is the bit mask to test and set.
    76  *                      Multiple bits may be set. Return success (1) or failure (0).
   77  *                      Attempt will fail after timeout ticks of the timebase.
   78  */
   79                         .align  5
   80                         .globl  EXT(hw_lock_bit)
   81 
   82 LEXT(hw_lock_bit)
   83 
   84                         crset   hwtimeout                                               ; timeout option
   85                         mr              r12,r4                                                  ; Load bit mask
   86                         mr              r4,r5                                                   ; Load timeout value
   87                         b               lckcomm                                                 ; Join on up...
   88 
   89 /*
   90  *      void hw_lock_lock(hw_lock_t)
   91  *
   92  *                      Acquire lock, spinning until it becomes available.
   93  *                      Return with preemption disabled.
   94  *                      We will just set a default timeout and jump into the NORMAL timeout lock.
   95  */
   96                         .align  5
   97                         .globl  EXT(hw_lock_lock)
   98 
   99 LEXT(hw_lock_lock)
  100                         crclr   hwtimeout                                               ; no timeout option
  101                         li              r4,0                                                    ; request default timeout value
  102                         li              r12,ILK_LOCKED                                  ; Load bit mask
  103                         b               lckcomm                                                 ; Join on up...
  104 
  105 lockDisa:
  106                         crset   hwtimeout                                               ; timeout option
  107                         li              r4,0                                                    ; request default timeout value
  108                         li              r12,ILK_LOCKED                                  ; Load bit mask
  109                         b               lckcomm                                                 ; Join on up...
  110 
  111 /*
  112  *              unsigned int hw_lock_to(hw_lock_t, unsigned int timeout)
  113  *
  114  *                      Try to acquire spin-lock. Return success (1) or failure (0).
  115  *                      Attempt will fail after timeout ticks of the timebase.
  116  *                      We try fairly hard to get this lock.  We disable for interruptions, but
  117  *                      reenable after a "short" timeout (128 ticks, we may want to change this).
  118  *                      After checking to see if the large timeout value (passed in) has expired and a
   119  *                      sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
  120  *                      we return either in abject failure, or disable and go back to the lock sniff routine.
  121  *                      If the sniffer finds the lock free, it jumps right up and tries to grab it.
  122  */
  123                         .align  5
  124                         .globl  EXT(hw_lock_to)
  125 
  126 LEXT(hw_lock_to)
  127                         crset   hwtimeout                                               ; timeout option
  128                         li              r12,ILK_LOCKED                                  ; Load bit mask
  129 lckcomm:
  130                         mfsprg  r6,1                                                    ; Get the current activation 
  131                         lwz             r5,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
  132                         addi    r5,r5,1                                                 ; Bring up the disable count
  133                         stw             r5,ACT_PREEMPT_CNT(r6)                  ; Save it back 
  134                         mr              r5,r3                                                   ; Get the address of the lock
  135                         li              r8,0                                                    ; Set r8 to zero
  136 
  137 lcktry:         lwarx   r6,0,r5                                                 ; Grab the lock value
  138                         and.    r3,r6,r12                                               ; Is it locked?
  139                         or              r6,r6,r12                                               ; Set interlock 
  140                         bne--   lckspin                                                 ; Yeah, wait for it to clear...
  141                         stwcx.  r6,0,r5                                                 ; Try to seize that there durn lock
  142                         bne--   lcktry                                                  ; Couldn't get it...
  143                         li              r3,1                                                    ; return true 
  144                         .globl  EXT(hwllckPatch_isync)
  145 LEXT(hwllckPatch_isync)   
   146                         isync                                                                   ; Make sure we don't use a speculatively loaded value
  147                         blr                                                                             ; Go on home...
  148 
  149 lckspin:        li              r6,lgKillResv                                   ; Get killing field     
  150                         stwcx.  r6,0,r6                                                 ; Kill reservation
  151                         
  152                         mr.             r4,r4                                                   ; Test timeout value
  153                         bne++   lockspin0
  154                         lis             r4,hi16(EXT(LockTimeOut))               ; Get the high part 
  155                         ori             r4,r4,lo16(EXT(LockTimeOut))    ; And the low part
  156                         lwz             r4,0(r4)                                                ; Get the timeout value
  157 lockspin0:
   158                         mr.             r8,r8                                                   ; Is r8 zero (first spin attempt)?
   159                         bne++   lockspin1                                               ; Not zero, so not the first attempt; MSR masks already set up
  160                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
  161                         mfmsr   r9                                                              ; Get the MSR value
  162                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
  163                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
  164                         andc    r9,r9,r0                                                ; Clear FP and VEC
  165                         andc    r7,r9,r7                                                ; Clear EE as well
  166                         mtmsr   r7                                                              ; Turn off interruptions 
  167                         isync                                                                   ; May have turned off vec and fp here 
  168                         mftb    r8                                                              ; Get timestamp on entry
  169                         b               lcksniff
  170 
  171 lockspin1:      mtmsr   r7                                                              ; Turn off interruptions 
  172                         mftb    r8                                                              ; Get timestamp on entry
  173 
  174 lcksniff:       lwz             r3,0(r5)                                                ; Get that lock in here
  175                         and.    r3,r3,r12                                               ; Is it free yet?
  176                         beq++   lckretry                                                ; Yeah, try for it again...
  177                         
  178                         mftb    r10                                                             ; Time stamp us now
  179                         sub             r10,r10,r8                                              ; Get the elapsed time
  180                         cmplwi  r10,128                                                 ; Have we been spinning for 128 tb ticks?
  181                         blt++   lcksniff                                                ; Not yet...
  182                         
  183                         mtmsr   r9                                                              ; Say, any interrupts pending?
  184 
   185 ;                       The following instructions force the pipeline to be interlocked so that only one
   186 ;                       instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   187 ;                       time; if it's too short, pending interruptions will not have a chance to be taken
  188 
  189                         subi    r4,r4,128                                               ; Back off elapsed time from timeout value
  190                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
  191                         mr.             r4,r4                                                   ; See if we used the whole timeout
  192                         li              r3,0                                                    ; Assume a timeout return code
  193                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
  194                         
  195                         ble--   lckfail                                                 ; We failed
  196                         b               lockspin1                                               ; Now that we've opened an enable window, keep trying...
  197 lckretry:
  198                         mtmsr   r9                                                              ; Restore interrupt state
   199                         li              r8,1                                                    ; Ensure that R8 is not 0
  200                         b               lcktry
  201 lckfail:                                                                                        ; We couldn't get the lock
  202                         bf              hwtimeout,lckpanic
  203                         li              r3,0                                                    ; Set failure return code
  204                         blr                                                                             ; Return, head hanging low...
  205 lckpanic:
  206                         mr              r4,r5
  207                         mr              r5,r3
  208                         lis             r3,hi16(lckpanic_str)                   ; Get the failed lck message
  209                         ori             r3,r3,lo16(lckpanic_str)                ; Get the failed lck message
  210                         bl              EXT(panic)
  211                         BREAKPOINT_TRAP                                                 ; We die here anyway
  212                         .data
  213 lckpanic_str:
  214                         STRINGD "timeout on attempt to acquire lock (0x%08X), value = 0x%08X\n\000"
  215                         .text
  216 
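/*
 *              Hedged usage sketch (not part of this file): how a C caller might pair the
 *              acquire and release primitives above.  The lock variable, its storage type
 *              and the timeout value are illustrative assumptions.
 *
 *                      static hw_lock_data_t   my_lock;                // assumed word-sized lock storage
 *
 *                      hw_lock_init(&my_lock);
 *
 *                      hw_lock_lock(&my_lock);                         // spin until held; preemption disabled on return
 *                      ... critical section ...
 *                      hw_lock_unlock(&my_lock);                       // release and reenable preemption
 *
 *                      if (hw_lock_to(&my_lock, 10000)) {              // bounded spin of 10000 timebase ticks
 *                              ... critical section ...
 *                              hw_lock_unlock(&my_lock);
 *                      }                                               // else: timed out, lock not taken
 */
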
  217 /*
  218  *      void hw_lock_unlock(hw_lock_t)
  219  *
  220  *      Unconditionally release lock.
  221  *      Release preemption level.
  222  */
  223                         .align  5
  224                         .globl  EXT(hw_lock_unlock)
  225 
  226 LEXT(hw_lock_unlock)
  227 
  228                         .globl  EXT(hwulckPatch_isync)
  229 LEXT(hwulckPatch_isync)   
  230                         isync 
  231                         .globl  EXT(hwulckPatch_eieio)
  232 LEXT(hwulckPatch_eieio)
  233                         eieio
  234                         li      r0,     0                                                               ; set lock to free
  235                         stw     r0,     0(r3)
  236 
  237                         b               epStart                                                 ; Go enable preemption...
  238 
  239 /*
  240  *              unsigned int hw_unlock_bit(hw_lock_t, unsigned int bit)
  241  *
  242  *                      Release bit based spin-lock. The second parameter is the bit mask to clear.
  243  *                      Multiple bits may be cleared.
  244  *
  245  */
  246                         .align  5
  247                         .globl  EXT(hw_unlock_bit)
  248 
  249 LEXT(hw_unlock_bit)
  250 
  251                         .globl  EXT(hwulckbPatch_isync)
  252 LEXT(hwulckbPatch_isync)   
  253                         isync 
  254                         .globl  EXT(hwulckbPatch_eieio)
  255 LEXT(hwulckbPatch_eieio)
  256                         eieio
  257 ubittry:        lwarx   r0,0,r3                                                 ; Grab the lock value
  258                         andc    r0,r0,r4                                                ; Clear the lock bits
  259                         stwcx.  r0,0,r3                                                 ; Try to clear that there durn lock
  260                         bne-    ubittry                                                 ; Try again, couldn't save it...
  261 
  262                         b               epStart                                                 ; Go enable preemption...
  263 
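/*
 *              Hedged sketch of the bit-lock pair (hw_lock_bit/hw_unlock_bit); the lock
 *              word, mask and timeout below are illustrative assumptions, not values
 *              taken from this file.
 *
 *                      #define MY_ILK          0x01
 *                      static hw_lock_data_t   my_bit_lock;
 *
 *                      if (hw_lock_bit(&my_bit_lock, MY_ILK, LockTimeOut)) {
 *                              ... MY_ILK is now set and owned by this CPU ...
 *                              hw_unlock_bit(&my_bit_lock, MY_ILK);
 *                      }                                               // else: timed out
 */
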
  264 /*
  265  *              unsigned int hw_lock_mbits(hw_lock_t, unsigned int bits, unsigned int value, 
  266  *                      unsigned int newb, unsigned int timeout)
  267  *
  268  *                      Try to acquire spin-lock. The second parameter is the bit mask to check.
  269  *                      The third is the value of those bits and the 4th is what to set them to.
  270  *                      Return success (1) or failure (0).
  271  *                      Attempt will fail after timeout ticks of the timebase.
  272  *                      We try fairly hard to get this lock.  We disable for interruptions, but
  273  *                      reenable after a "short" timeout (128 ticks, we may want to shorten this).
  274  *                      After checking to see if the large timeout value (passed in) has expired and a
   275  *                      sufficient number of cycles have gone by (to ensure pending 'rupts are taken),
  276  *                      we return either in abject failure, or disable and go back to the lock sniff routine.
  277  *                      If the sniffer finds the lock free, it jumps right up and tries to grab it.
  278  */
  279                         .align  5
  280                         .globl  EXT(hw_lock_mbits)
  281 
  282 LEXT(hw_lock_mbits)
  283 
  284                         li              r10,0                   
  285 
  286 mbittry:        lwarx   r12,0,r3                                                ; Grab the lock value
  287                         and             r0,r12,r4                                               ; Clear extra bits
  288                         andc    r12,r12,r4                                              ; Clear all bits in the bit mask
  289                         or              r12,r12,r6                                              ; Turn on the lock bits
  290                         cmplw   r0,r5                                                   ; Are these the right bits?
  291                         bne--   mbitspin                                                ; Nope, wait for it to clear...
  292                         stwcx.  r12,0,r3                                                ; Try to seize that there durn lock
  293                         beq++   mbitgot                                                 ; We got it, yahoo...
  294                         b               mbittry                                                 ; Just start up again if the store failed...
  295 
  296                         .align  5
  297 mbitspin:       li              r11,lgKillResv                                  ; Point to killing field
  298                         stwcx.  r11,0,r11                                               ; Kill it
  299                         
   300                         mr.             r10,r10                                                 ; Is r10 zero (first spin attempt)?
   301                         bne++   mbitspin0                                               ; Not zero, so not the first attempt; MSR masks already set up
  302                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
  303                         mfmsr   r9                                                              ; Get the MSR value
  304                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
  305                         ori             r8,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
  306                         andc    r9,r9,r0                                                ; Clear FP and VEC
  307                         andc    r8,r9,r8                                                ; Clear EE as well
  308                         mtmsr   r8                                                              ; Turn off interruptions
  309                         isync                                                                   ; May have turned off vectors or float here
  310                         mftb    r10                                                             ; Get the low part of the time base
  311                         b               mbitsniff
  312 mbitspin0:
  313                         mtmsr   r8                                                              ; Turn off interruptions
  314                         mftb    r10                                                             ; Get the low part of the time base
  315 mbitsniff:
  316                         lwz             r12,0(r3)                                               ; Get that lock in here
  317                         and             r0,r12,r4                                               ; Clear extra bits
  318                         cmplw   r0,r5                                                   ; Are these the right bits?
  319                         beq++   mbitretry                                               ; Yeah, try for it again...
  320                         
  321                         mftb    r11                                                             ; Time stamp us now
  322                         sub             r11,r11,r10                                             ; Get the elapsed time
  323                         cmplwi  r11,128                                                 ; Have we been spinning for 128 tb ticks?
  324                         blt++   mbitsniff                                               ; Not yet...
  325                         
  326                         mtmsr   r9                                                              ; Say, any interrupts pending?                  
  327 
   328 ;                       The following instructions force the pipeline to be interlocked so that only one
   329 ;                       instruction is issued per cycle.  This ensures that we stay enabled for a long enough
   330 ;                       time. If it is too short, pending interruptions will not have a chance to be taken 
  331                         
  332                         subi    r7,r7,128                                               ; Back off elapsed time from timeout value
  333                         or              r7,r7,r7                                                ; Do nothing here but force a single cycle delay
  334                         mr.             r7,r7                                                   ; See if we used the whole timeout
  335                         or              r7,r7,r7                                                ; Do nothing here but force a single cycle delay
  336                         
  337                         ble--   mbitfail                                                ; We failed
  338                         b               mbitspin0                                               ; Now that we have opened an enable window, keep trying...
  339 mbitretry:
  340                         mtmsr   r9                                                              ; Enable for interruptions
  341                         li              r10,1                                                   ; Make sure this is non-zero
  342                         b               mbittry
  343 
  344                         .align  5
  345 mbitgot:        
  346                         li              r3,1                                                    ; Set good return code
  347                         .globl  EXT(hwlmlckPatch_isync)
  348 LEXT(hwlmlckPatch_isync)   
   349                         isync                                                                   ; Make sure we do not use a speculatively loaded value
  350                         blr
  351 
  352 mbitfail:       li              r3,0                                                    ; Set failure return code
  353                         blr                                                                             ; Return, head hanging low...
  354 
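/*
 *              Hedged C-level equivalent of the atomic step hw_lock_mbits performs with
 *              lwarx/stwcx. (the timeout and interrupt handling are omitted); the helper
 *              name and the use of a GCC builtin are illustrative assumptions.
 *
 *                      static int
 *                      mbits_try_once(volatile unsigned int *lock, unsigned int bits,
 *                                     unsigned int value, unsigned int newb)
 *                      {
 *                              unsigned int old = *lock;
 *                              if ((old & bits) != value)                      // wrong bits: caller keeps sniffing
 *                                      return 0;
 *                              unsigned int new = (old & ~bits) | newb;        // clear masked bits, set the new ones
 *                              return __sync_bool_compare_and_swap(lock, old, new);
 *                      }
 */
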
  355 /*
  356  *      unsigned int hw_cpu_sync(unsigned int *, unsigned int timeout)
  357  *
  358  *                      Spin until word hits 0 or timeout. 
  359  *                      Return success (1) or failure (0).
  360  *                      Attempt will fail after timeout ticks of the timebase.
  361  *
  362  *                      The theory is that a processor will bump a counter as it signals
   363  *                      other processors.  Then it will spin until the counter hits 0 (or
   364  *                      times out).  The other processors, as they receive the signal, will 
   365  *                      decrement the counter.
  366  *
   367  *                      The other processors use an interlocked update to decrement; this one
   368  *                      does not need to interlock.
  369  */
  370                         .align  5
  371                         .globl  EXT(hw_cpu_sync)
  372 
  373 LEXT(hw_cpu_sync)
  374 
  375                         mftb    r10                                                             ; Get the low part of the time base
  376                         mr              r9,r3                                                   ; Save the sync word address
  377                         li              r3,1                                                    ; Assume we work
  378 
  379 csynctry:       lwz             r11,0(r9)                                               ; Grab the sync value
  380                         mr.             r11,r11                                                 ; Counter hit 0?
   381                         beqlr-                                                                  ; Yeah, everyone has checked in; return success...
  382                         mftb    r12                                                             ; Time stamp us now
  383 
  384                         sub             r12,r12,r10                                             ; Get the elapsed time
  385                         cmplw   r4,r12                                                  ; Have we gone too long?
  386                         bge+    csynctry                                                ; Not yet...
  387                         
  388                         li              r3,0                                                    ; Set failure...
  389                         blr                                                                             ; Return, head hanging low...
  390 
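/*
 *              Hedged usage sketch for hw_cpu_sync (the variable names and the signalling
 *              mechanism are illustrative assumptions):
 *
 *                      static unsigned int     sync_count;
 *
 *                      // signalling processor:
 *                      hw_atomic_add(&sync_count, ncpus - 1);          // one tick per target cpu
 *                      ... signal the other processors ...
 *                      if (!hw_cpu_sync(&sync_count, LockTimeOut))
 *                              ... some cpu failed to respond in time ...
 *
 *                      // each signalled processor:
 *                      hw_atomic_sub(&sync_count, 1);                  // interlocked decrement
 */
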
  391 /*
  392  *      unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int timeout)
  393  *
  394  *                      Spin until word changes or timeout. 
  395  *                      Return success (1) or failure (0).
  396  *                      Attempt will fail after timeout ticks of the timebase.
  397  *
   398  *                      This is used to ensure that a processor passes a certain point.
  399  *                      An example of use is to monitor the last interrupt time in the 
   400  *                      per_proc block.  This can be used to ensure that the other processor
  401  *                      has seen at least one interrupt since a specific time.
  402  */
  403                         .align  5
  404                         .globl  EXT(hw_cpu_wcng)
  405 
  406 LEXT(hw_cpu_wcng)
  407 
  408                         mftb    r10                                                             ; Get the low part of the time base
  409                         mr              r9,r3                                                   ; Save the sync word address
  410                         li              r3,1                                                    ; Assume we work
  411 
   412 wcngtry:        lwz             r11,0(r9)                                               ; Grab the value
  413                         cmplw   r11,r4                                                  ; Do they still match?
  414                         bnelr-                                                                  ; Nope, cool...
  415                         mftb    r12                                                             ; Time stamp us now
  416 
  417                         sub             r12,r12,r10                                             ; Get the elapsed time
  418                         cmplw   r5,r12                                                  ; Have we gone too long?
  419                         bge+    wcngtry                                                 ; Not yet...
  420                         
  421                         li              r3,0                                                    ; Set failure...
  422                         blr                                                                             ; Return, head hanging low...
  423                         
  424 
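/*
 *              Hedged sketch of the per_proc example described above; the field name is an
 *              illustrative assumption, not the real per_proc layout.
 *
 *                      unsigned int last = pp->last_interrupt_tb;      // hypothetical field
 *                      if (!hw_cpu_wcng(&pp->last_interrupt_tb, last, LockTimeOut))
 *                              ... the other processor took no interrupt within the timeout ...
 */
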
  425 /*
  426  *              unsigned int hw_lock_try(hw_lock_t)
  427  *
  428  *                      Try to acquire spin-lock. Return success (1) or failure (0)
  429  *                      Returns with preemption disabled on success.
  430  *
  431  */
  432                         .align  5
  433                         .globl  EXT(hw_lock_try)
  434 
  435 LEXT(hw_lock_try)
  436 
  437                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
  438                         mfmsr   r9                                                              ; Get the MSR value 
  439                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
  440                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
  441                         andc    r9,r9,r0                                                ; Clear FP and VEC
  442                         andc    r7,r9,r7                                                ; Clear EE as well
  443 
  444                         mtmsr   r7                                                              ; Disable interruptions and thus, preemption
  445 
  446                         lwz             r5,0(r3)                                                ; Quick load
  447                         andi.   r6,r5,ILK_LOCKED                                ; TEST...
  448                         bne--   .L_lock_try_failed                              ; No go...
  449 
  450 .L_lock_try_loop:       
  451                         lwarx   r5,0,r3                                                 ; Ld from addr of arg and reserve
  452 
  453                         andi.   r6,r5,ILK_LOCKED                                ; TEST...
  454                         ori             r5,r5,ILK_LOCKED
  455                         bne--   .L_lock_try_failedX                             ; branch if taken. Predict free 
  456         
  457                         stwcx.  r5,0,r3                                                 ; And SET (if still reserved)
  458                         bne--   .L_lock_try_loop                                ; If set failed, loop back 
  459                         
  460                         .globl  EXT(hwltlckPatch_isync)
  461 LEXT(hwltlckPatch_isync)   
  462                         isync
  463 
  464                         mfsprg  r6,1                                                    ; Get current activation 
  465                         lwz             r5,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
  466                         addi    r5,r5,1                                                 ; Bring up the disable count 
  467                         stw             r5,ACT_PREEMPT_CNT(r6)                  ; Save it back
  468 
  469                         mtmsr   r9                                                              ; Allow interruptions now 
  470                         li              r3,1                                                    ; Set that the lock was free 
  471                         blr
  472 
  473 .L_lock_try_failedX:
  474                         li              r6,lgKillResv                                   ; Killing field
  475                         stwcx.  r6,0,r6                                                 ; Kill reservation
  476                         
  477 .L_lock_try_failed:
  478                         mtmsr   r9                                                              ; Allow interruptions now 
  479                         li              r3,0                                                    ; FAILURE - lock was taken 
  480                         blr
  481 
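/*
 *              Hedged usage sketch for hw_lock_try (the lock variable is the same
 *              illustrative assumption used in the earlier sketches):
 *
 *                      if (hw_lock_try(&my_lock)) {
 *                              ... lock held, preemption disabled ...
 *                              hw_lock_unlock(&my_lock);
 *                      } else {
 *                              ... lock was busy; back off or do other work ...
 *                      }
 */
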
  482 /*
  483  *              unsigned int hw_lock_held(hw_lock_t)
  484  *
  485  *                      Return 1 if lock is held
  486  *                      Doesn't change preemption state.
  487  *                      N.B.  Racy, of course.
  488  */
  489                         .align  5
  490                         .globl  EXT(hw_lock_held)
  491 
  492 LEXT(hw_lock_held)
  493 
   494                         isync                                                                   ; Make sure we don't use a speculatively fetched lock 
  495                         lwz             r3, 0(r3)                                               ; Get lock value 
  496                         andi.   r6,r3,ILK_LOCKED                                ; Extract the ILK_LOCKED bit
  497                         blr
  498 
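/*
 *              Hedged example: because the check is racy, hw_lock_held suits only advisory
 *              uses such as assertions (names illustrative):
 *
 *                      assert(hw_lock_held(&my_lock));                 // we believe we own it here
 */
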
  499 /*
  500  *              uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, uint32_t *dest)
  501  *
   502  *                      Compare oldval to the contents of the area; if equal, store newval and return true,
   503  *                      else return false and do not store.
  504  *                      This is an atomic operation
  505  */
  506                         .align  5
  507                         .globl  EXT(hw_compare_and_store)
  508 
  509 LEXT(hw_compare_and_store)
  510 
  511                         mr              r6,r3                                                   ; Save the old value
  512 
  513 cstry:          lwarx   r9,0,r5                                                 ; Grab the area value
  514                         li              r3,1                                                    ; Assume it works
  515                         cmplw   cr0,r9,r6                                               ; Does it match the old value?
  516                         bne--   csfail                                                  ; No, it must have changed...
  517                         stwcx.  r4,0,r5                                                 ; Try to save the new value
  518                         bne--   cstry                                                   ; Didn't get it, try again...
  519                         .globl  EXT(hwcsatomicPatch_isync)
  520 LEXT(hwcsatomicPatch_isync)   
  521                         isync                                                                   ; Just hold up prefetch
  522                         blr                                                                             ; Return...
  523                         
  524 csfail:         li              r3,lgKillResv                                   ; Killing field
  525                         stwcx.  r3,0,r3                                                 ; Blow reservation
  526                         
  527                         li              r3,0                                                    ; Set failure
  528                         blr                                                                             ; Better luck next time...
  529 
  530 
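/*
 *              Hedged sketch of the usual compare-and-store retry loop (the counter is an
 *              illustrative assumption):
 *
 *                      uint32_t old, new;
 *                      do {
 *                              old = *counter;
 *                              new = old + 1;
 *                      } while (!hw_compare_and_store(old, new, counter));
 */
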
  531 /*
  532  *              uint32_t hw_atomic_add(uint32_t *dest, uint32_t delt)
  533  *
  534  *                      Atomically add the second parameter to the first.
  535  *                      Returns the result.
  536  *
  537  */
  538                         .align  5
  539                         .globl  EXT(hw_atomic_add)
  540 
  541 LEXT(hw_atomic_add)
  542 
  543                         mr              r6,r3                                                   ; Save the area
  544 
  545 addtry:         lwarx   r3,0,r6                                                 ; Grab the area value
  546                         add             r3,r3,r4                                                ; Add the value
  547                         stwcx.  r3,0,r6                                                 ; Try to save the new value
  548                         bne--   addtry                                                  ; Didn't get it, try again...
  549                         blr                                                                             ; Return...
  550 
  551 
  552 /*
  553  *              uint32_t hw_atomic_sub(uint32_t *dest, uint32_t delt)
  554  *
  555  *                      Atomically subtract the second parameter from the first.
  556  *                      Returns the result.
  557  *
  558  */
  559                         .align  5
  560                         .globl  EXT(hw_atomic_sub)
  561 
  562 LEXT(hw_atomic_sub)
  563 
  564                         mr              r6,r3                                                   ; Save the area
  565 
  566 subtry:         lwarx   r3,0,r6                                                 ; Grab the area value
  567                         sub             r3,r3,r4                                                ; Subtract the value
  568                         stwcx.  r3,0,r6                                                 ; Try to save the new value
  569                         bne--   subtry                                                  ; Didn't get it, try again...
  570                         blr                                                                             ; Return...
  571 
  572 
  573 /*
  574  *              uint32_t hw_atomic_or(uint32_t *dest, uint32_t mask)
  575  *
  576  *                      Atomically ORs the second parameter into the first.
  577  *                      Returns the result.
  578  */
  579                         .align  5
  580                         .globl  EXT(hw_atomic_or)
  581 
  582 LEXT(hw_atomic_or)
  583 
  584                         mr              r6,r3                                                   ; Save the area                 
  585 
  586 ortry:          lwarx   r3,0,r6                                                 ; Grab the area value
  587                         or              r3,r3,r4                                                ; OR the value 
  588                         stwcx.  r3,0,r6                                                 ; Try to save the new value
  589                         bne--   ortry                                                   ; Did not get it, try again...
  590                         blr                                                                             ; Return...
  591 
  592 
  593 /*
  594  *              uint32_t hw_atomic_and(uint32_t *dest, uint32_t mask)
  595  *
  596  *                      Atomically ANDs the second parameter with the first.
  597  *                      Returns the result.
  598  *
  599  */
  600                         .align  5
  601                         .globl  EXT(hw_atomic_and)
  602 
  603 LEXT(hw_atomic_and)
  604 
  605                         mr              r6,r3                                                   ; Save the area                 
  606 
  607 andtry:         lwarx   r3,0,r6                                                 ; Grab the area value
  608                         and             r3,r3,r4                                                ; AND the value 
  609                         stwcx.  r3,0,r6                                                 ; Try to save the new value
  610                         bne--   andtry                                                  ; Did not get it, try again...
  611                         blr                                                                             ; Return...
  612 
  613 
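/*
 *              Hedged examples of the four read-modify-write primitives above (the
 *              variables are illustrative assumptions); each routine returns the updated
 *              value.
 *
 *                      uint32_t refs  = hw_atomic_add(&obj_refcount, 1);
 *                      uint32_t left  = hw_atomic_sub(&obj_refcount, 1);
 *                      uint32_t flags = hw_atomic_or(&obj_flags, 0x10);        // set a flag bit
 *                      flags          = hw_atomic_and(&obj_flags, ~0x10u);     // clear it again
 */
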
  614 /*
  615  *              void hw_queue_atomic(unsigned int * anchor, unsigned int * elem, unsigned int disp)
  616  *
  617  *                      Atomically inserts the element at the head of the list
  618  *                      anchor is the pointer to the first element
  619  *                      element is the pointer to the element to insert
  620  *                      disp is the displacement into the element to the chain pointer
  621  *
  622  *          NOTE: OSEnqueueAtomic() is aliased to this, see xnu/libkern/Makefile
  623  */
  624                         .align  5
  625                         .globl  EXT(hw_queue_atomic)
  626 
  627 LEXT(hw_queue_atomic)
  628 
  629                         mr              r7,r4                                                   ; Make end point the same as start
  630                         mr              r8,r5                                                   ; Copy the displacement also
  631                         b               hw_queue_comm                                   ; Join common code...
  632 
  633 /*
  634  *              void hw_queue_atomic_list(unsigned int * anchor, unsigned int * first, unsigned int * last, unsigned int disp)
  635  *
  636  *                      Atomically inserts the list of elements at the head of the list
  637  *                      anchor is the pointer to the first element
  638  *                      first is the pointer to the first element to insert
  639  *                      last is the pointer to the last element to insert
  640  *                      disp is the displacement into the element to the chain pointer
  641  */
  642                         .align  5
  643                         .globl  EXT(hw_queue_atomic_list)
  644 
  645 LEXT(hw_queue_atomic_list)
  646 
  647                         mr              r7,r5                                                   ; Make end point the same as start
  648                         mr              r8,r6                                                   ; Copy the displacement also
  649 
  650 hw_queue_comm:
  651                         lwarx   r9,0,r3                                                 ; Pick up the anchor
  652                         stwx    r9,r8,r7                                                ; Chain that to the end of the new stuff
  653                         eieio                                                                   ; Make sure this store makes it before the anchor update
  654                         stwcx.  r4,0,r3                                                 ; Try to chain into the front
  655                         bne--   hw_queue_comm                                   ; Didn't make it, try again...
  656 
  657                         blr                                                                             ; Return...
  658 
  659 /*
  660  *              unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp)
  661  *
  662  *                      Atomically removes the first element in a list and returns it.
  663  *                      anchor is the pointer to the first element
  664  *                      disp is the displacement into the element to the chain pointer
  665  *                      Returns element if found, 0 if empty.
  666  *
  667  *          NOTE: OSDequeueAtomic() is aliased to this, see xnu/libkern/Makefile
  668  */
  669                         .align  5
  670                         .globl  EXT(hw_dequeue_atomic)
  671 
  672 LEXT(hw_dequeue_atomic)
  673 
  674                         mr              r5,r3                                                   ; Save the anchor
  675 
  676 hw_dequeue_comm:
  677                         lwarx   r3,0,r5                                                 ; Pick up the anchor
  678                         mr.             r3,r3                                                   ; Is the list empty?
   679                         beq--   hdcFail                                                 ; Leave, the list is empty...
  680                         lwzx    r9,r4,r3                                                ; Get the next in line
  681                         stwcx.  r9,0,r5                                                 ; Try to chain into the front
  682                         beqlr++                                                                 ; Got the thing, go away with it...
  683                         b               hw_dequeue_comm                                 ; Did not make it, try again...
  684 
  685 hdcFail:        li              r4,lgKillResv                                   ; Killing field
  686                         stwcx.  r4,0,r4                                                 ; Dump reservation
  687                         blr                                                                             ; Leave...
  688 
  689 
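/*
 *              Hedged sketch of the lock-free LIFO formed by hw_queue_atomic and
 *              hw_dequeue_atomic; the element layout and names are illustrative
 *              assumptions.  "disp" is the byte offset of the chain pointer within the
 *              element.
 *
 *                      struct my_elem {
 *                              unsigned int    payload;
 *                              unsigned int    *next;                  // chain pointer
 *                      };
 *                      static unsigned int     list_head;              // the anchor word
 *
 *                      hw_queue_atomic(&list_head, (unsigned int *)e,
 *                                      offsetof(struct my_elem, next));
 *
 *                      struct my_elem *e2 = (struct my_elem *)
 *                              hw_dequeue_atomic(&list_head, offsetof(struct my_elem, next));
 *                                                                      // e2 == 0 when the list was empty
 */
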
  690 /*
  691  * Routines for mutex lock debugging.
  692  */
  693 
  694 /* 
  695  * Gets lock check flags in CR6: CR bits 24-27
  696  */
  697 #define CHECK_SETUP(rg)                                                                                 \
  698                         lbz             rg,lglcksWork(0)                                __ASMNL__       \
  699                         mtcrf   2,rg                                                    __ASMNL__ 
  700 
  701 
  702 /*
  703  * Checks for expected lock type.
  704  */
  705 #define CHECK_MUTEX_TYPE()                                                                              \
  706                         bf              MUTEX_ATTR_DEBUGb,1f                    __ASMNL__       \
  707                         bt              24+disLktypeb,1f                                __ASMNL__       \
  708                         lwz             r10,MUTEX_TYPE(r3)                              __ASMNL__       \
  709                         cmpwi   r10,MUTEX_TAG                                   __ASMNL__       \
  710                         beq++   1f                                                              __ASMNL__       \
  711                         PROLOG(0)                                                               __ASMNL__       \
  712                         mr              r4,r11                                                  __ASMNL__       \
  713                         mr              r5,r10                                                  __ASMNL__       \
  714                         lis             r3,hi16(not_a_mutex)                    __ASMNL__       \
  715                         ori             r3,r3,lo16(not_a_mutex)                 __ASMNL__       \
  716                         bl              EXT(panic)                                              __ASMNL__       \
  717                         BREAKPOINT_TRAP                                                 __ASMNL__       \
  718 1:
  719 
  720         .data
  721 not_a_mutex:
  722                         STRINGD "mutex (0x%08X) not a mutex type (0x%08X)\n\000"
  723                         .text
  724 
  725 /* 
   726  * Verifies that the releasing thread is the lock's owner in "unlock" situations.
  727  */
  728 #define CHECK_THREAD(thread_offset)                                                             \
  729                         bf              MUTEX_ATTR_DEBUGb,3f                    __ASMNL__       \
  730                         bt              24+disLkThreadb,3f                              __ASMNL__       \
  731                         mfsprg  r10,1                                                   __ASMNL__       \
  732                         lwz             r5,MUTEX_DATA(r3)                               __ASMNL__       \
  733                         rlwinm. r9,r5,0,0,29                                    __ASMNL__       \
  734                         bne++   1f                                                              __ASMNL__       \
  735                         lis             r3,hi16(not_held)                               __ASMNL__       \
  736                         ori             r3,r3,lo16(not_held)                    __ASMNL__       \
  737                         b               2f                                                              __ASMNL__       \
  738 1:                                                                                                      __ASMNL__       \
  739                         cmpw    r9,r10                                                  __ASMNL__       \
  740                         beq++   3f                                                              __ASMNL__       \
  741                         mr              r5,r10                                                  __ASMNL__       \
  742                         mr              r6,r9                                                   __ASMNL__       \
  743                         lis             r3,hi16(wrong_thread)                   __ASMNL__       \
  744                         ori             r3,r3,lo16(wrong_thread)                __ASMNL__       \
  745 2:                                                                                                      __ASMNL__       \
  746                         mr              r4,r11                                                  __ASMNL__       \
  747                         PROLOG(0)                                                               __ASMNL__       \
  748                         bl              EXT(panic)                                              __ASMNL__       \
  749                         BREAKPOINT_TRAP                                                 __ASMNL__       \
  750 3:
  751 
  752         .data
  753 not_held:
  754         STRINGD "mutex (0x%08X) not held\n\000"
  755 wrong_thread:
  756         STRINGD "mutex (0x%08X) unlocked by non-owner(0x%08X), current owner(0x%08X)\n\000"
  757         .text
  758 
  759 #define CHECK_MYLOCK()                                                                                  \
  760                         bf              MUTEX_ATTR_DEBUGb,1f                    __ASMNL__       \
  761                         bt              24+disLkMyLckb,1f                               __ASMNL__       \
  762                         mfsprg  r10,1                                                   __ASMNL__       \
  763                         lwz             r9,MUTEX_DATA(r3)                               __ASMNL__       \
  764                         rlwinm  r9,r9,0,0,29                                    __ASMNL__       \
  765                         cmpw    r9,r10                                                  __ASMNL__       \
  766                         bne++   1f                                                              __ASMNL__       \
  767                         mr              r4,r11                                                  __ASMNL__       \
  768                         lis             r3,     hi16(mylock_attempt)            __ASMNL__       \
  769                         ori             r3,r3,lo16(mylock_attempt)              __ASMNL__       \
  770                         bl              EXT(panic)                                              __ASMNL__       \
  771                         BREAKPOINT_TRAP                                                 __ASMNL__       \
  772 1:      
  773         
  774         .data
  775 mylock_attempt:
  776         STRINGD "mutex (0x%08X) recursive lock attempt\n\000"
  777         .text
  778 
  779 #define LCK_STACK(lck, stack, lck_stack, frame_cnt, lr_save, tmp)               \
  780                         bf              24+enaLkExtStckb,3f                             __ASMNL__       \
  781                         addi    lck_stack,lck,MUTEX_STACK               __ASMNL__       \
  782                         li              frame_cnt,MUTEX_FRAMES-1                __ASMNL__       \
  783 1:                                                                                                      __ASMNL__       \
  784                         mr              tmp,stack                                               __ASMNL__       \
  785                         lwz             stack,0(stack)                                  __ASMNL__       \
  786                         xor             tmp,stack,tmp                                   __ASMNL__       \
  787                         cmplwi  tmp,8192                                                __ASMNL__       \
  788                         bge--   2f                                                              __ASMNL__       \
  789                         lwz             lr_save,FM_LR_SAVE(stack)               __ASMNL__       \
  790                         stwu    lr_save,4(lck_stack)                    __ASMNL__       \
  791                         subi    frame_cnt,frame_cnt,1                   __ASMNL__       \
  792                         cmpi    cr0,frame_cnt,0                                 __ASMNL__       \
  793                         bne             1b                                                              __ASMNL__       \
  794                         b               3f                                                              __ASMNL__       \
  795 2:                                                                                                      __ASMNL__       \
  796                         li              tmp,0                                                   __ASMNL__       \
  797                         stwu    tmp,4(lck_stack)                                __ASMNL__       \
  798                         subi    frame_cnt,frame_cnt,1                   __ASMNL__       \
  799                         cmpi    cr0,frame_cnt,0                                 __ASMNL__       \
  800                         bne             2b                                                              __ASMNL__       \
  801 3:      
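      /*
       * LCK_STACK records a short backtrace into the lock's debug area: starting
       * from the current frame pointer it follows the saved back-chain, copying
       * each caller's saved LR into the next slot, and stops (zero-filling the
       * remaining slots) once two successive frame pointers differ in their
       * upper bits (XOR >= 8KB), which is taken as the chain leaving the kernel
       * stack.  A C-level sketch with an assumed PPC32 frame layout:
       *
       *      enum { MUTEX_FRAMES_SKETCH = 8 };            // illustrative count
       *
       *      struct frame_sketch {
       *          struct frame_sketch *backchain;          // offset 0
       *          unsigned int         cr_save;            // offset 4
       *          unsigned int         lr_save;            // FM_LR_SAVE
       *      };
       *
       *      static void lck_stack_sketch(unsigned int slots[], struct frame_sketch *sp)
       *      {
       *          int i = 0;
       *          while (i < MUTEX_FRAMES_SKETCH - 1) {
       *              struct frame_sketch *next = sp->backchain;
       *              if (((unsigned long)next ^ (unsigned long)sp) >= 8192)
       *                  break;                           // back-chain left the stack
       *              slots[i++] = next->lr_save;          // record caller's return address
       *              sp = next;
       *          }
       *          while (i < MUTEX_FRAMES_SKETCH - 1)
       *              slots[i++] = 0;                      // zero the unused slots
       *      }
       */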
  802 
  803 /*
  804  *              void mutex_init(mutex_t* l, etap_event_t etap)
  805  *
  806  */
  807                         .align  5
  808                         .globl  EXT(mutex_init)
  809 LEXT(mutex_init)
  810 
  811                         PROLOG(0)
  812                         li              r10,0
  813                         stw             r10,MUTEX_DATA(r3)                              ; clear lock word
  814                         sth             r10,MUTEX_WAITERS(r3)                   ; init waiter count
  815                         sth             r10,MUTEX_PROMOTED_PRI(r3)
  816 #if     MACH_LDEBUG
  817                         li              r11,MUTEX_ATTR_DEBUG
  818                         stw             r10,MUTEX_STACK(r3)                             ; init caller pc
  819                         stw             r10,MUTEX_THREAD(r3)                    ; and owning thread
  820                         li              r9,     MUTEX_TAG
  821                         stw             r9,     MUTEX_TYPE(r3)                          ; set lock type
  822                         stw             r11,MUTEX_ATTR(r3)
  823                         addi    r8,r3,MUTEX_STACK-4
  824                         li              r9,MUTEX_FRAMES
  825 mlistck:
  826                         stwu    r10,4(r8)                                               ; init stack
  827                         subi    r9,r9,1
  828                         cmpi    cr0,r9,0
  829                         bne             mlistck
  830 #endif  /* MACH_LDEBUG */
  831                         EPILOG
  832                         blr
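      /*
       * What mutex_init does above, roughly in C.  The struct layout is an
       * illustrative stand-in for the assym.s offsets (MUTEX_DATA, MUTEX_WAITERS
       * and so on), not the kernel's real declaration; the constants are the ones
       * the assembly already uses.
       *
       *      typedef struct {
       *          unsigned int   data;                 // MUTEX_DATA: owner | flag bits
       *          unsigned short waiters;              // MUTEX_WAITERS
       *          unsigned short promoted_pri;         // MUTEX_PROMOTED_PRI
       *      #if MACH_LDEBUG
       *          unsigned int   type;                 // MUTEX_TYPE
       *          unsigned int   attr;                 // MUTEX_ATTR
       *          unsigned int   thread;               // MUTEX_THREAD: owning thread
       *          unsigned int   stack[MUTEX_FRAMES];  // MUTEX_STACK: caller pc + backtrace
       *      #endif
       *      } mutex_init_sketch_t;
       *
       *      void mutex_init_sketch(mutex_init_sketch_t *m)
       *      {
       *          m->data = 0;                         // clear lock word
       *          m->waiters = 0;                      // init waiter count
       *          m->promoted_pri = 0;
       *      #if MACH_LDEBUG
       *          m->thread = 0;                       // no owning thread
       *          m->type = MUTEX_TAG;                 // set lock type
       *          m->attr = MUTEX_ATTR_DEBUG;
       *          for (int i = 0; i < MUTEX_FRAMES; i++)
       *              m->stack[i] = 0;                 // init debug stack
       *      #endif
       *      }
       */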
  833 
  834 /*
  835  *              void lck_mtx_lock_ext(lck_mtx_ext_t*)
  836  *
  837  */
  838                         .align  5
  839                         .globl  EXT(lck_mtx_lock_ext)
  840 LEXT(lck_mtx_lock_ext)
  841 #if     MACH_LDEBUG
  842                         .globl  EXT(mutex_lock)
  843 LEXT(mutex_lock)
  844 
  845                         .globl  EXT(_mutex_lock)
  846 LEXT(_mutex_lock)
  847 #endif
  848                         mr              r11,r3                                                  ; Save lock addr
  849 mlckeEnter:
  850                         lwz             r0,MUTEX_ATTR(r3)
  851                         mtcrf   1,r0                                                    ; Set cr7
  852                         CHECK_SETUP(r12)        
  853                         CHECK_MUTEX_TYPE()
  854 
  855                         bf              MUTEX_ATTR_DEBUGb,L_mutex_lock_assert_wait_2
  856                         PROLOG(0)
  857                         bl              EXT(assert_wait_possible)
  858                         mr.             r3,r3
  859                         bne             L_mutex_lock_assert_wait_1
  860                         lis             r3,hi16(L_mutex_lock_assert_wait_panic_str)
  861                         ori             r3,r3,lo16(L_mutex_lock_assert_wait_panic_str)
  862                         bl              EXT(panic)
  863                         BREAKPOINT_TRAP                                                 ; We die here anyway
  864 
  865                         .data
  866 L_mutex_lock_assert_wait_panic_str:
  867                         STRINGD "mutex lock attempt with assert_wait_possible false\n\000"
  868                         .text
  869 
  870 L_mutex_lock_assert_wait_1:
  871                         lwz             r3,FM_ARG0(r1)
  872                         lwz             r11,FM_ARG0+0x04(r1)
  873                         lwz             r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
  874                         mtcr    r2
  875                         EPILOG
  876 L_mutex_lock_assert_wait_2:
  877 
  878                         mfsprg  r6,1                                                    ; load the current thread
  879                         bf              MUTEX_ATTR_STATb,mlckestatskip  ; Branch if no stat
  880                         lwz             r5,MUTEX_GRP(r3)                                ; Load lock group
  881                         li              r7,GRP_MTX_STAT_UTIL+4                  ; Set stat util offset
  882 mlckestatloop:
  883                         lwarx   r8,r7,r5                                                ; Load stat util cnt
  884                         addi    r8,r8,1                                                 ; Increment stat util cnt
  885                         stwcx.  r8,r7,r5                                                ; Store stat util cnt
  886                         bne--   mlckestatloop                                   ; Retry if failed
  887                         mr.             r8,r8                                                   ; Test for zero
  888                         bne++   mlckestatskip                                   ; Skip if stat util cnt did not wrap
  889                         lwz             r8,GRP_MTX_STAT_UTIL(r5)                ; Load upper stat util cnt
  890                         addi    r8,r8,1                                                 ; Increment upper stat util cnt
  891                         stw             r8,GRP_MTX_STAT_UTIL(r5)                ; Store upper stat util cnt
  892 mlckestatskip:
  893                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock quickly
  894                         li              r4,0
  895                         li              r8,0
  896                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
  897                         mfmsr   r9                                                              ; Get the MSR value
  898                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
  899                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
  900                         andc    r9,r9,r0                                                ; Clear FP and VEC
  901                         andc    r7,r9,r7                                                ; Clear EE as well
  902                         mtmsr   r7                                                              ; Turn off interruptions 
  903                         isync                                                                   ; May have turned off vec and fp here 
  904                         mr.             r5,r5                                                   ; Quick check
  905                         bne--   mlckespin01                                             ; Can not get it right now...
  906 
  907 mlcketry:
  908                         lwarx   r5,MUTEX_DATA,r3                                ; load the mutex lock
  909                         mr.             r5,r5
  910                         bne--   mlckespin0                                              ; Can not get it right now...
  911                         stwcx.  r6,MUTEX_DATA,r3                                ; grab the lock
  912                         bne--   mlcketry                                                ; loop back if failed
  913                         .globl  EXT(mlckePatch_isync)
  914 LEXT(mlckePatch_isync)
  915                         isync                                                                   ; stop prefetching
  916                         mflr    r12
  917                         bf              MUTEX_ATTR_DEBUGb,mlckedebskip
  918                         mr              r8,r6                                                   ; Get the active thread
  919                         stw             r12,MUTEX_STACK(r3)                             ; Save our caller
  920                         stw             r8,MUTEX_THREAD(r3)                             ; Set the mutex's holding thread
  921                         mr              r5,r1
  922                         LCK_STACK(r3,r5,r6,r7,r8,r10)
  923 mlckedebskip:
  924                         mtmsr   r9                                                              ; Say, any interrupts pending?
  925                         blr
  926 
  927 mlckespin0:
  928                         li              r5,lgKillResv                                   ; Killing field
  929                         stwcx.  r5,0,r5                                                 ; Kill reservation
  930 mlckespin01:
  931                         mflr    r12
  932                         mtmsr   r9                                                              ; Say, any interrupts pending?
  933                         bl              mlckspin1       
  934                         mtmsr   r7                                                              ; Turn off interruptions, vec and fp off already
  935                         mtlr    r12
  936                         b               mlcketry
  937 
  938 /*
  939  *              void lck_mtx_lock(lck_mtx_t*)
  940  *
  941  */
  942                         .align  5
  943                         .globl  EXT(lck_mtx_lock)
  944 LEXT(lck_mtx_lock)
  945 
  946 #if     !MACH_LDEBUG
  947                         .globl  EXT(mutex_lock)
  948 LEXT(mutex_lock)
  949 
  950                         .globl  EXT(_mutex_lock)
  951 LEXT(_mutex_lock)
  952 #endif
  953 
  954                         mfsprg  r6,1                                                    ; load the current thread
  955                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock quickly
  956                         mr              r11,r3                                                  ; Save lock addr
  957                         li              r4,0
  958                         li              r8,0
  959                         li              r9,0
  960                         mr.             r5,r5                                                   ; Quick check
  961                         bne--   mlckspin00                                              ; Indirect or Can not get it right now...
  962 
  963 mlcktry:
  964                         lwarx   r5,MUTEX_DATA,r3                                ; load the mutex lock
  965                         mr.             r5,r5
  966                         bne--   mlckspin01                                              ; Can not get it right now...
  967                         stwcx.  r6,MUTEX_DATA,r3                                ; grab the lock
  968                         bne--   mlcktry                                                 ; loop back if failed
  969                         .globl  EXT(mlckPatch_isync)
  970 LEXT(mlckPatch_isync)
  971                         isync                                                                   ; stop prefetching
  972                         blr
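      /*
       * The lwarx/stwcx. pair above is PowerPC load-reserve/store-conditional.
       * In portable terms the fast path is roughly a compare-and-swap of the lock
       * word from 0 to the current thread pointer, with the trailing isync acting
       * as the acquire barrier.  A minimal C11 sketch, assuming a hypothetical
       * current_thread_ptr() helper and slow-path routine:
       *
       *      #include <stdatomic.h>
       *      #include <stdint.h>
       *
       *      extern uintptr_t current_thread_ptr(void);          // assumed helper
       *      extern void lck_mtx_lock_contended_sketch(_Atomic uintptr_t *lock);
       *
       *      void lck_mtx_lock_fast_sketch(_Atomic uintptr_t *lock)
       *      {
       *          uintptr_t expected = 0;
       *          // Install our thread pointer only while the word is free.
       *          if (atomic_compare_exchange_strong_explicit(lock, &expected,
       *                  current_thread_ptr(), memory_order_acquire,
       *                  memory_order_relaxed))
       *              return;                                     // uncontended: done
       *          lck_mtx_lock_contended_sketch(lock);            // contended: spin, then block
       *      }
       */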
  973 
  974 mlckspin00:
  975                         cmpli   cr0,r5,MUTEX_IND                                ; Is it a mutex indirect 
  976                         bne--   mlckspin02                                              ; No, go handle contention 
  977                         lwz             r3,MUTEX_PTR(r3)                                ; load mutex ext pointer
  978                         b               mlckeEnter
  979 mlckspin01:
  980                         li              r5,lgKillResv                                   ; Killing field
  981                         stwcx.  r5,0,r5                                                 ; Kill reservation
  982 mlckspin02:
  983                         mflr    r12
  984                         li              r0,0
  985                         mtcrf   1,r0                                                    ; Set cr7 to zero
  986                         bl              mlckspin1
  987                         mtlr    r12
  988                         b               mlcktry
  989 
  990 
  991 mlckspin1:
  992                         mr.             r4,r4                                                   ; Test timeout value
  993                         bne++   mlckspin2
  994                         lis             r4,hi16(EXT(MutexSpin))                 ; Get the high part 
  995                         ori             r4,r4,lo16(EXT(MutexSpin))              ; And the low part
  996                         lwz             r4,0(r4)                                                ; Get spin timeout value
  997                         mr.             r4,r4                                                   ; Test spin timeout value
  998                         bne++   mlckspin2                                               ; Branch if a spin timeout is set
  999                         crclr   mlckmiss                                                ; Clear miss test
 1000                         b               mlckslow1                                               ; Don't try to spin
 1001 
 1002 mlckspin2:      mr.             r8,r8                                                   ; Is this the first spin attempt (r8 == 0)?
 1003                         bne++   mlckspin3                                               ; No, MSR setup already done, just spin
 1004                         crclr   mlckmiss                                                ; First attempt, clear miss test
 1005                         mr.             r9,r9                                                   ; Does r9 already hold the saved MSR?
 1006                         bne++   mlckspin3                                               ; Yes, skip the MSR setup
 1007                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1008                         mfmsr   r9                                                              ; Get the MSR value
 1009                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1010                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1011                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1012                         andc    r7,r9,r7                                                ; Clear EE as well
 1013                         mtmsr   r7                                                              ; Turn off interruptions 
 1014                         isync                                                                   ; May have turned off vec and fp here 
 1015                         mftb    r8                                                              ; Get timestamp on entry
 1016                         b               mlcksniff
 1017 
 1018 mlckspin3:      mtmsr   r7                                                              ; Turn off interruptions 
 1019                         mftb    r8                                                              ; Get timestamp on entry
 1020 
 1021 mlcksniff:      lwz             r5,MUTEX_DATA(r3)                               ; Get that lock in here
 1022                         mr.             r5,r5                                                   ; Is the lock held
 1023                         beq++   mlckretry                                               ; No, try for it again...
 1024                         rlwinm. r10,r5,0,0,29                                   ; Extract the lock owner
 1025                         beq++   mlckslow0                                               ; InterLock is held
 1026                         bf              MUTEX_ATTR_STATb,mlStatSkip             ; Branch if no stat
 1027                         andi.   r5,r5,ILK_LOCKED                                ; extract interlocked?
 1028                         bne             mlStatSkip                                              ; yes, skip
 1029                         bt              mlckmiss,mlStatSkip                             ; miss already counted
 1030                         crset   mlckmiss                                                ; Remember miss recorded
 1031                         lwz             r5,MUTEX_GRP(r3)                                ; Load lock group
 1032                         addi    r5,r5,GRP_MTX_STAT_MISS+4                       ; Add stat miss offset
 1033 mlStatLoop:
 1034                         lwarx   r6,0,r5                                                 ; Load stat miss cnt
 1035                         addi    r6,r6,1                                                 ; Increment stat miss cnt
 1036                         stwcx.  r6,0,r5                                                 ; Update stat miss cnt
 1037                         bne--   mlStatLoop                                              ; Retry if failed
 1038                         mfsprg  r6,1                                                    ; Reload current thread
 1039 mlStatSkip:
 1040                         lwz             r2,ACT_MACT_SPF(r10)                    ; Get the special flags
 1041                         rlwinm. r2,r2,0,OnProcbit,OnProcbit     ; Is OnProcbit set?
 1042                         beq             mlckslow0                                               ; Lock owner isn't running
 1043                         lis             r2,hi16(TH_OPT_DELAYIDLE)               ; Get DelayedIdle Option
 1044                         ori             r2,r2,lo16(TH_OPT_DELAYIDLE)    ; Get DelayedIdle Option
 1045                         lwz             r10,THREAD_OPTIONS(r10)                 ; Get the thread options
 1046                         and.    r10,r10,r2                                              ; Is DelayedIdle set?
 1047                         bne             mlckslow0                                               ; Lock owner is in delay idle
 1048 
 1049                         mftb    r10                                                             ; Time stamp us now
 1050                         sub             r10,r10,r8                                              ; Get the elapsed time
 1051                         cmplwi  r10,128                                                 ; Have we been spinning for 128 tb ticks?
 1052                         blt++   mlcksniff                                               ; Not yet...
 1053                         
 1054                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1055 
 1056 ;                       The following instructions force the pipeline to be interlocked so that only one
 1057 ;                       instruction is issued per cycle.  This ensures that we stay enabled for a long enough
 1058 ;                       time; if the window were too short, pending interruptions would not have a chance to be taken.
 1059 
 1060                         subi    r4,r4,128                                               ; Back off elapsed time from timeout value
 1061                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
 1062                         mr.             r4,r4                                                   ; See if we used the whole timeout
 1063                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
 1064                         
 1065                         ble--   mlckslow1                                               ; We failed
 1066                         b               mlckspin3                                               ; Now that we've opened an enable window, keep trying...
 1067 mlckretry:
 1068                         mtmsr   r9                                                              ; Restore interrupt state
 1069                         li              r8,1                                                    ; Show already through once
 1070                         blr     
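      /*
       * A C-level sketch of the spin policy in mlckspin1/mlcksniff above: spin
       * only while the owner appears to be running, with interrupts disabled in
       * 128 timebase-tick slices so that pending interrupts still get a window,
       * and give up once the MutexSpin budget is spent.  MutexSpin mirrors the
       * external the assembly reads; the other helpers are assumptions made for
       * illustration.
       *
       *      extern unsigned int MutexSpin;                      // spin budget, tb ticks
       *      extern unsigned long read_timebase(void);           // assumed helpers
       *      extern void disable_interrupts(void);
       *      extern void enable_interrupts(void);
       *      extern int owner_is_running(unsigned int lock_word);
       *
       *      static int mutex_spin_sketch(volatile unsigned int *lock_word)
       *      {
       *          int budget = (int)MutexSpin;
       *          while (budget > 0) {
       *              disable_interrupts();
       *              unsigned long start = read_timebase();
       *              while (read_timebase() - start < 128) {
       *                  unsigned int w = *lock_word;
       *                  if (w == 0) {                           // looks free: retry the CAS
       *                      enable_interrupts();
       *                      return 1;
       *                  }
       *                  if (!owner_is_running(w)) {             // owner off-CPU: stop spinning
       *                      enable_interrupts();
       *                      return 0;
       *                  }
       *              }
       *              enable_interrupts();                        // open a window for interrupts
       *              budget -= 128;
       *          }
       *          return 0;                                       // budget spent: block instead
       *      }
       */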
 1071 
 1072 mlckslow0:                                                                                      ; We couldn't get the lock
 1073                         mtmsr   r9                                                              ; Restore interrupt state
 1074 
 1075 mlckslow1:
 1076                         mtlr    r12
 1077 
 1078                         PROLOG(0)
 1079 .L_ml_retry:
 1080                         bl              lockDisa                                                ; Go get a lock on the mutex's interlock lock
 1081                         mr.             r4,r3                                                   ; Did we get it?
 1082                         lwz             r3,FM_ARG0(r1)                                  ; Restore the lock address
 1083                         bne++   mlGotInt                                                ; We got it just fine...
 1084                         mr              r4,r11                                                  ; Saved lock addr
 1085                         lis             r3,hi16(mutex_failed1)                  ; Get the failed mutex message
 1086                         ori             r3,r3,lo16(mutex_failed1)               ; Get the failed mutex message
 1087                         bl              EXT(panic)                                              ; Call panic
 1088                         BREAKPOINT_TRAP                                                 ; We die here anyway, can not get the lock
 1089         
 1090                         .data
 1091 mutex_failed1:
 1092                         STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock\n\000"
 1093                         .text
 1094                         
 1095 mlGotInt:
 1096                         
 1097 ;                       Note that there is no reason to do a load and reserve here.  We already
 1098 ;                       hold the interlock lock and no one can touch this field unless they 
 1099 ;                       have that, so we're free to play
 1100 
 1101                         lwz             r4,MUTEX_DATA(r3)                               ; Get the mutex's lock field
 1102                         rlwinm. r9,r4,30,2,31                                   ; So, can we have it?
 1103                         bne-    mlInUse                                                 ; Nope, somebody's playing already...
 1104 
 1105                         bf++            MUTEX_ATTR_DEBUGb,mlDebSkip
 1106                         CHECK_SETUP(r5)
 1107                         mfsprg  r9,1                                                    ; Get the current activation
 1108                         lwz             r5,0(r1)                                                ; Get previous save frame
 1109                         lwz             r6,FM_LR_SAVE(r5)                               ; Get our caller's address
 1110                         mr              r8,r9                                                   ; Get the active thread
 1111                         stw             r6,MUTEX_STACK(r3)                              ; Save our caller
 1112                         stw             r8,MUTEX_THREAD(r3)                             ; Set the mutex's holding thread
 1113                         LCK_STACK(r3,r5,r6,r7,r8,r10)
 1114 mlDebSkip:
 1115                         mr              r3,r11                                                  ; Get the based lock address
 1116                         bl      EXT(lck_mtx_lock_acquire)
 1117                         lwz             r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
 1118                         mfsprg  r5,1
 1119                         mtcr    r2
 1120                         mr.             r4,r3
 1121                         lwz             r3,FM_ARG0(r1)                                  ; restore r3 (saved in prolog)
 1122                         lwz             r11,FM_ARG0+0x04(r1)                    ; restore r11 (saved in prolog)
 1123                         beq             mlUnlock
 1124                         ori             r5,r5,WAIT_FLAG
 1125 
 1126 mlUnlock:       eieio   
 1127                         stw     r5,MUTEX_DATA(r3)                                       ; grab the mutexlock and free the interlock
 1128 
 1129                         EPILOG                                                                  ; Restore all saved registers
 1130                         b               epStart                                                 ; Go enable preemption...
 1131 
 1132 ;                       We come to here when we have a resource conflict.  In other words,
 1133 ;                       the mutex is held.
 1134 
 1135 mlInUse:
 1136 
 1137                         CHECK_SETUP(r12)        
 1138                         CHECK_MYLOCK()                                                  ; Assert we don't own the lock already
 1139 
 1140 ;                       Note that we come in here with the interlock set.  The wait routine
 1141 ;                       will unlock it before waiting.
 1142 
 1143                         bf              MUTEX_ATTR_STATb,mlStatSkip2    ; Branch if no stat
 1144                         lwz             r5,MUTEX_GRP(r3)                                ; Load lck group
 1145                         bt              mlckmiss,mlStatSkip1                    ; Skip miss already counted
 1146                         crset   mlckmiss                                                ; Remember miss recorded
 1147                         li              r9,GRP_MTX_STAT_MISS+4                  ; Get stat miss offset
 1148 mlStatLoop1:
 1149                         lwarx   r8,r9,r5                                                ; Load stat miss cnt
 1150                         addi    r8,r8,1                                                 ; Increment stat miss cnt       
 1151                         stwcx.  r8,r9,r5                                                ; Store stat miss cnt
 1152                         bne--   mlStatLoop1                                             ; Retry if failed
 1153 mlStatSkip1:
 1154                         lwz             r9,GRP_MTX_STAT_WAIT+4(r5)              ; Load wait cnt
 1155                         addi    r9,r9,1                                                 ; Increment wait cnt
 1156                         stw             r9,GRP_MTX_STAT_WAIT+4(r5)              ; Update wait cnt
 1157 mlStatSkip2:
 1158                         ori             r4,r4,WAIT_FLAG                                 ; Set the wait flag
 1159                         stw             r4,MUTEX_DATA(r3)
 1160                         rlwinm  r4,r4,0,0,29                                    ; Extract the lock owner
 1161                         mfcr    r2
 1162                         stw             r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
 1163                         mr              r3,r11                                                  ; Get the based lock address
 1164                         bl              EXT(lck_mtx_lock_wait)                  ; Wait for our turn at the lock
 1165                         
 1166                         lwz             r3,FM_ARG0(r1)                                  ; restore r3 (saved in prolog)
 1167                         lwz             r11,FM_ARG0+0x04(r1)                    ; restore r11 (saved in prolog)
 1168                         lwz             r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
 1169                         mtcr    r2
 1170                         b               .L_ml_retry                                             ; and try again...
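      /*
       * A rough C rendering of the blocking slow path above (.L_ml_retry through
       * mlInUse).  The lck_mtx_lock_acquire / lck_mtx_lock_wait calls mirror the
       * externals the assembly invokes, with arguments as set up in r3/r4 above;
       * the struct, flag handling and remaining helpers are illustrative
       * stand-ins, not the kernel's declarations.
       *
       *      typedef struct { volatile unsigned int data; } mtx_sketch_t;
       *
       *      extern unsigned int current_thread_ptr(void);               // assumed helper
       *      extern void interlock_spin_lock(mtx_sketch_t *l);           // lockDisa: sets ILK_LOCKED
       *      extern void preemption_enable(void);                        // epStart
       *      extern unsigned int lck_mtx_lock_acquire(mtx_sketch_t *l);  // nonzero: waiters remain
       *      extern void lck_mtx_lock_wait(mtx_sketch_t *l, unsigned int holder);
       *
       *      static void lck_mtx_lock_blocking_sketch(mtx_sketch_t *l)
       *      {
       *          for (;;) {
       *              interlock_spin_lock(l);                             // take the interlock bit
       *              unsigned int w = l->data;                           // plain load: interlock held
       *              if ((w & ~(ILK_LOCKED | WAIT_FLAG)) == 0) {         // no owner recorded
       *                  unsigned int waiters = lck_mtx_lock_acquire(l);
       *                  // One store sets the owner (plus WAIT_FLAG if waiters remain)
       *                  // and, by clearing ILK_LOCKED, also releases the interlock.
       *                  l->data = current_thread_ptr() | (waiters ? WAIT_FLAG : 0);
       *                  preemption_enable();
       *                  return;
       *              }
       *              l->data = w | WAIT_FLAG;                            // note a waiter, interlock still held
       *              lck_mtx_lock_wait(l, w & ~(ILK_LOCKED | WAIT_FLAG)); // drops interlock and sleeps
       *          }
       *      }
       */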
 1171 
 1172         
 1173 /*
 1174  *              void lck_mtx_try_lock_ext(lck_mtx_ext_t*)
 1175  *
 1176  */
 1177                         .align  5
 1178                         .globl  EXT(lck_mtx_try_lock_ext)
 1179 LEXT(lck_mtx_try_lock_ext)
 1180 #if     MACH_LDEBUG
 1181                         .globl  EXT(mutex_try)
 1182 LEXT(mutex_try)
 1183                         .globl  EXT(_mutex_try)
 1184 LEXT(_mutex_try)
 1185 #endif
 1186                         mr              r11,r3                                                  ; Save lock addr
 1187 mlteEnter:
 1188                         lwz             r0,MUTEX_ATTR(r3)
 1189                         mtcrf   1,r0                                                    ; Set cr7
 1190                         CHECK_SETUP(r12)        
 1191                         CHECK_MUTEX_TYPE()
 1192                         
 1193                         bf              MUTEX_ATTR_STATb,mlteStatSkip   ; Branch if no stat
 1194                         lwz             r5,MUTEX_GRP(r3)                                ; Load lock group
 1195                         li              r7,GRP_MTX_STAT_UTIL+4                  ; Set stat util offset
 1196 mlteStatLoop:
 1197                         lwarx   r8,r7,r5                                                ; Load stat util cnt
 1198                         addi    r8,r8,1                                                 ; Increment stat util cnt
 1199                         stwcx.  r8,r7,r5                                                ; Store stat util cnt
 1200                         bne--   mlteStatLoop                                    ; Retry if failed
 1201                         mr.             r8,r8                                                   ; Test for zero
 1202                         bne++   mlteStatSkip                                    ; Skip if stat util cnt did not wrap
 1203                         lwz             r8,GRP_MTX_STAT_UTIL(r5)                ; Load upper stat util cnt
 1204                         addi    r8,r8,1                                                 ; Increment upper stat util cnt
 1205                         stw             r8,GRP_MTX_STAT_UTIL(r5)                ; Store upper stat util cnt
 1206 mlteStatSkip:
 1207                         mfsprg  r6,1                                                    ; load the current thread
 1208                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock value
 1209                         mr.             r5,r5                                                   ; Quick check
 1210                         bne--   L_mutex_try_slow                                ; Can not get it now...
 1211                         mfmsr   r9                                                              ; Get the MSR value
 1212                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1213                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1214                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1215                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1216                         andc    r7,r9,r7                                                ; Clear EE as well
 1217                         mtmsr   r7                                                              ; Turn off interruptions 
 1218                         isync                                                                   ; May have turned off vec and fp here 
 1219 
 1220 mlteLoopTry:
 1221                         lwarx   r5,MUTEX_DATA,r3                                ; load the lock value
 1222                         mr.             r5,r5
 1223                         bne--   mlteSlowX                                               ; branch to the slow path
 1224                         stwcx.  r6,MUTEX_DATA,r3                                ; grab the lock
 1225                         bne--   mlteLoopTry                                             ; retry if failed
 1226                         .globl  EXT(mltelckPatch_isync)
 1227 LEXT(mltelckPatch_isync)
 1228                         isync                                                                   ; stop prefetching
 1229                         mflr    r12
 1230                         bf              MUTEX_ATTR_DEBUGb,mlteDebSkip
 1231                         mr              r8,r6                                                   ; Get the active thread
 1232                         stw             r12,MUTEX_STACK(r3)                             ; Save our caller
 1233                         stw             r8,MUTEX_THREAD(r3)                             ; Set the mutex's holding thread
 1234                         mr              r5,r1
 1235                         LCK_STACK(r3,r5,r6,r7,r8,r10)
 1236 mlteDebSkip:
 1237                         li              r3, 1
 1238                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1239                         blr
 1240 mlteSlowX:
 1241                         li              r5,lgKillResv                                   ; Killing field
 1242                         stwcx.  r5,0,r5                                                 ; Kill reservation
 1243                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1244                         b               L_mutex_try_slow
 1245 
 1246 
 1247 /*
 1248  *              void lck_mtx_try_lock(lck_mtx_t*)
 1249  *
 1250  */
 1251                         .align  5
 1252                         .globl  EXT(lck_mtx_try_lock)
 1253 LEXT(lck_mtx_try_lock)
 1254 #if     !MACH_LDEBUG
 1255                         .globl  EXT(mutex_try)
 1256 LEXT(mutex_try)
 1257                         .globl  EXT(_mutex_try)
 1258 LEXT(_mutex_try)
 1259 #endif
 1260 
 1261                         mfsprg  r6,1                                                    ; load the current thread
 1262                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock value
 1263                         mr              r11,r3                                                  ; Save lock addr
 1264                         mr.             r5,r5                                                   ; Quick check
 1265                         bne--   mltSlow00                                               ; Indirect or Can not get it now...
 1266 
 1267 mltLoopTry:
 1268                         lwarx   r5,MUTEX_DATA,r3                                ; load the lock value
 1269                         mr.             r5,r5
 1270                         bne--   mltSlow01                                               ; branch to the slow path
 1271                         stwcx.  r6,MUTEX_DATA,r3                                ; grab the lock
 1272                         bne--   mltLoopTry                                              ; retry if failed
 1273                         .globl  EXT(mltlckPatch_isync)
 1274 LEXT(mltlckPatch_isync)
 1275                         isync                                                                   ; stop prefetching
 1276                         li              r3, 1
 1277                         blr
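      /*
       * lck_mtx_try_lock in portable terms: one attempt to swing the lock word
       * from 0 to the current thread, returning nonzero on success and zero if
       * the mutex is already owned or interlocked.  A minimal sketch under the
       * same assumptions as the lck_mtx_lock sketch above:
       *
       *      #include <stdatomic.h>
       *      #include <stdint.h>
       *
       *      extern uintptr_t current_thread_ptr(void);          // assumed helper
       *
       *      int lck_mtx_try_lock_sketch(_Atomic uintptr_t *lock)
       *      {
       *          uintptr_t expected = 0;
       *          return atomic_compare_exchange_strong_explicit(lock, &expected,
       *                  current_thread_ptr(), memory_order_acquire,
       *                  memory_order_relaxed);
       *      }
       */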
 1278 
 1279 mltSlow00:
 1280                         cmpli   cr0,r5,MUTEX_IND                                ; Is it a mutex indirect 
 1281                         bne--   mltSlow02                                               ; No, go handle contention 
 1282                         lwz             r3,MUTEX_PTR(r3)                                ; load mutex ext pointer
 1283                         b               mlteEnter
 1284 mltSlow01:
 1285                         li              r5,lgKillResv                                   ; Killing field
 1286                         stwcx.  r5,0,r5                                                 ; Kill reservation
 1287 
 1288 mltSlow02:
 1289                         li              r0,0
 1290                         mtcrf   1,r0                                                    ; Set cr7 to zero
 1291 
 1292 L_mutex_try_slow:
 1293                         PROLOG(0)
 1294         
 1295                         lwz             r6,MUTEX_DATA(r3)                               ; Quick check
 1296                         rlwinm. r6,r6,30,2,31                                   ; to see if someone has this lock already
 1297                         bne-    mtFail                                                  ; Someone's got it already...
 1298 
 1299                         bl              lockDisa                                                ; Go get a lock on the mutex's interlock lock
 1300                         mr.             r4,r3                                                   ; Did we get it?
 1301                         lwz             r3,FM_ARG0(r1)                                  ; Restore the lock address
 1302                         bne++   mtGotInt                                                ; We got it just fine...
 1303                         mr              r4,r11                                                  ; Saved lock addr
 1304                         lis             r3,hi16(mutex_failed2)                  ; Get the failed mutex message
 1305                         ori             r3,r3,lo16(mutex_failed2)               ; Get the failed mutex message
 1306                         bl              EXT(panic)                                              ; Call panic
 1307                         BREAKPOINT_TRAP                                                 ; We die here anyway, can not get the lock
 1308         
 1309                         .data
 1310 mutex_failed2:
 1311                         STRINGD "attempt to interlock mutex (0x%08X) failed on mutex lock try\n\000"
 1312                         .text
 1313                         
 1314 mtGotInt:
 1315                         
 1316 ;                       Note that there is no reason to do a load and reserve here.  We already
 1317 ;                       hold the interlock and no one can touch this field unless they
 1318 ;                       have that, so we're free to play
 1319                         
 1320                         lwz             r4,MUTEX_DATA(r3)                               ; Get the mutex's lock field
 1321                         rlwinm. r9,r4,30,2,31                                   ; So, can we have it?
 1322                         bne-    mtInUse                                                 ; Nope, somebody's playing already...
 1323                         
 1324                         bf++    MUTEX_ATTR_DEBUGb,mtDebSkip
 1325                         CHECK_SETUP(r5)
 1326                         mfsprg  r9,1                                                    ; Get the current activation
 1327                         lwz             r5,0(r1)                                                ; Get previous save frame
 1328                         lwz             r6,FM_LR_SAVE(r5)                               ; Get our caller's address
 1329                         mr              r8,r9                                                   ; Get the active thread
 1330                         stw             r6,MUTEX_STACK(r3)                              ; Save our caller
 1331                         stw             r8,MUTEX_THREAD(r3)                             ; Set the mutex's holding thread
 1332                         LCK_STACK(r3,r5,r6,r7,r8,r10)
 1333 mtDebSkip:
 1334                         mr              r3,r11                                                  ; Get the based lock address
 1335                         bl      EXT(lck_mtx_lock_acquire)
 1336                         mfsprg  r5,1
 1337                         mr.             r4,r3
 1338                         lwz             r3,FM_ARG0(r1)                                  ; restore r3 (saved in prolog)
 1339                         lwz             r11,FM_ARG0+0x04(r1)                    ; restore r11 (saved in prolog)
 1340                         beq             mtUnlock
 1341                         ori             r5,r5,WAIT_FLAG
 1342 
 1343 mtUnlock:       eieio
 1344                         stw     r5,MUTEX_DATA(r3)                                       ; grab the mutexlock and free the interlock
 1345 
 1346                         bl              epStart                                                 ; Go enable preemption...
 1347 
 1348                         li              r3, 1
 1349                         EPILOG                                                                  ; Restore all saved registers
 1350                         blr                                                                             ; Return...
 1351 
 1352 ;                       We come to here when we have a resource conflict.  In other words,
 1353 ;                       the mutex is held.
 1354 
 1355 mtInUse:        
 1356                         bf++    MUTEX_ATTR_STATb,mtStatSkip             ; Branch if no stat
 1357                         lwz             r5,MUTEX_GRP(r3)                                ; Load lock group
 1358                         li              r9,GRP_MTX_STAT_MISS+4                  ; Get stat miss offset
 1359 mtStatLoop:
 1360                         lwarx   r8,r9,r5                                                ; Load stat miss cnt
 1361                         addi    r8,r8,1                                                 ; Increment stat miss cnt       
 1362                         stwcx.  r8,r9,r5                                                ; Store stat miss cnt
 1363                         bne--   mtStatLoop                                              ; Retry if failed
 1364 mtStatSkip:
 1365                         rlwinm  r4,r4,0,0,30                                    ; Get the unlock value
 1366                         stw             r4,MUTEX_DATA(r3)                               ; free the interlock
 1367                         bl              epStart                                                 ; Go enable preemption...
 1368 
 1369 mtFail:         li              r3,0                                                    ; Set failure code
 1370                         EPILOG                                                                  ; Restore all saved registers
 1371                         blr                                                                             ; Return...
 1372 
 1373                 
 1374 /*
 1375  *              void mutex_unlock(mutex_t* l)
 1376  *
 1377  */
 1378                         .align  5
 1379                         .globl  EXT(mutex_unlock)
 1380 LEXT(mutex_unlock)
 1381 
 1382                         sync
 1383                         mr              r11,r3                                                  ; Save lock addr
 1384 #if     MACH_LDEBUG
 1385                         b               mlueEnter1
 1386 #else
 1387                         b               mluEnter1
 1388 #endif
 1389 
 1390 /*
 1391  *              void lck_mtx_ext_unlock(lck_mtx_ext_t* l)
 1392  *
 1393  */
 1394                         .align  5
 1395                         .globl  EXT(lck_mtx_ext_unlock)
 1396 LEXT(lck_mtx_ext_unlock)
 1397 #if     MACH_LDEBUG
 1398                         .globl  EXT(mutex_unlock_rwcmb)
 1399 LEXT(mutex_unlock_rwcmb)
 1400 #endif
 1401 mlueEnter:
 1402                         .globl  EXT(mulckePatch_isync)
 1403 LEXT(mulckePatch_isync)
 1404                         isync
 1405                         .globl  EXT(mulckePatch_eieio)     
 1406 LEXT(mulckePatch_eieio)
 1407                         eieio
 1408                         mr              r11,r3                                                  ; Save lock addr
 1409 mlueEnter1:
 1410                         lwz             r0,MUTEX_ATTR(r3)
 1411                         mtcrf   1,r0                                                    ; Set cr7
 1412                         CHECK_SETUP(r12)        
 1413                         CHECK_MUTEX_TYPE()
 1414                         CHECK_THREAD(MUTEX_THREAD)
 1415 
 1416                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock
 1417                         rlwinm. r4,r5,0,30,31                                   ; Quick check
 1418                         bne--   L_mutex_unlock_slow                             ; Can not get it now...
 1419                         mfmsr   r9                                                              ; Get the MSR value
 1420                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1421                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1422                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1423                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1424                         andc    r7,r9,r7                                                ; Clear EE as well
 1425                         mtmsr   r7                                                              ; Turn off interruptions 
 1426                         isync                                                                   ; May have turned off vec and fp here 
 1427 
 1428 mlueLoop:
 1429                         lwarx   r5,MUTEX_DATA,r3
 1430                         rlwinm. r4,r5,0,30,31                                   ; Bail if pending waiter or interlock set
 1431                         li              r5,0                                                    ; Clear the mutexlock
 1432                         bne--   mlueSlowX
 1433                         stwcx.  r5,MUTEX_DATA,r3
 1434                         bne--   mlueLoop
 1435                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1436                         blr
 1437 
 1438 mlueSlowX:
 1439                         li              r5,lgKillResv                                   ; Killing field
 1440                         stwcx.  r5,0,r5                                                 ; Dump reservation
 1441                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1442                         b               L_mutex_unlock_slow                             ; Join slow path...
 1443 
 1444 /*
 1445  *              void lck_mtx_unlock(lck_mtx_t* l)
 1446  *
 1447  */
 1448                         .align  5
 1449                         .globl  EXT(lck_mtx_unlock)
 1450 LEXT(lck_mtx_unlock)
 1451 #if     !MACH_LDEBUG
 1452                         .globl  EXT(mutex_unlock_rwcmb)
 1453 LEXT(mutex_unlock_rwcmb)
 1454 #endif
 1455 mluEnter:
 1456                         .globl  EXT(mulckPatch_isync)
 1457 LEXT(mulckPatch_isync)
 1458                         isync
 1459                         .globl  EXT(mulckPatch_eieio)     
 1460 LEXT(mulckPatch_eieio)
 1461                         eieio
 1462                         mr              r11,r3                                                  ; Save lock addr
 1463 mluEnter1:
 1464                         lwz             r5,MUTEX_DATA(r3)                               ; Get the lock
 1465                         rlwinm. r4,r5,0,30,31                                   ; Quick check
 1466                         bne--   mluSlow0                                                ; Indirect or Can not get it now...
 1467 
 1468 mluLoop:
 1469                         lwarx   r5,MUTEX_DATA,r3
 1470                         rlwinm. r4,r5,0,30,31                                   ; Bail if pending waiter or interlock set
 1471                         li              r5,0                                                    ; Clear the mutexlock
 1472                         bne--   mluSlowX
 1473                         stwcx.  r5,MUTEX_DATA,r3
 1474                         bne--   mluLoop
 1475                         blr
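      /*
       * The unlock fast path above: eieio orders every store made inside the
       * critical section before the store that clears the lock word, and the
       * lwarx/stwcx. loop only clears it when neither WAIT_FLAG nor ILK_LOCKED
       * is set.  In C11 terms that is roughly a release-ordered compare-and-swap
       * from "owner, no flag bits" to zero; a sketch under that assumption:
       *
       *      #include <stdatomic.h>
       *      #include <stdint.h>
       *
       *      extern uintptr_t current_thread_ptr(void);          // assumed helper
       *      extern void lck_mtx_unlock_slow_sketch(_Atomic uintptr_t *lock);
       *
       *      void lck_mtx_unlock_fast_sketch(_Atomic uintptr_t *lock)
       *      {
       *          uintptr_t expected = current_thread_ptr();      // owner, flags clear
       *          if (atomic_compare_exchange_strong_explicit(lock, &expected, 0,
       *                  memory_order_release, memory_order_relaxed))
       *              return;                                     // no waiters: done
       *          lck_mtx_unlock_slow_sketch(lock);               // waiters or interlock set
       *      }
       */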
 1476 
 1477 mluSlow0:
 1478                         cmpli   cr0,r5,MUTEX_IND                                ; Is it a mutex indirect 
 1479                         bne--   L_mutex_unlock_slow                             ; No, go handle contention 
 1480                         lwz             r3,MUTEX_PTR(r3)                                ; load mutex ext pointer
 1481                         b               mlueEnter1
 1482 mluSlowX:
 1483                         li              r5,lgKillResv                                   ; Killing field
 1484                         stwcx.  r5,0,r5                                                 ; Dump reservation
 1485 
 1486 L_mutex_unlock_slow:
 1487                         
 1488                         PROLOG(0)
 1489         
 1490                         bl              lockDisa                                                ; Go get a lock on the mutex's interlock lock
 1491                         mr.             r4,r3                                                   ; Did we get it?
 1492                         lwz             r3,FM_ARG0(r1)                                  ; Restore the lock address
 1493                         bne++   muGotInt                                                ; We got it just fine...
 1494                         mr              r4,r11                                                  ; Saved lock addr
 1495                         lis             r3,hi16(mutex_failed3)                  ; Get the failed mutex message
 1496                         ori             r3,r3,lo16(mutex_failed3)               ; Get the failed mutex message
 1497                         bl              EXT(panic)                                              ; Call panic
 1498                         BREAKPOINT_TRAP                                                 ; We die here anyway, can not get the lock
 1499         
 1500                         .data
 1501 mutex_failed3:
 1502                         STRINGD "attempt to interlock mutex (0x%08X) failed on mutex unlock\n\000"
 1503                         .text
 1504                         
 1505                         
 1506 muGotInt:
 1507                         lwz             r4,MUTEX_DATA(r3)
 1508                         andi.   r5,r4,WAIT_FLAG                                 ; are there any waiters ?
 1509                         rlwinm  r4,r4,0,0,29
 1510                         beq+    muUnlock                                                ; Nope, we're done...
 1511 
 1512                         mr              r3,r11                                                  ; Get the based lock address
 1513                         bl              EXT(lck_mtx_unlock_wakeup)              ; yes, wake a thread
 1514                         lwz             r3,FM_ARG0(r1)                                  ; restore r3 (saved in prolog)
 1515                         lwz             r11,FM_ARG0+0x04(r1)                    ; restore r11 (saved in prolog)
 1516                         lwz             r5,MUTEX_DATA(r3)                               ; load the lock
 1517 
 1518 muUnlock:
 1519                         andi.   r5,r5,WAIT_FLAG                                 ; Get the unlock value
 1520                         eieio
 1521                         stw             r5,MUTEX_DATA(r3)                               ; unlock the interlock and lock
 1522 
 1523                         EPILOG                                                                  ; Deal with the stack now, enable_preemption doesn't always want one
 1524                         b               epStart                                                 ; Go enable preemption...
 1525 
 1526 /*
 1527  *              void lck_mtx_assert(lck_mtx_t* l, unsigned int)
 1528  *
 1529  */
 1530                         .align  5
 1531                         .globl  EXT(lck_mtx_assert)
 1532 LEXT(lck_mtx_assert)
 1533                         .globl  EXT(_mutex_assert)
 1534 LEXT(_mutex_assert)
 1535                         mr              r11,r3
 1536 maEnter:
 1537                         lwz             r5,MUTEX_DATA(r3)
 1538                         cmpli   cr0,r5,MUTEX_IND                                ; Is it a mutex indirect 
 1539                         bne--   maCheck                                                 ; No, go check the assertion
 1540                         lwz             r3,MUTEX_PTR(r3)                                ; load mutex ext pointer
 1541                         b               maEnter
 1542 maCheck:
 1543                         mfsprg  r6,1                                                    ; load the current thread
 1544                         rlwinm  r5,r5,0,0,29                                    ; Extract the lock owner
 1545                         cmpwi   r4,MUTEX_ASSERT_OWNED
 1546                         cmplw   cr1,r6,r5                                               ; Is the lock held by current act
 1547                         crandc  cr0_eq,cr0_eq,cr1_eq                    ; Check owned assertion
 1548                         bne--   maNext
 1549                         mr              r4,r11
 1550                         lis             r3,hi16(mutex_assert1)                  ; Get the failed mutex message
 1551                         ori             r3,r3,lo16(mutex_assert1)               ; Get the failed mutex message
 1552                         b               maPanic                                                 ; Panic path
 1553 maNext:
 1554                         cmpwi   r4,MUTEX_ASSERT_NOTOWNED                ; Check not owned assertion
 1555                         crand   cr0_eq,cr0_eq,cr1_eq                    ;
 1556                         bnelr++
 1557 maPanic:
 1558                         PROLOG(0)
 1559                         mr              r4,r11
 1560                         lis             r3,hi16(mutex_assert2)                  ; Get the failed mutex message
 1561                         ori             r3,r3,lo16(mutex_assert2)               ; Get the failed mutex message
 1562                         bl              EXT(panic)                                              ; Call panic
 1563                         BREAKPOINT_TRAP                                                 ; We die here anyway
 1564 
 1565                         .data
 1566 mutex_assert1:
 1567                         STRINGD "mutex (0x%08X) not owned\n\000"
 1568 mutex_assert2:
 1569                         STRINGD "mutex (0x%08X) owned\n\000"
 1570                         .text
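      /*
       * lck_mtx_assert in C-level terms: follow an indirect (MUTEX_IND) lock to
       * its extended form, strip the two low flag bits from the lock word, and
       * compare the owner against the current thread according to the assertion
       * type, panicking with the strings above on failure.  An illustrative
       * sketch; the struct and current_thread_ptr() are stand-ins:
       *
       *      typedef struct { volatile unsigned int data; } mtx_sketch_t;
       *
       *      void lck_mtx_assert_sketch(mtx_sketch_t *l, unsigned int type)
       *      {
       *          unsigned int owner = l->data & ~(ILK_LOCKED | WAIT_FLAG);
       *          if (type == MUTEX_ASSERT_OWNED && owner != current_thread_ptr())
       *              panic("mutex (0x%08X) not owned", (unsigned int)l);
       *          if (type == MUTEX_ASSERT_NOTOWNED && owner == current_thread_ptr())
       *              panic("mutex (0x%08X) owned", (unsigned int)l);
       *      }
       */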
 1571                         
 1572                         
 1573 /*
 1574  *              void lck_mtx_ilk_unlock(lck_mtx_t *lock)
 1575  */
 1576                         .globl  EXT(lck_mtx_ilk_unlock)
 1577 LEXT(lck_mtx_ilk_unlock)
 1578 
 1579                         lwz             r10,MUTEX_DATA(r3)
 1580                         rlwinm  r10,r10,0,0,30
 1581                         eieio
 1582                         stw             r10,MUTEX_DATA(r3)
 1583 
 1584                         b               epStart                                                 ; Go enable preemption...
 1585 
 1586 /*              
 1587  *              void _enable_preemption_no_check(void)
 1588  *
 1589  *                      This version does not check if we get preempted or not
 1590  */
 1591                         .align  4
 1592                         .globl  EXT(_enable_preemption_no_check)
 1593 
 1594 LEXT(_enable_preemption_no_check)
 1595 
 1596                         cmplw   cr1,r1,r1                                               ; Force cr1_eq on so we know not to check if preempted
 1597                         b               epCommn                                                 ; Join up with the other enable code... 
 1598 
 1599 /*              
 1600  *              void _enable_preemption(void)
 1601  *
 1602  *                      This version checks if we get preempted or not
 1603  */
 1604                         .align  5
 1605                         .globl  EXT(_enable_preemption)
 1606 
 1607 LEXT(_enable_preemption)
 1608 
 1609 ;               Here is where we enable preemption.
 1610 
 1611 epStart:
 1612                         cmplwi  cr1,r1,0                                                ; Force cr1_eq off so we know to check if preempted
 1613 
 1614 epCommn:
 1615                         mfsprg  r3,1                                                    ; Get current activation
 1616                         li              r8,-1                                                   ; Get a decrementer
 1617                         lwz             r5,ACT_PREEMPT_CNT(r3)                  ; Get the preemption level
 1618                         add.    r5,r5,r8                                                ; Bring down the disable count
 1619                         blt-    epTooFar                                                ; Count went negative, we went too far...
 1620                         stw             r5,ACT_PREEMPT_CNT(r3)                  ; Save it back
 1621                         crandc  cr0_eq,cr0_eq,cr1_eq                    ; Check only if count is zero and checking is wanted
 1622                         beq+    epCheckPreempt                                  ; Go check if we need to be preempted...
 1623                         blr                                                                             ; Leave...
 1624 epTooFar:       
 1625                         mr              r4,r5                                                   ; Pass the bad preemption count to panic
 1626                         lis             r3,hi16(epTooFarStr)                    ; First half of panic string
 1627                         ori             r3,r3,lo16(epTooFarStr)                 ; Second half of panic string
 1628                         PROLOG(0)
 1629                         bl              EXT(panic)
 1630                         BREAKPOINT_TRAP                                                 ; We die here anyway
 1631 
 1632                         .data
 1633 epTooFarStr:
 1634                         STRINGD "enable_preemption: preemption_level %d\n\000"
 1635 
 1636                         .text
 1637                         .align  5
 1638 epCheckPreempt:
 1639                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1640                         mfmsr   r9                                                              ; Get the MSR value
 1641                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1642                         andi.   r4,r9,lo16(MASK(MSR_EE))                ; We cannot preempt if interruptions are off
 1643                         beq+    epCPno                                                  ; No preemption here...
 1644                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1645                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1646                         andc    r7,r9,r7                                                ; Clear EE as well
 1647                         mtmsr   r7                                                              ; Turn off interruptions 
 1648                         isync                                                                   ; May have turned off vec and fp here 
 1649                         lwz             r3,ACT_PER_PROC(r3)                             ; Get the per_proc block
 1650                         lwz             r7,PP_PENDING_AST(r3)                   ; Get pending AST mask
 1651                         li              r5,AST_URGENT                                   ; Get the requests we do honor
 1652                         lis             r0,hi16(DoPreemptCall)                  ; Just in case, get the top of firmware call
 1653                         and.    r7,r7,r5                                                ; Should we preempt?
 1654                         ori             r0,r0,lo16(DoPreemptCall)               ; Merge in bottom part
 1655                         mtmsr   r9                                                              ; Allow interrupts if we can
 1656 epCPno:         
 1657                         beqlr+                                                                  ; We probably will not preempt...
 1658                         sc                                                                              ; Do the preemption
 1659                         blr                                                                             ; Go away now...
 1660 
 1661 /*
 1662  *              void disable_preemption(void)
 1663  *
 1664  *                      Here is where we disable preemption.
 1665  */
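/*
 *              Editor's usage sketch (not part of the original source): the pair
 *              _disable_preemption()/_enable_preemption() brackets code that must stay
 *              on one processor; ACT_PREEMPT_CNT nests, and the enable path takes a
 *              pending urgent AST once the count returns to zero.  The counter
 *              argument below is an assumption made only for this example.
 *
 *                      extern void _disable_preemption(void);
 *                      extern void _enable_preemption(void);
 *                      extern int  get_preemption_level(void);
 *
 *                      void
 *                      my_percpu_bump(unsigned int *counter)
 *                      {
 *                              _disable_preemption();          // bump ACT_PREEMPT_CNT
 *                              (*counter)++;                   // stays on this processor
 *                              assert(get_preemption_level() > 0);
 *                              _enable_preemption();           // may preempt right here
 *                      }
 */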
 1666                         .align  5
 1667                         .globl  EXT(_disable_preemption)
 1668 
 1669 LEXT(_disable_preemption)
 1670 
 1671                         mfsprg  r6,1                                                    ; Get the current activation
 1672                         lwz             r5,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
 1673                         addi    r5,r5,1                                                 ; Bring up the disable count
 1674                         stw             r5,ACT_PREEMPT_CNT(r6)                  ; Save it back 
 1675                         blr                                                                             ; Return...
 1676 
 1677 /*
 1678  *              int get_preemption_level(void)
 1679  *
 1680  *                      Return the current preemption level
 1681  */
 1682                         .align  5
 1683                         .globl  EXT(get_preemption_level)
 1684 
 1685 LEXT(get_preemption_level)
 1686  
 1687                         mfsprg  r6,1                                                    ; Get current activation
 1688                         lwz             r3,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
 1689                         blr                                                                             ; Return...
 1690 
 1691 /*
 1692  *              void ppc_usimple_lock_init(simple_lock_t, etap_event_t)
 1693  *
 1694  *                      Initialize a simple lock.
 1695  */
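/*
 *              Editor's usage sketch (not part of the original source): the init routine
 *              simply stores a zero (free) lock word; the second argument is ignored by
 *              this implementation.  The storage declaration and names below are
 *              assumptions made only for this example.
 *
 *                      static simple_lock_data_t       my_queue_lock;  // hypothetical lock storage
 *
 *                      void
 *                      my_queue_setup(void)
 *                      {
 *                              ppc_usimple_lock_init(&my_queue_lock, 0);       // lock word = 0, free
 *                      }
 */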
 1696                         .align  5
 1697                         .globl  EXT(ppc_usimple_lock_init)
 1698 
 1699 LEXT(ppc_usimple_lock_init)
 1700 
 1701                         li      r0,     0                                                               ; set lock to free == 0 
 1702                         stw     r0,     0(r3)                                                   ; Initialize the lock 
 1703                         blr
 1704         
 1705 /*
 1706  *              void lck_spin_lock(lck_spin_t *)
 1707  *              void ppc_usimple_lock(simple_lock_t *)
 1708  *
 1709  */
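/*
 *              Editor's usage sketch (not part of the original source): the acquire path
 *              below raises the preemption count, then loops on lwarx/stwcx. until the
 *              interlock bit is free, panicking if LockTimeOut passes with no progress.
 *              A caller pairs it with lck_spin_unlock() around a short critical section;
 *              the lock and counter names are assumptions made only for this example.
 *
 *                      extern lck_spin_t       my_stats_lock;          // hypothetical lock
 *                      extern unsigned int     my_stats_count;         // hypothetical counter
 *
 *                      void
 *                      my_stats_bump(void)
 *                      {
 *                              lck_spin_lock(&my_stats_lock);          // spin, preemption off
 *                              my_stats_count++;                       // keep this section short
 *                              lck_spin_unlock(&my_stats_lock);        // release, preemption back on
 *                      }
 */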
 1710                         .align  5
 1711                         .globl  EXT(lck_spin_lock)
 1712 LEXT(lck_spin_lock)
 1713                         .globl  EXT(ppc_usimple_lock)
 1714 LEXT(ppc_usimple_lock)
 1715 
 1716                         mfsprg  r6,1                                                    ; Get the current activation 
 1717                         lwz             r5,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
 1718                         addi    r5,r5,1                                                 ; Bring up the disable count
 1719                         stw             r5,ACT_PREEMPT_CNT(r6)                  ; Save it back 
 1720                         mr              r5,r3                                                   ; Get the address of the lock
 1721                         li              r8,0                                                    ; Clear the first-time-through flag
 1722                         li              r4,0                                                    ; Clear timeout so we fetch LockTimeOut below
 1723 
 1724 slcktry:        lwarx   r11,SLOCK_ILK,r5                                ; Grab the lock value
 1725                         andi.   r3,r11,ILK_LOCKED                               ; Is it locked?
 1726                         ori             r11,r6,ILK_LOCKED                               ; Set interlock 
 1727                         bne--   slckspin                                                ; Yeah, wait for it to clear...
 1728                         stwcx.  r11,SLOCK_ILK,r5                                ; Try to seize that there durn lock
 1729                         bne--   slcktry                                                 ; Couldn't get it...
 1730                         .globl  EXT(slckPatch_isync)
 1731 LEXT(slckPatch_isync)
 1732                         isync                                                                   ; Make sure we don't use a speculatively loaded value
 1733                         blr                                                                             ; Go on home...
 1734 
 1735 slckspin:       li              r11,lgKillResv                                  ; Killing field
 1736                         stwcx.  r11,0,r11                                               ; Kill reservation
 1737 
 1738                         mr.             r4,r4                                                   ; Test timeout value
 1739                         bne++   slockspin0
 1740                         lis             r4,hi16(EXT(LockTimeOut))               ; Get the high part 
 1741                         ori             r4,r4,lo16(EXT(LockTimeOut))    ; And the low part
 1742                         lwz             r4,0(r4)                                                ; Get the timeout value
 1743 
 1744 slockspin0:     mr.             r8,r8                                                   ; Is this the first spin attempt (r8 still zero)?
 1745                         bne++   slockspin1                                              ; No, skip the MSR setup...
 1746                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1747                         mfmsr   r9                                                              ; Get the MSR value
 1748                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1749                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1750                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1751                         andc    r7,r9,r7                                                ; Clear EE as well
 1752                         mtmsr   r7                                                              ; Turn off interruptions 
 1753                         isync                                                                   ; May have turned off vec and fp here 
 1754                         mftb    r8                                                              ; Get timestamp on entry
 1755                         b               slcksniff
 1756 
 1757 slockspin1:     mtmsr   r7                                                              ; Turn off interruptions 
 1758                         mftb    r8                                                              ; Get timestamp on entry
 1759 
 1760 slcksniff:      lwz             r3,SLOCK_ILK(r5)                                ; Get that lock in here
 1761                         andi.   r3,r3,ILK_LOCKED                                ; Is it free yet?
 1762                         beq++   slckretry                                               ; Yeah, try for it again...
 1763                         
 1764                         mftb    r10                                                             ; Time stamp us now
 1765                         sub             r10,r10,r8                                              ; Get the elapsed time
 1766                         cmplwi  r10,128                                                 ; Have we been spinning for 128 tb ticks?
 1767                         blt++   slcksniff                                               ; Not yet...
 1768                         
 1769                         mtmsr   r9                                                              ; Say, any interrupts pending?
 1770 
 1771 ;                       The following instructions force the pipeline to be interlocked so that only one
 1772 ;                       instruction is issued per cycle.  This ensures that we stay enabled for a long enough
 1773 ;                       time; if it's too short, pending interruptions will not have a chance to be taken.
 1774 
 1775                         subi    r4,r4,128                                               ; Back off elapsed time from timeout value
 1776                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
 1777                         mr.             r4,r4                                                   ; See if we used the whole timeout
 1778                         li              r3,0                                                    ; Assume a timeout return code
 1779                         or              r4,r4,r4                                                ; Do nothing here but force a single cycle delay
 1780                         
 1781                         ble--   slckfail                                                ; We failed
 1782                         b               slockspin1                                              ; Now that we've opened an enable window, keep trying...
 1783 slckretry:
 1784                         mtmsr   r9                                                              ; Restore interrupt state
 1785                         li              r8,1                                                    ; Show already through once
 1786                         b               slcktry
 1787 slckfail:                                                                                       ; We couldn't get the lock
 1788                         lis             r3,hi16(slckpanic_str)                  ; Get the panic string
 1789                         ori             r3,r3,lo16(slckpanic_str)               ; Get the panic string
 1790                         mr              r4,r5                                                   ; Pass the lock address
 1791                         mflr    r5                                                              ; Pass our return address as the pc
 1792                         PROLOG(0)
 1793                         bl              EXT(panic)
 1794                         BREAKPOINT_TRAP                                                 ; We die here anyway
 1795 
 1796                 .data
 1797 slckpanic_str:
 1798                 STRINGD "simple lock (0x%08X) deadlock detection, pc=0x%08X\n\000"
 1799                 .text
 1800 
 1801 /*
 1802  *              boolean_t lck_spin_try_lock(lck_spin_t *)
 1803  *              unsigned int ppc_usimple_lock_try(simple_lock_t *)
 1804  *
 1805  */
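/*
 *              Editor's usage sketch (not part of the original source): the try path below
 *              never spins; it returns nonzero when the lock was obtained (leaving
 *              preemption disabled) and zero when the lock was already held.  Names reuse
 *              the hypothetical lock from the sketch above.
 *
 *                      boolean_t
 *                      my_stats_try_bump(void)
 *                      {
 *                              if (!lck_spin_try_lock(&my_stats_lock))
 *                                      return FALSE;                   // busy, try again later
 *                              my_stats_count++;
 *                              lck_spin_unlock(&my_stats_lock);        // drop lock, preemption back on
 *                              return TRUE;
 *                      }
 */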
 1806                         .align  5
 1807                         .globl  EXT(lck_spin_try_lock)
 1808 LEXT(lck_spin_try_lock)
 1809                         .globl  EXT(ppc_usimple_lock_try)
 1810 LEXT(ppc_usimple_lock_try)
 1811 
 1812                         lis             r0,hi16(MASK(MSR_VEC))                  ; Get vector enable
 1813                         mfmsr   r9                                                              ; Get the MSR value 
 1814                         ori             r0,r0,lo16(MASK(MSR_FP))                ; Get FP enable
 1815                         ori             r7,r0,lo16(MASK(MSR_EE))                ; Get EE bit on too
 1816                         andc    r9,r9,r0                                                ; Clear FP and VEC
 1817                         andc    r7,r9,r7                                                ; Clear EE as well
 1818                         mtmsr   r7                                                              ; Disable interruptions and thus, preemption
 1819                         mfsprg  r6,1                                                    ; Get current activation 
 1820 
 1821                         lwz             r11,SLOCK_ILK(r3)                               ; Get the lock
 1822                         andi.   r5,r11,ILK_LOCKED                               ; Check it...
 1823                         bne--   slcktryfail                                             ; Quickly fail...
 1824 
 1825 slcktryloop:    
 1826                         lwarx   r11,SLOCK_ILK,r3                                ; Ld from addr of arg and reserve
 1827 
 1828                         andi.   r5,r11,ILK_LOCKED                               ; TEST...
 1829                         ori             r5,r6,ILK_LOCKED
 1830                         bne--   slcktryfailX                                    ; branch if taken. Predict free 
 1831         
 1832                         stwcx.  r5,SLOCK_ILK,r3                                 ; And SET (if still reserved)
 1833                         bne--   slcktryloop                                             ; If set failed, loop back 
 1834                         
 1835                         .globl  EXT(stlckPatch_isync)
 1836 LEXT(stlckPatch_isync)
 1837                         isync
 1838 
 1839                         lwz             r5,ACT_PREEMPT_CNT(r6)                  ; Get the preemption level
 1840                         addi    r5,r5,1                                                 ; Bring up the disable count 
 1841                         stw             r5,ACT_PREEMPT_CNT(r6)                  ; Save it back
 1842 
 1843                         mtmsr   r9                                                              ; Allow interruptions now 
 1844                         li              r3,1                                                    ; Set that the lock was free 
 1845                         blr
 1846 
 1847 slcktryfailX:
 1848                         li              r5,lgKillResv                                   ; Killing field
 1849                         stwcx.  r5,0,r5                                                 ; Kill reservation
 1850 
 1851 slcktryfail:
 1852                         mtmsr   r9                                                              ; Allow interruptions now 
 1853                         li              r3,0                                                    ; FAILURE - lock was taken 
 1854                         blr
 1855 
 1856 
 1857 /*
 1858  *              void lck_spin_unlock(lck_spin_t *)
 1859  *              void ppc_usimple_unlock_rwcmb(simple_lock_t *)
 1860  *
 1861  */
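/*
 *              Editor's note with a small sketch (not part of the original source): the
 *              eieio in the release path below orders every store made inside the
 *              critical section ahead of the store that clears SLOCK_ILK, so the next
 *              holder sees the protected data.  The lock and field names are assumptions
 *              made only for this example.
 *
 *                      lck_spin_lock(&my_record_lock);
 *                      my_record.value = new_value;            // ordinary cached store
 *                      my_record.valid = TRUE;                 // still under the lock
 *                      lck_spin_unlock(&my_record_lock);       // eieio, then clear the lock word
 */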
 1862                         .align  5
 1863                         .globl  EXT(lck_spin_unlock)
 1864 LEXT(lck_spin_unlock)
 1865                         .globl  EXT(ppc_usimple_unlock_rwcmb)
 1866 LEXT(ppc_usimple_unlock_rwcmb)
 1867 
 1868                         li              r0,0                                                    ; Get a free (unlocked) lock value
 1869                         .globl  EXT(sulckPatch_isync)
 1870 LEXT(sulckPatch_isync)
 1871                         isync
 1872                         .globl  EXT(sulckPatch_eieio)
 1873 LEXT(sulckPatch_eieio)
 1874                         eieio                                                                   ; Order critical-section stores before the release
 1875                         stw             r0, SLOCK_ILK(r3)                               ; Release the lock
 1876 
 1877                         b               epStart                                                 ; Go enable preemption...
 1878 
 1879 /*
 1880  *              void ppc_usimple_unlock_rwmb(simple_lock_t *)
 1881  *
 1882  */
 1883                         .align  5
 1884                         .globl  EXT(ppc_usimple_unlock_rwmb)
 1885 
 1886 LEXT(ppc_usimple_unlock_rwmb)
 1887 
 1888                         li              r0,0                                                    ; Get a free (unlocked) lock value
 1889                         sync                                                                    ; Complete all prior accesses before the release
 1890                         stw             r0, SLOCK_ILK(r3)                               ; Release the lock
 1891 
 1892                         b               epStart                                                 ; Go enable preemption...
 1893 
 1894 /*
 1895  *              void enter_funnel_section(funnel_t *)
 1896  *
 1897  */
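/*
 *              Editor's usage sketch (not part of the original source): the fast path below
 *              grabs the funnel's mutex word directly and records the funnel in the thread;
 *              kdebug tracing, the split_funnel_off setting, or an already-held funnel mutex
 *              send the call to thread_funnel_set() instead.  The funnel pointer and helper
 *              below are assumptions made only for this example.
 *
 *                      extern funnel_t *my_funnel;             // hypothetical, already allocated
 *
 *                      void
 *                      my_bsd_style_entry(void)
 *                      {
 *                              enter_funnel_section(my_funnel);        // take funnel, mark thread
 *                              my_funneled_work();                     // hypothetical helper
 *                              exit_funnel_section();                  // release, wake any waiter
 *                      }
 */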
 1898                         .align  5
 1899                         .globl  EXT(enter_funnel_section)
 1900 
 1901 LEXT(enter_funnel_section)
 1902 
 1903 #if     !MACH_LDEBUG
 1904                         lis             r10,hi16(EXT(kdebug_enable))
 1905                         ori             r10,r10,lo16(EXT(kdebug_enable))
 1906                         lwz             r10,0(r10)
 1907                         lis             r11,hi16(EXT(split_funnel_off))
 1908                         ori             r11,r11,lo16(EXT(split_funnel_off))
 1909                         lwz             r11,0(r11)
 1910                         or.             r10,r11,r10                                             ; Check kdebug_enable or split_funnel_off
 1911                         bne-    L_enter_funnel_section_slow             ; If set, call the slow path
 1912                         mfsprg  r6,1                                                    ; Get the current activation
 1913                         lwz             r7,LOCK_FNL_MUTEX(r3)
 1914 
 1915                         lwz             r5,0(r7)                                                ; Get lock quickly
 1916                         mr.             r5,r5                                                   ; Locked?
 1917                         bne--   L_enter_funnel_section_slow             ; Yup...
 1918 
 1919 L_enter_funnel_section_loop:
 1920                         lwarx   r5,0,r7                                                 ; Load the mutex lock
 1921                         mr.             r5,r5
 1922                         bne--   L_enter_funnel_section_slowX    ; Go to the slow path
 1923                         stwcx.  r6,0,r7                                                 ; Grab the lock
 1924                         bne--   L_enter_funnel_section_loop             ; Loop back if failed
 1925                         .globl  EXT(entfsectPatch_isync)     
 1926 LEXT(entfsectPatch_isync)
 1927                         isync                                                                   ; Stop prefetching
 1928                         li              r7,TH_FN_OWNED
 1929                         stw             r3,THREAD_FUNNEL_LOCK(r6)               ; Set the funnel lock reference
 1930                         stw             r7,THREAD_FUNNEL_STATE(r6)              ; Set the funnel state
 1931                         blr
 1932 
 1933 L_enter_funnel_section_slowX:
 1934                         li              r4,lgKillResv                                   ; Killing field
 1935                         stwcx.  r4,0,r4                                                 ; Kill reservation
 1936 
 1937 L_enter_funnel_section_slow:
 1938 #endif
 1939                         li              r4,TRUE
 1940                         b               EXT(thread_funnel_set)
 1941 
 1942 /*
 1943  *              void exit_funnel_section(void)
 1944  *
 1945  */
 1946                         .align  5
 1947                         .globl  EXT(exit_funnel_section)
 1948 
 1949 LEXT(exit_funnel_section)
 1950 
 1951                         mfsprg  r6,1                                                    ; Get the current activation
 1952                         lwz             r3,THREAD_FUNNEL_LOCK(r6)               ; Get the funnel lock
 1953                         mr.             r3,r3                                                   ; Check on funnel held
 1954                         beq-    L_exit_funnel_section_ret               ; Return if no funnel is held
 1955 #if     !MACH_LDEBUG
 1956                         lis             r10,hi16(EXT(kdebug_enable))
 1957                         ori             r10,r10,lo16(EXT(kdebug_enable))
 1958                         lwz             r10,0(r10)
 1959                         mr.             r10,r10
 1960                         bne-    L_exit_funnel_section_slow              ; If set, call the slow path
 1961                         lwz             r7,LOCK_FNL_MUTEX(r3)                   ; Get the funnel mutex lock
 1962                         .globl  EXT(retfsectPatch_isync)     
 1963 LEXT(retfsectPatch_isync)
 1964                         isync
 1965                         .globl  EXT(retfsectPatch_eieio)     
 1966 LEXT(retfsectPatch_eieio)
 1967                         eieio
 1968 
 1969                         lwz             r5,0(r7)                                                ; Get lock
 1970                         rlwinm. r4,r5,0,30,31                                   ; Quick check for bail if pending waiter or interlock set 
 1971                         bne--   L_exit_funnel_section_slow              ; No can get...
 1972 
 1973 L_exit_funnel_section_loop:
 1974                         lwarx   r5,0,r7
 1975                         rlwinm. r4,r5,0,30,31                                   ; Bail if pending waiter or interlock set 
 1976                         li              r5,0                                                    ; Clear the mutexlock 
 1977                         bne--   L_exit_funnel_section_slowX
 1978                         stwcx.  r5,0,r7                                                 ; Release the funnel mutexlock
 1979                         bne--   L_exit_funnel_section_loop
 1980                         li              r7,0
 1981                         stw             r7,THREAD_FUNNEL_STATE(r6)              ; Clear the funnel state
 1982                         stw             r7,THREAD_FUNNEL_LOCK(r6)               ; Clear the funnel lock reference
 1983                         blr                                                                             ; Return
 1984 
 1985 L_exit_funnel_section_slowX:
 1986                         li              r4,lgKillResv                                   ; Killing field
 1987                         stwcx.  r4,0,r4                                                 ; Kill it
 1988 
 1989 L_exit_funnel_section_slow:
 1990 #endif
 1991                         li              r4,FALSE
 1992                         b               EXT(thread_funnel_set)
 1993 L_exit_funnel_section_ret:
 1994                         blr
 1995 
 1996 /*
 1997  *              void lck_rw_lock_exclusive(lck_rw_t*)
 1998  *
 1999  */
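/*
 *              Editor's usage sketch (not part of the original source): an exclusive
 *              (write) acquire paired with lck_rw_done(), which releases whichever mode
 *              is currently held.  The lock name and helper are assumptions made only
 *              for this example.
 *
 *                      extern lck_rw_t my_table_lock;          // hypothetical rw lock
 *
 *                      void
 *                      my_table_insert(struct my_entry *e)
 *                      {
 *                              lck_rw_lock_exclusive(&my_table_lock);  // sets WANT_EXCL
 *                              my_table_add(e);                        // hypothetical helper
 *                              lck_rw_done(&my_table_lock);            // drop the exclusive hold
 *                      }
 */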
 2000                         .align  5
 2001                         .globl  EXT(lck_rw_lock_exclusive)
 2002 LEXT(lck_rw_lock_exclusive)
 2003 #if     !MACH_LDEBUG
 2004                         .globl  EXT(lock_write)
 2005 LEXT(lock_write)
 2006 #endif
 2007 rwleloop:       lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2008                         rlwinm. r7,r5,30,1,31                                   ; Can we have it?
 2009                         ori             r6,r5,WANT_EXCL                                 ; Mark Exclusive
 2010                         bne--   rwlespin                                                ; Branch if cannot be held
 2011                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2012                         bne--   rwleloop
 2013                         .globl  EXT(rwlePatch_isync)
 2014 LEXT(rwlePatch_isync)
 2015                         isync
 2016                         blr
 2017 rwlespin:
 2018                         li              r4,lgKillResv                                   ; Killing field
 2019                         stwcx.  r4,0,r4                                                 ; Kill it
 2020                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2021                         bne--   rwlespin1                                               ; No, go handle contention 
 2022                         mr              r4,r3                                                   ; pass lock pointer
 2023                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2024                         b               EXT(lck_rw_lock_exclusive_ext)
 2025 rwlespin1:
 2026                         b               EXT(lck_rw_lock_exclusive_gen)
 2027 
 2028 /*
 2029  *              void lck_rw_lock_shared(lck_rw_t*)
 2030  *
 2031  */
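/*
 *              Editor's usage sketch (not part of the original source): shared (read)
 *              acquires bump the reader count kept in the upper half of the lock word,
 *              so any number of readers may hold the lock at once.  Names continue the
 *              hypothetical table example above.
 *
 *                      struct my_entry *
 *                      my_table_lookup(int key)
 *                      {
 *                              struct my_entry *e;
 *
 *                              lck_rw_lock_shared(&my_table_lock);     // reader count += 1
 *                              e = my_table_find(key);                 // hypothetical helper
 *                              lck_rw_done(&my_table_lock);            // reader count -= 1
 *                              return e;
 *                      }
 */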
 2032                         .align  5
 2033                         .globl  EXT(lck_rw_lock_shared)
 2034 LEXT(lck_rw_lock_shared)
 2035 #if     !MACH_LDEBUG
 2036                         .globl  EXT(lock_read)
 2037 LEXT(lock_read)
 2038 #endif
 2039 rwlsloop:       lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2040                         andi.   r7,r5,WANT_EXCL|WANT_UPGRADE|ILK_LOCKED ; Can we have it?
 2041                         addis   r6,r5,1                                                 ; Increment read cnt
 2042                         bne--   rwlsspin                                                ; Branch if cannot be held
 2043                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2044                         bne--   rwlsloop
 2045                         .globl  EXT(rwlsPatch_isync)
 2046 LEXT(rwlsPatch_isync)
 2047                         isync
 2048                         blr
 2049 rwlsspin:
 2050                         li              r4,lgKillResv                                   ; Killing field
 2051                         stwcx.  r4,0,r4                                                 ; Kill it
 2052                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2053                         bne--   rwlsspin1                                               ; No, go handle contention 
 2054                         mr              r4,r3                                                   ; pass lock pointer
 2055                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2056                         b               EXT(lck_rw_lock_shared_ext)
 2057 rwlsspin1:
 2058                         b               EXT(lck_rw_lock_shared_gen)
 2059 
 2060 /*
 2061  *              boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t*)
 2062  *
 2063  */
 2064                         .align  5
 2065                         .globl  EXT(lck_rw_lock_shared_to_exclusive)
 2066 LEXT(lck_rw_lock_shared_to_exclusive)
 2067 #if     !MACH_LDEBUG
 2068                         .globl  EXT(lock_read_to_write)
 2069 LEXT(lock_read_to_write)
 2070 #endif
 2071 rwlseloop:      lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2072                         addis   r6,r5,0xFFFF                                    ; Decrement read cnt
 2073                         lis             r8,0xFFFF                                               ; Get read count mask
 2074                         ori             r8,r8,WANT_UPGRADE|ILK_LOCKED   ; Include Interlock and upgrade flags
 2075                         and.    r7,r6,r8                                                ; Can we have it?
 2076                         ori             r9,r6,WANT_UPGRADE                              ; Mark Exclusive
 2077                         bne--   rwlsespin                                               ; Branch if cannot be held
 2078                         stwcx.  r9,RW_DATA,r3                                   ; Update lock word
 2079                         bne--   rwlseloop
 2080                         .globl  EXT(rwlsePatch_isync)
 2081 LEXT(rwlsePatch_isync)
 2082                         isync
 2083                         li              r3,0                                                    ; Succeed, return FALSE...
 2084                         blr
 2085 rwlsespin:
 2086                         li              r4,lgKillResv                                   ; Killing field
 2087                         stwcx.  r4,0,r4                                                 ; Kill it
 2088                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2089                         bne--   rwlsespin1                                              ; No, go handle contention 
 2090                         mr              r4,r3                                                   ; pass lock pointer
 2091                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2092                         b               EXT(lck_rw_lock_shared_to_exclusive_ext)
 2093 rwlsespin1:
 2094                         b               EXT(lck_rw_lock_shared_to_exclusive_gen)
 2095 
 2096 
 2097 
 2098 /*
 2099  *              void lck_rw_lock_exclusive_to_shared(lck_rw_t*)
 2100  *
 2101  */
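/*
 *              Editor's usage sketch (not part of the original source): a writer that has
 *              finished updating can downgrade to a shared hold without ever releasing
 *              the lock, waking waiters while it continues to read.  Names continue the
 *              hypothetical table example above.
 *
 *                      lck_rw_lock_exclusive(&my_table_lock);                  // write phase
 *                      my_table_add(e);
 *                      lck_rw_lock_exclusive_to_shared(&my_table_lock);        // wake waiters, keep read hold
 *                      my_table_scan();                                        // read-only phase
 *                      lck_rw_done(&my_table_lock);                            // drop the shared hold
 */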
 2102                         .align  5
 2103                         .globl  EXT(lck_rw_lock_exclusive_to_shared)
 2104 LEXT(lck_rw_lock_exclusive_to_shared)
 2105 #if     !MACH_LDEBUG
 2106                         .globl  EXT(lock_write_to_read)
 2107 LEXT(lock_write_to_read)
 2108 #endif
 2109                         .globl  EXT(rwlesPatch_isync)
 2110 LEXT(rwlesPatch_isync)
 2111                         isync
 2112                         .globl  EXT(rwlesPatch_eieio)
 2113 LEXT(rwlesPatch_eieio)
 2114                         eieio
 2115 rwlesloop:      lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2116                         andi.   r7,r5,ILK_LOCKED                                ; Test interlock flag
 2117                         bne--   rwlesspin                                               ; Branch if interlocked
 2118                         lis             r6,1                                                    ; Get 1 for read count
 2119                         andi.   r10,r5,WANT_UPGRADE                             ; Is it held with upgrade
 2120                         li              r9,WANT_UPGRADE|WAIT_FLAG               ; Get upgrade and wait flags mask
 2121                         bne             rwlesexcl1                                              ; Skip if held with upgrade
 2122                         li              r9,WANT_EXCL|WAIT_FLAG                  ; Get exclusive and wait flags mask
 2123 rwlesexcl1:
 2124                         andc    r7,r5,r9                                                ; Marked free
 2125                         rlwimi  r6,r7,0,16,31                                   ; Set shared cnt to one
 2126                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2127                         bne--   rwlesloop
 2128                         andi.   r7,r5,WAIT_FLAG                                 ; Test wait flag
 2129                         beqlr++                                                                 ; Return if no waiters
 2130                         addi    r3,r3,RW_EVENT                                  ; Get lock event address
 2131                         b               EXT(thread_wakeup)                              ; wakeup waiters
 2132 rwlesspin:
 2133                         li              r4,lgKillResv                                   ; Killing field
 2134                         stwcx.  r4,0,r4                                                 ; Kill it
 2135                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2136                         bne--   rwlesspin1                                              ; No, go handle contention 
 2137                         mr              r4,r3                                                   ; pass lock pointer
 2138                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2139                         b               EXT(lck_rw_lock_exclusive_to_shared_ext)
 2140 rwlesspin1:
 2141                         b               EXT(lck_rw_lock_exclusive_to_shared_gen)
 2142 
 2143 
 2144 
 2145 /*
 2146  *              boolean_t lck_rw_try_lock_exclusive(lck_rw_t*)
 2147  *
 2148  */
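/*
 *              Editor's usage sketch (not part of the original source): the try fast path
 *              below returns TRUE only when no readers, exclusive holder, or upgrader are
 *              present, and it never blocks; an interlocked word is passed on to the
 *              _ext/_gen handlers.  Names continue the hypothetical table example above.
 *
 *                      boolean_t
 *                      my_table_try_insert(struct my_entry *e)
 *                      {
 *                              if (!lck_rw_try_lock_exclusive(&my_table_lock))
 *                                      return FALSE;                   // lock busy, caller retries
 *                              my_table_add(e);                        // hypothetical helper
 *                              lck_rw_done(&my_table_lock);
 *                              return TRUE;
 *                      }
 */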
 2149                         .align  5
 2150                         .globl  EXT(lck_rw_try_lock_exclusive)
 2151 LEXT(lck_rw_try_lock_exclusive)
 2152                         lis             r10,0xFFFF                                              ; Load read count mask
 2153                         ori             r10,r10,WANT_EXCL|WANT_UPGRADE  ; Include exclusive and upgrade flags
 2154 rwtleloop:      lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2155                         andi.   r7,r5,ILK_LOCKED                                ; Test interlock flag
 2156                         bne--   rwtlespin                                               ; Branch if interlocked
 2157                         and.    r7,r5,r10                                               ; Can we have it
 2158                         ori             r6,r5,WANT_EXCL                                 ; Mark Exclusive
 2159                         bne--   rwtlefail                                               ; Branch if it cannot be had
 2160                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2161                         bne--   rwtleloop
 2162                         .globl  EXT(rwtlePatch_isync)
 2163 LEXT(rwtlePatch_isync)
 2164                         isync
 2165                         li              r3,1                                                    ; Return TRUE
 2166                         blr
 2167 rwtlefail:
 2168                         li              r4,lgKillResv                                   ; Killing field
 2169                         stwcx.  r4,0,r4                                                 ; Kill it
 2170                         li              r3,0                                                    ; Return FALSE
 2171                         blr
 2172 rwtlespin:
 2173                         li              r4,lgKillResv                                   ; Killing field
 2174                         stwcx.  r4,0,r4                                                 ; Kill it
 2175                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2176                         bne--   rwtlespin1                                              ; No, go handle contention 
 2177                         mr              r4,r3                                                   ; pass lock pointer
 2178                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2179                         b               EXT(lck_rw_try_lock_exclusive_ext)
 2180 rwtlespin1:
 2181                         b               EXT(lck_rw_try_lock_exclusive_gen)
 2182 
 2183 
 2184 /*
 2185  *              boolean_t lck_rw_try_lock_shared(lck_rw_t*)
 2186  *
 2187  */
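/*
 *              Editor's usage sketch (not part of the original source): the shared try
 *              fast path fails only when an exclusive holder or an upgrader is present;
 *              an interlocked word is passed on to the _ext/_gen handlers.  Names
 *              continue the hypothetical table example above.
 *
 *                      if (lck_rw_try_lock_shared(&my_table_lock)) {   // TRUE: read hold taken
 *                              my_table_scan();                        // hypothetical helper
 *                              lck_rw_done(&my_table_lock);
 *                      }
 */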
 2188                         .align  5
 2189                         .globl  EXT(lck_rw_try_lock_shared)
 2190 LEXT(lck_rw_try_lock_shared)
 2191 rwtlsloop:      lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2192                         andi.   r7,r5,ILK_LOCKED                                ; Test interlock flag
 2193                         bne--   rwtlsspin                                               ; Branch if interlocked
 2194                         andi.   r7,r5,WANT_EXCL|WANT_UPGRADE    ; So, can we have it?
 2195                         addis   r6,r5,1                                                 ; Increment read cnt
 2196                         bne--   rwtlsfail                                               ; Branch if held exclusive
 2197                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2198                         bne--   rwtlsloop
 2199                         .globl  EXT(rwtlsPatch_isync)
 2200 LEXT(rwtlsPatch_isync)
 2201                         isync
 2202                         li              r3,1                                                    ; Return TRUE
 2203                         blr
 2204 rwtlsfail:
 2205                         li              r3,0                                                    ; Return FALSE
 2206                         blr
 2207 rwtlsspin:
 2208                         li              r4,lgKillResv                                   ; Killing field
 2209                         stwcx.  r4,0,r4                                                 ; Kill it
 2210                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2211                         bne--   rwtlsspin1                                              ; No, go handle contention 
 2212                         mr              r4,r3                                                   ; pass lock pointer
 2213                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2214                         b               EXT(lck_rw_try_lock_shared_ext)
 2215 rwtlsspin1:
 2216                         b               EXT(lck_rw_try_lock_shared_gen)
 2217 
 2218 
 2219 
 2220 /*
 2221  *              lck_rw_type_t lck_rw_done(lck_rw_t*)
 2222  *
 2223  */
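/*
 *              Editor's usage sketch (not part of the original source): lck_rw_done()
 *              reports which kind of hold it released, which helps callers that take the
 *              lock in different modes on different paths.  The RW_SHARED/RW_EXCL values
 *              returned below correspond to the C-level lck_rw_type_t constants; the
 *              constant name and the counters are assumptions made only for this example.
 *
 *                      lck_rw_type_t held;
 *
 *                      held = lck_rw_done(&my_table_lock);
 *                      if (held == LCK_RW_TYPE_SHARED)
 *                              my_read_unlocks++;              // hypothetical counter
 *                      else
 *                              my_write_unlocks++;             // an exclusive hold was dropped
 */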
 2224                         .align  5
 2225                         .globl  EXT(lck_rw_done)
 2226 LEXT(lck_rw_done)
 2227 #if     !MACH_LDEBUG
 2228                         .globl  EXT(lock_done)
 2229 LEXT(lock_done)
 2230 #endif
 2231                         .globl  EXT(rwldPatch_isync)
 2232 LEXT(rwldPatch_isync)
 2233                         isync
 2234                         .globl  EXT(rwldPatch_eieio)
 2235 LEXT(rwldPatch_eieio)
 2236                         eieio
 2237                         li              r10,WAIT_FLAG                                   ; Get wait flag
 2238                         lis             r7,0xFFFF                                               ; Get read cnt mask
 2239                         mr              r12,r3                                                  ; Save lock addr
 2240 rwldloop:       lwarx   r5,RW_DATA,r3                                   ; Grab the lock value
 2241                         andi.   r8,r5,ILK_LOCKED                                ; Test interlock flag
 2242                         bne--   rwldspin                                                ; Branch if interlocked
 2243                         and.    r8,r5,r7                                                ; Is it shared
 2244                         cmpi    cr1,r8,0                                                ; Remember shared state in cr1 for the return
 2245                         beq             cr1,rwldexcl                                    ; No, check exclusive
 2246                         li              r11,RW_SHARED                                   ; Set return value
 2247                         addis   r6,r5,0xFFFF                                    ; Decrement read count
 2248                         and.    r8,r6,r7                                                ; Is it still shared
 2249                         li              r8,0                                                    ; Assume no wakeup
 2250                         bne             rwldshared1                                             ; Skip if still held shared
 2251                         and             r8,r6,r10                                               ; Extract wait flag
 2252                         andc    r6,r6,r10                                               ; Clear wait flag
 2253 rwldshared1:
 2254                         b               rwldstore
 2255 rwldexcl:
 2256                         li              r11,RW_EXCL                                             ; Set return value
 2257                         li              r9,WANT_UPGRADE                                 ; Get upgrade flag
 2258                         and.    r6,r5,r9                                                ; Is it held with upgrade
 2259                         li              r9,WANT_UPGRADE|WAIT_FLAG               ; Mask upgrade and wait flags
 2260                         bne             rwldexcl1                                               ; Skip if held with upgrade
 2261                         li              r9,WANT_EXCL|WAIT_FLAG                  ; Mask exclusive and wait flags
 2262 rwldexcl1:
 2263                         andc    r6,r5,r9                                                ; Marked free
 2264                         and             r8,r5,r10                                               ; Null if no waiter
 2265 rwldstore:
 2266                         stwcx.  r6,RW_DATA,r3                                   ; Update lock word
 2267                         bne--   rwldloop
 2268                         mr.             r8,r8                                                   ; wakeup needed?
 2269                         mr              r3,r11                                                  ; Return lock held type
 2270                         beqlr++
 2271                         mr              r3,r12                                                  ; Restore lock address
 2272                         PROLOG(0)
 2273                         addi    r3,r3,RW_EVENT                                  ; Get lock event address
 2274                         bl              EXT(thread_wakeup)                              ; wakeup threads
 2275                         lwz             r2,(FM_ALIGN(0)+FM_SIZE+FM_CR_SAVE)(r1)
 2276                         mtcr    r2
 2277                         EPILOG
 2278                         li              r3,RW_SHARED                                    ; Assume lock type shared
 2279                         bne             cr1,rwldret                                             ; Branch if it was held shared
 2280                         li              r3,RW_EXCL                                              ; Return lock type exclusive
 2281 rwldret:
 2282                         blr
 2283 rwldspin:
 2284                         li              r4,lgKillResv                                   ; Killing field
 2285                         stwcx.  r4,0,r4                                                 ; Kill it
 2286                         cmpli   cr0,r5,RW_IND                                   ; Is it an indirect lock?
 2287                         bne--   rwldspin1                                               ; No, go handle contention 
 2288                         mr              r4,r3                                                   ; pass lock pointer
 2289                         lwz             r3,RW_PTR(r3)                                   ; load lock ext pointer
 2290                         b               EXT(lck_rw_done_ext)
 2291 rwldspin1:
 2292                         b               EXT(lck_rw_done_gen)
 2293 
 2294 /*
 2295  *              void lck_rw_ilk_lock(lck_rw_t *lock)
 2296  */
 2297                         .globl  EXT(lck_rw_ilk_lock)
 2298 LEXT(lck_rw_ilk_lock)
 2299                         crclr   hwtimeout                                               ; no timeout option
 2300                         li              r4,0                                                    ; request default timeout value
 2301                         li              r12,ILK_LOCKED                                  ; Load bit mask
 2302                         b               lckcomm                                                 ; Join on up...
 2303 
 2304 /*
 2305  *              void lck_rw_ilk_unlock(lck_rw_t *lock)
 2306  */
 2307                         .globl  EXT(lck_rw_ilk_unlock)
 2308 LEXT(lck_rw_ilk_unlock)
 2309                         li              r4,1
 2310                         b               EXT(hw_unlock_bit)
