The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sqt/mutex.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1991 Carnegie Mellon University
    4  * Copyright (c) 1991 Sequent Computer Systems
    5  * All Rights Reserved.
    6  * 
    7  * Permission to use, copy, modify and distribute this software and its
    8  * documentation is hereby granted, provided that both the copyright
    9  * notice and this permission notice appear in all copies of the
   10  * software, derivative works or modified versions, and any portions
   11  * thereof, and that both notices appear in supporting documentation.
   12  * 
   13  * CARNEGIE MELLON AND SEQUENT COMPUTER SYSTEMS ALLOW FREE USE OF
   14  * THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON AND
   15  * SEQUENT COMPUTER SYSTEMS DISCLAIM ANY LIABILITY OF ANY KIND FOR
   16  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   17  * 
   18  * Carnegie Mellon requests users of this software to return to
   19  * 
   20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   21  *  School of Computer Science
   22  *  Carnegie Mellon University
   23  *  Pittsburgh PA 15213-3890
   24  * 
   25  * any improvements or extensions that they make and grant Carnegie Mellon 
   26  * the rights to redistribute these changes.
   27  */
   28 
   29 /*
   30  * HISTORY
   31  * $Log:        mutex.s,v $
   32  * Revision 2.3  91/07/31  18:03:23  dbg
   33  *      Changed copyright.
   34  *      [91/07/31            dbg]
   35  * 
   36  * Revision 2.2  91/05/08  12:58:26  dbg
   37  *      Use entire word for lock to match lock primitives.
   38  *      [91/02/05            dbg]
   39  * 
   40  *      Put parentheses around substituted immediate expressions, so
   41  *      that they will pass through the GNU preprocessor.
   42  *      [91/01/31            dbg]
   43  * 
   44  *      Adapted for pure kernel.
   45  *      [90/09/24            dbg]
   46  * 
   47  */
   48 
   49 /*
   50  * mutex.s
   51  *
   52  * Implements: simple locks, interlocked bit set
   53  * and clear, and bit locks.  Should eventually be moved to inline asm
   54  *
   55  * This code assumes that the caller uses spl to implement
   56  * interrupt mutex as needed.
   57  */
   58 
   59 #include <machine/asm.h>
   60 #include <assym.s>
   61 #include <sqt/asm_macros.h>
   62 #include <sqt/intctl.h>
   63 
    64 /*
    65  * Simple locks occupy one word.  A value of zero is unlocked,
    66  * a value of one is locked.
        *
        * CPLOCKFAIL is the failure return of cp_lock(): it is chosen
        * outside the range of valid SPL values so callers can tell
        * "lock busy" apart from any previous interrupt priority level.
    67  */
    68 
    69 #define L_LOCKED        1
    70 #define L_UNLOCKED      0
    71 #define CPLOCKFAIL      (-1)            /* invalid SPL value */
   72 
    73  /*
    74   * simple_lock_init(int *lock) - set the lock to the unlocked state
          *
          * Plain (non-interlocked) store: initialization must not race
          * with concurrent lock/unlock traffic on the same word.
    75   */
    76 
    77 ENTRY(simple_lock_init)
    78         movl    S_ARG0, %ecx            / lock address
    79         movl    $(L_UNLOCKED), (%ecx)   / unlock
    80         ret
   81 
    82  /*
    83   * simple_unlock(int *lock) - release a lock
          *
          * Release is done with xchgl rather than a plain store: on the
          * i386, xchg with a memory operand is implicitly bus-locked,
          * making this an interlocked clear of the lock word.
    84   */
    85 
    86 ENTRY(simple_unlock)
    87 #ifdef  SIMPLE_LOCK_LOG
    88         pushl   S_ARG0
    89         pushl   $0                      / _lock_log(0, lock)
    90         call    _lock_log
    91         addl    $8, %esp
    92 #endif  SIMPLE_LOCK_LOG
    93         movl    S_ARG0, %ecx            / lock address
    94         movl    $(L_UNLOCKED), %eax     / exchange value
    95         xchgl   %eax, (%ecx)            / exchange lock and acc.
    96         ret
   97 
    98 /*
    99  * simple_lock(int *lock) - set lock with busy wait
        *
        * Test-and-test-and-set: the interlocked xchgl is attempted at 0:,
        * and while the lock is held the code spins at 1: on plain reads
        * of the lock word (no bus-locked cycles), retrying the xchgl
        * only once the word reads unlocked.
   100  */
   101 
   102 ENTRY(simple_lock)
   103 #ifdef  SIMPLE_LOCK_LOG
   104         pushl   S_ARG0
   105         pushl   $1                      / _lock_log(1, lock)
   106         call    _lock_log
   107         addl    $8, %esp
   108 #endif  SIMPLE_LOCK_LOG
   109         movl    S_ARG0, %ecx            / lock address
   110 0:
   111         movl    $(L_LOCKED), %eax       / exchange value
   112         xchgl   %eax, (%ecx)            / exchange lock and acc.
   113         cmpl    $(L_UNLOCKED), %eax     / test if was unlocked
   114         je      2f                      / jump if so - we have lock
   115 1:
   116         cmpl    $(L_UNLOCKED), (%ecx)   / otherwise, spin until
   117         jne     1b                      / value is UNLOCKED
   118         jmp     0b                      / and try again
   119 2:
   120 #ifdef  SIMPLE_LOCK_LOG
   121         pushl   S_ARG0
   122         pushl   $2                      / _lock_log(2, lock)
   123         call    _lock_log
   124         addl    $8, %esp
   125 #endif  SIMPLE_LOCK_LOG
   126         ret                             / got lock, return 
  127 
   128 /*
   129  * simple_lock_try(int *lock) - try setting lock.
   130  * Returns zero if unsuccessful and non-zero if successful.
        *
        * Single interlocked xchgl; no spin on failure.
   131  */
   132 
   133 ENTRY(simple_lock_try)
   134         movl    S_ARG0, %ecx            / lock address
   135         movl    $(L_LOCKED), %eax       / exchange value
   136         xchgl   %eax, (%ecx)            / exchange lock and acc.
   137         cmpl    $(L_UNLOCKED), %eax     / was it unlocked?
   138         je      9f                      / jump if yes
   139         movl    $0, %eax                / else, return 0
   140         ret
   141 9:
        / %eax == L_UNLOCKED (0) from the xchgl above, so setting only
        / %al yields a full-register return value of exactly 1.
   142         movb    $1, %al                 / non-zero return
   143         ret
  144 
   145 /*
   146  * i_bit_set(int bitno, int *s) - atomically set bit in bit string
        *
        * Locked btsl with a register bit offset: per the i386 bit-test
        * semantics, bitno may index beyond the first word of the string
        * (it is not limited to 0..31).
   147  */
   148 
   149 ENTRY(i_bit_set)
   150         movl    S_ARG0, %ecx            / bit number
   151         movl    S_ARG1, %eax            / address
   152         lock
   153         btsl    %ecx, (%eax)            / set bit
   154         ret
  155 
   156 /*
   157  * i_bit_clear(int bitno, int *s) - atomically clear bit in bit string
        *
        * bit_unlock() is an alternate entry point: releasing a bit lock
        * is the same interlocked clear of the lock bit.
   158  */
   159 
   160 ENTRY(i_bit_clear)
   161 ENTRY(bit_unlock)
   162         movl    S_ARG0, %ecx            / bit number
   163         movl    S_ARG1, %eax            / address
   164         lock
   165         btrl    %ecx, (%eax)            / clear bit
   166         ret
  167 
   168 /*
   169  * bit_lock(int bitno, int *s) - set a bit lock with busy wait
        *
        * btsl copies the previous bit value into CF and then sets the
        * bit; jb (same encoding as jc) loops while the bit was already
        * set.  Note this spin re-executes the bus-locked btsl on every
        * pass -- there is no read-only spin as in simple_lock().
   170  */
   171 
   172 ENTRY(bit_lock)
   173         movl    S_ARG0, %ecx            / bit number
   174         movl    S_ARG1, %eax            / address
   175 1:
   176         lock
   177         btsl    %ecx, (%eax)            / test and set bit
   178         jb      1b                      / if was set, loop      /* jc? */
   179         ret                             / otherwise have lock
  180 
   181 /*
   182  * bit_lock_try(int bitno, int *s) - try to set bit lock.
   183  * Returns 0 on failure, 1 on success
        *
        * Single locked btsl: CF receives the previous bit value, so
        * CF set means the bit lock was already held by someone else.
   184  */
   185 
   186 ENTRY(bit_lock_try)
   187         movl    S_ARG0, %ecx            / bit number
   188         movl    S_ARG1, %eax            / address
   189         lock
   190         btsl    %ecx, (%eax)            / test and set bit
   191         jb      1f                      / jump, if was set      /* jc? */
   192         movl    $1, %eax                / else, return success
   193         ret
   194 1:
   195         movl    $0,  %eax               / return failure
   196         ret
  197 
   198 /*
   199  * spl_t
   200  * p_lock(lockp, retipl)
   201  *      lock_t  *lockp;
   202  *      spl_t   retipl;
   203  *
   204  * Lock the lock and return at interrupt priority level "retipl".
   205  * Return previous interrupt priority level.
   206  *
   207  * When the assembler handles inter-sub-segment branches, the "fail/spin"
   208  * code can be moved out-of-line in (eg) ".text 3".
   209  *
   210  * After writing SLIC local mask, must do a read to synchronize the write
   211  * and then ensure 500 ns = 8 cycles @ 16MHz elapse before the
   212  * xchg (to allow a now-masked interrupt to occur before holding the
   213  * locked resource).  The time of the "read" counts 2 cycles towards the 500ns.
        *
        * If the xchgl misses, the previous mask is restored so the spin
        * at 1: runs at the old priority; the new mask is written again
        * (back at 0:) before every retry of the xchgl.  The instruction
        * order between the mask write and the xchgl is clock-counted
        * (see "sync+N clocks" annotations) -- do not reorder.
        *
        * NOTE(review): only %al carries the previous mask at return;
        * %ah still holds the new mask and the upper half of %eax is
        * untouched.  Callers presumably use just the low byte of the
        * returned spl_t -- confirm.
   214  */
   215 
   216         .globl  _p_lock
   217 _p_lock:
   218         movb    S_ARG1, %ah                     / new interrupt mask
   219         movb    VA_SLIC+SL_LMASK, %al           / old interrupt mask
   220 /PEEPOFF                                        / turn off peephole optimizer
   221 0:      movb    %ah, VA_SLIC+SL_LMASK           / write new mask
   222 /*****************************************************************/
   223         movb    VA_SLIC+SL_LMASK, %dl           / dummy read to synch write
   224                                                 / sync+2 clocks
   225         movl    $(L_LOCKED), %edx               / value to exchange
   226                                                 / sync+4 clocks
   227         movl    S_ARG0, %ecx                    / &lock
   228                                                 / sync+8
   229 #if MHz == 20
   230         movl    %ecx, %ecx                      / 2 cycle nop
   231         movl    %ecx, %ecx                      / 2 cycle nop
   232 #endif
   233 /PEEPON                                         / turn peephole opt back on
   234 /***************SLICSYNC 8/12 ***************************************/
   235         xchgl   %edx, (%ecx)                    / try for lock
   236         cmpl    $(L_UNLOCKED), %edx             / got it?
   237         je      2f                              / yup
   238         movb    %al, VA_SLIC+SL_LMASK           / restore previous mask
   239 1:      cmpl    $(L_UNLOCKED), (%ecx)           / spin until...
   240         je      0b                              /       ...lock is clear
   241         jmp     1b                              / while not clear...
   242 2:
   243         ret
  244 
  245 
   246 /*
   247  * spl_t
   248  * cp_lock(lockp, retipl)
   249  *      lock_t  *lockp;
   250  *      spl_t   retipl;
   251  *
   252  * Conditionally acquire a lock.
   253  *
   254  * If lock is available, lock the lock and return at interrupt priority
   255  * level "retipl". Return previous interrupt priority level.
   256  * If lock is unavailable, return CPLOCKFAIL.
   257  *
   258  * See comments in p_lock() about writing SLIC mask.
        *
        * Unlike p_lock(), there is no spin: a single xchgl attempt is
        * made, and on failure the previous mask is restored and
        * CPLOCKFAIL (-1) is returned.  The instruction order between
        * the mask write and the xchgl is clock-counted ("sync+N
        * clocks") -- do not reorder.
        *
        * NOTE(review): on success the result is the old mask in %al
        * with %ah cleared (guaranteeing %eax != CPLOCKFAIL); the upper
        * 16 bits of %eax are untouched.  Callers presumably test the
        * returned spl_t only against CPLOCKFAIL and the low byte --
        * confirm.
   259  */
   260 
   261         .globl  _cp_lock
   262 _cp_lock:
   263         movb    S_ARG1, %ah                     / new interrupt mask
   264 /PEEPOFF                                        / turn off peephole optimizer
   265         movb    VA_SLIC+SL_LMASK, %al           / old interrupt mask
   266 /*****************************************************************/
   267         movb    %ah, VA_SLIC+SL_LMASK           / write new mask
   268         movb    VA_SLIC+SL_LMASK, %dl           / dummy read to synch write
   269                                                 / sync+2 clocks
   270         movl    $(L_LOCKED), %edx               / value to exchange
   271                                                 / sync+4 clocks
   272         movl    S_ARG0, %ecx                    / &lock
   273                                                 / sync+8 clocks
   274         movb    $0, %ah                         / so %eax != CPLOCKFAIL
   275                                                 / sync+10 clocks
   276 #if MHz == 20
   277         movl    %eax,%eax                       / 2 cycle nop
   278 #endif
   279 /***************SLICSYNC 10/12 ***************************************/
   280 /PEEPON                                         / turn peephole opt back on
   281         xchgl   %edx, (%ecx)                    / try for lock
   282                                                 / sync+13 clocks
   283         cmpl    $(L_UNLOCKED), %edx             / got it?
   284                                                 / sync+15 clocks
   285         je      0f                              / yup
   286         movb    %al, VA_SLIC+SL_LMASK           / restore previous mask
   287         movl    $(CPLOCKFAIL), %eax             / and return failure
   288 0:
   289         ret
  290 
  291 

Cache object: 5f3f2ed0879109ea0f88e6d3f17eaa34


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.