sys/i386/i386/simplelock.s

/*-
 * Copyright (c) 1997, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
 */

#include <machine/asmacros.h>                   /* miscellaneous macros */
#include <i386/isa/intr_machdep.h>
#include <machine/psl.h>

#include <machine/smptests.h>                   /** FAST_HI */

/*
 * The following implements the primitives described in i386/i386/param.h
 * necessary for the Lite2 lock manager system.
 * The major difference is that the "volatility" of the lock datum has been
 * pushed down from the various functions to lock_data itself.
 */

/*
 * The simple-lock routines are the primitives out of which the lock
 * package is built. The machine-dependent code must implement an
 * atomic test_and_set operation that indivisibly sets the simple lock
 * to non-zero and returns its old value. It also assumes that the
 * setting of the lock to zero below is indivisible. Simple locks may
 * only be used for exclusive locks.
 *
 * struct simplelock {
 *      volatile int    lock_data;
 * };
 */
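
/*
 * Conceptually, test_and_set does the following (a sketch only; the
 * two steps must happen indivisibly, which is why the routines below
 * use a single xchgl rather than this non-atomic C):
 *
 * int
 * test_and_set(volatile int *lock_data)
 * {
 *      int old;
 *
 *      old = *lock_data;
 *      *lock_data = 1;
 *      return (old);
 * }
 */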

/*
 * void
 * s_lock_init(struct simplelock *lkp)
 * {
 *      lkp->lock_data = 0;
 * }
 */
ENTRY(s_lock_init)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        ret


/*
 * void
 * s_lock(struct simplelock *lkp)
 * {
 *      while (test_and_set(&lkp->lock_data))
 *              continue;
 * }
 *
 * Note:
 *      If the acquire fails, we sit in a loop of reads waiting for the
 *      lock to become free, instead of continually beating on the lock
 *      with xchgl.  The theory here is that the CPU will stay within its
 *      cache until a write by the other CPU updates it, instead of
 *      continually updating the local cache (and thus causing external
 *      bus writes) with repeated writes to the lock.
 */
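
/*
 * Illustrative C for the spin strategy above (a sketch, not compiled
 * code): the inner loop spins on plain reads, so the cache line stays
 * shared until the holder writes zero; only then do we retry the
 * atomic test_and_set.
 *
 * void
 * s_lock(struct simplelock *lkp)
 * {
 *      while (test_and_set(&lkp->lock_data)) {
 *              while (lkp->lock_data != 0)
 *                      continue;
 *      }
 * }
 */
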
#ifndef SL_DEBUG

ENTRY(s_lock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx
setlock:
        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        jz      gotit                   /* it was clear, return */
wait:
        cmpl    $0, (%eax)              /* wait to empty */
        jne     wait                    /* still set... */
        jmp     setlock                 /* empty again, try once more */
gotit:
        ret

#else /* SL_DEBUG */

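/*
 * Debug version: the lock word holds the owner's cpu_lockid plus one
 * rather than a bare 1, so a CPU that tries to re-acquire a simple
 * lock it already holds is caught by the cmpxchgl failure path below
 * and panics instead of spinning forever.
 */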
ENTRY(s_lock)
        movl    4(%esp), %edx           /* get the address of the lock */
setlock:
        movl    _cpu_lockid, %ecx       /* add cpu id portion */
        incl    %ecx                    /* add lock portion */
        movl    $0, %eax
        lock
        cmpxchgl %ecx, (%edx)
        jz      gotit                   /* it was clear, return */
        pushl   %eax                    /* save what we xchanged */
        decl    %eax                    /* remove lock portion */
        cmpl    _cpu_lockid, %eax       /* do we hold it? */
        je      bad_slock               /* yes, that's not good... */
        addl    $4, %esp                /* clear the stack */
wait:
        cmpl    $0, (%edx)              /* wait to empty */
        jne     wait                    /* still set... */
        jmp     setlock                 /* empty again, try once more */
gotit:
        ret

        ALIGN_TEXT
bad_slock:
        /* %eax (current lock) is already on the stack */
        pushl   %edx
        pushl   _cpuid
        pushl   $bsl1
        call    _panic

bsl1:   .asciz  "rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */


/*
 * int
 * s_lock_try(struct simplelock *lkp)
 * {
 *      return (!test_and_set(&lkp->lock_data));
 * }
 */
ENTRY(s_lock_try)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx

        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        setz    %al                     /* 1 if previous value was 0 */
        movzbl  %al, %eax               /* convert to an int */

        ret


/*
 * void
 * s_unlock(struct simplelock *lkp)
 * {
 *      lkp->lock_data = 0;
 * }
 */
ENTRY(s_unlock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        ret


#ifdef needed

/*
 * int test_and_set(struct simplelock *lkp);
 */
ENTRY(test_and_set)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx

        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        setz    %al                     /* 1 if previous value was 0 */
        movzbl  %al, %eax               /* convert to an int */

        ret

#endif /* needed */


/*
 * These versions of simple_lock block interrupts,
 * making them suitable for regions accessed by both top and bottom levels.
 * This is done by saving the current value of the cpu flags in a per-cpu
 * global, and disabling interrupts when the lock is taken.  When the
 * lock is released, interrupts might be enabled, depending upon the saved
 * cpu flags.
 * Because of this, they must ONLY be used for SHORT, deterministic paths!
 *
 * Note:
 * It would appear to be "bad behaviour" to blindly store a value in
 * ss_eflags, as this could destroy the previous contents.  But since ss_eflags
 * is a per-cpu variable, and it's fatal to attempt to acquire a simplelock
 * that you already hold, we get away with it.  This needs to be cleaned
 * up someday...
 */
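
/*
 * Illustrative C for the pair below (a sketch, not compiled code;
 * save_flags/restore_flags/disable_intr/enable_intr are stand-ins for
 * the pushfl/popfl/cli/sti instructions actually used):
 *
 * void
 * ss_lock(struct simplelock *lkp)
 * {
 *      int flags;
 *
 *      for (;;) {
 *              flags = save_flags();
 *              disable_intr();
 *              if (test_and_set(&lkp->lock_data) == 0)
 *                      break;
 *              restore_flags(flags);
 *              while (lkp->lock_data != 0)
 *                      continue;
 *      }
 *      ss_eflags = flags;
 * }
 *
 * void
 * ss_unlock(struct simplelock *lkp)
 * {
 *      lkp->lock_data = 0;
 *      if (ss_eflags & PSL_I)
 *              enable_intr();
 * }
 */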

/*
 * void ss_lock(struct simplelock *lkp)
 */
#ifndef SL_DEBUG

ENTRY(ss_lock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx                /* value for a held lock */
ssetlock:
        pushfl
        cli
        xchgl   %ecx, (%eax)            /* compete */
        testl   %ecx, %ecx
        jz      sgotit                  /* it was clear, return */
        popfl                           /* restore previous eflags while waiting */
swait:
        cmpl    $0, (%eax)              /* wait to empty */
        jne     swait                   /* still set... */
        jmp     ssetlock                /* empty again, try once more */
sgotit:
        popl    _ss_eflags              /* save the old eflags */
        ret

#else /* SL_DEBUG */

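/*
 * Debug version of ss_lock: same owner-id scheme as the debug s_lock
 * above, with interrupts disabled while competing for the lock.
 */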
ENTRY(ss_lock)
        movl    4(%esp), %edx           /* get the address of the lock */
ssetlock:
        movl    _cpu_lockid, %ecx       /* add cpu id portion */
        incl    %ecx                    /* add lock portion */
        pushfl
        cli
        movl    $0, %eax
        lock
        cmpxchgl %ecx, (%edx)           /* compete */
        jz      sgotit                  /* it was clear, return */
        pushl   %eax                    /* save what we xchanged */
        decl    %eax                    /* remove lock portion */
        cmpl    _cpu_lockid, %eax       /* do we hold it? */
        je      sbad_slock              /* yes, that's not good... */
        addl    $4, %esp                /* clear the stack */
        popfl
swait:
        cmpl    $0, (%edx)              /* wait to empty */
        jne     swait                   /* still set... */
        jmp     ssetlock                /* empty again, try once more */
sgotit:
        popl    _ss_eflags              /* save the old eflags */
sgotit2:
        ret

        ALIGN_TEXT
sbad_slock:
        /* %eax (current lock) is already on the stack */
        pushl   %edx
        pushl   _cpuid
        pushl   $sbsl1
        call    _panic

sbsl1:  .asciz  "rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */

/*
 * void ss_unlock(struct simplelock *lkp)
 */
ENTRY(ss_unlock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)              /* clear the simple lock */
        testl   $PSL_I, _ss_eflags
        jz      ss_unlock2
        sti
ss_unlock2:
        ret

/*
 * These versions of simple_lock do not contain calls to profiling code.
 * Thus they can be called from the profiling code.
 */

/*
 * void s_lock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_lock_np)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx
1:
        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        jz      3f
2:
        cmpl    $0, (%eax)              /* wait to empty */
        jne     2b                      /* still set... */
        jmp     1b                      /* empty again, try once more */
3:
        NON_GPROF_RET

/*
 * void s_unlock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_unlock_np)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        NON_GPROF_RET
