FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/simplelock.s


/*-
 * Copyright (c) 1997, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
 */

#include <machine/asmacros.h>                   /* miscellaneous macros */
#include <i386/isa/intr_machdep.h>
#include <machine/psl.h>

#include <machine/smptests.h>                   /** FAST_HI */

/*
 * The following implements the primitives described in i386/i386/param.h
 * necessary for the Lite2 lock manager system.
 * The major difference is that the "volatility" of the lock datum has been
 * pushed down from the various functions to lock_data itself.
 */

/*
 * The simple-lock routines are the primitives out of which the lock
 * package is built. The machine-dependent code must implement an
 * atomic test_and_set operation that indivisibly sets the simple lock
 * to non-zero and returns its old value. It also assumes that the
 * setting of the lock to zero below is indivisible. Simple locks may
 * only be used for exclusive locks.
 *
 * struct simplelock {
 *      volatile int    lock_data;
 * };
 */
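
/*
 * A rough C rendering of that test_and_set contract (illustrative
 * only; the real primitive is the xchgl / lock cmpxchgl assembly
 * below, and the __atomic_exchange_n compiler builtin is used here
 * as a stand-in):
 *
 *      static __inline int
 *      test_and_set(volatile int *p)
 *      {
 *              // atomically store 1, return the old value
 *              return (__atomic_exchange_n(p, 1, __ATOMIC_ACQUIRE));
 *      }
 */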

/*
 * void
 * s_lock_init(struct simplelock *lkp)
 * {
 *      lkp->lock_data = 0;
 * }
 */
ENTRY(s_lock_init)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        ret


/*
 * void
 * s_lock(struct simplelock *lkp)
 * {
 *      while (test_and_set(&lkp->lock_data))
 *              continue;
 * }
 *
 * Note:
 *      If the acquire fails, we do a loop of reads waiting for the lock to
 *      become free instead of continually beating on the lock with xchgl.
 *      The theory here is that the CPU will stay within its cache until
 *      a write by the other CPU updates it, instead of continually updating
 *      the local cache (and thus causing external bus writes) with repeated
 *      writes to the lock.
 */
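/*
 * That strategy, sketched in C (a hedged equivalent of the assembly
 * below; test_and_set is the primitive described above):
 *
 *      while (test_and_set(&lkp->lock_data)) {
 *              while (lkp->lock_data != 0)
 *                      continue;       // read-only spin stays in cache
 *      }
 */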
#ifndef SL_DEBUG

ENTRY(s_lock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx
setlock:
        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        jz      gotit                   /* it was clear, return */
wait:
        pause
        cmpl    $0, (%eax)              /* wait to empty */
        jne     wait                    /* still set... */
        jmp     setlock                 /* empty again, try once more */
gotit:
        ret

#else /* SL_DEBUG */

ENTRY(s_lock)
        movl    4(%esp), %edx           /* get the address of the lock */
setlock:
        movl    _cpu_lockid, %ecx       /* add cpu id portion */
        incl    %ecx                    /* add lock portion */
        movl    $0, %eax
        lock
        cmpxchgl %ecx, (%edx)
        jz      gotit                   /* it was clear, return */
        pushl   %eax                    /* save what we exchanged */
        decl    %eax                    /* remove lock portion */
        cmpl    _cpu_lockid, %eax       /* do we hold it? */
        je      bad_slock               /* yes, that's not good... */
        addl    $4, %esp                /* clear the stack */
wait:
        pause
        cmpl    $0, (%edx)              /* wait to empty */
        jne     wait                    /* still set... */
        jmp     setlock                 /* empty again, try once more */
gotit:
        ret

        ALIGN_TEXT
bad_slock:
        /* %eax (current lock) is already on the stack */
        pushl   %edx
        pushl   _cpuid
        pushl   $bsl1
        call    _panic

bsl1:   .asciz  "rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */
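
/*
 * The SL_DEBUG variant above stores cpu_lockid + 1 in the lock word
 * instead of 1, so a failed acquire can detect recursion by the same
 * CPU.  Sketched in C (cmpxchg_int is a hypothetical stand-in for the
 * lock cmpxchgl sequence, returning the old value of the lock word):
 *
 *      void
 *      s_lock_dbg(struct simplelock *lkp)
 *      {
 *              int old, new = cpu_lockid + 1;
 *
 *              while ((old = cmpxchg_int(&lkp->lock_data, 0, new)) != 0) {
 *                      if (old == new)         // we already hold it
 *                              panic("rslock: cpu: %d, addr: 0x%08x, "
 *                                  "lock: 0x%08x", cpuid, (int)lkp, old);
 *                      while (lkp->lock_data != 0)
 *                              continue;       // spin on reads
 *              }
 *      }
 */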


/*
 * int
 * s_lock_try(struct simplelock *lkp)
 * {
 *      return (!test_and_set(&lkp->lock_data));
 * }
 */
#ifndef SL_DEBUG

ENTRY(s_lock_try)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx

        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        setz    %al                     /* 1 if previous value was 0 */
        movzbl  %al, %eax               /* convert to an int */

        ret

#else /* SL_DEBUG */

ENTRY(s_lock_try)
        movl    4(%esp), %edx           /* get the address of the lock */
        movl    _cpu_lockid, %ecx       /* add cpu id portion */
        incl    %ecx                    /* add lock portion */

        xorl    %eax, %eax
        lock
        cmpxchgl %ecx, (%edx)
        setz    %al                     /* 1 if previous value was 0 */
        movzbl  %al, %eax               /* convert to an int */

        ret

#endif /* SL_DEBUG */
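
/*
 * Equivalent C for the try-lock above (a sketch; __atomic_exchange_n
 * again stands in for xchgl):
 *
 *      int
 *      s_lock_try_c(struct simplelock *lkp)
 *      {
 *              // 1 if we took the lock, 0 if it was already held
 *              return (__atomic_exchange_n(&lkp->lock_data, 1,
 *                  __ATOMIC_ACQUIRE) == 0);
 *      }
 */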


/*
 * void
 * s_unlock(struct simplelock *lkp)
 * {
 *      lkp->lock_data = 0;
 * }
 */
ENTRY(s_unlock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        ret

#if 0

/*
 *      XXX CRUFTY SS_LOCK IMPLEMENTATION REMOVED XXX
 *
 * These versions of simple_lock block interrupts,
 * making them suitable for regions accessed by both top and bottom levels.
 * This is done by saving the current value of the cpu flags in a per-cpu
 * global, and disabling interrupts when the lock is taken.  When the
 * lock is released, interrupts might be enabled, depending upon the saved
 * cpu flags.
 * Because of this, it must ONLY be used for SHORT, deterministic paths!
 *
 * Note:
 * It would appear to be "bad behaviour" to blindly store a value in
 * ss_eflags, as this could destroy the previous contents.  But since ss_eflags
 * is a per-cpu variable, and it's fatal to attempt to acquire a simplelock
 * that you already hold, we get away with it.  This needs to be cleaned
 * up someday...
 */
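
/*
 * The pattern, sketched in C (read_eflags()/write_eflags()/
 * disable_intr()/enable_intr() are cpufunc-style helpers used here as
 * stand-ins for the pushfl/popfl/cli/sti below; ss_eflags is the
 * per-cpu global mentioned above):
 *
 *      void
 *      ss_lock_c(struct simplelock *lkp)
 *      {
 *              u_int ef;
 *
 *              for (;;) {
 *                      ef = read_eflags();
 *                      disable_intr();
 *                      if (test_and_set(&lkp->lock_data) == 0)
 *                              break;
 *                      write_eflags(ef);       // old state while waiting
 *                      while (lkp->lock_data != 0)
 *                              continue;
 *              }
 *              ss_eflags = ef;                 // for ss_unlock()
 *      }
 *
 *      void
 *      ss_unlock_c(struct simplelock *lkp)
 *      {
 *              lkp->lock_data = 0;
 *              if (ss_eflags & PSL_I)          // were interrupts on?
 *                      enable_intr();
 *      }
 */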

/*
 * void ss_lock(struct simplelock *lkp)
 */
#ifndef SL_DEBUG

ENTRY(ss_lock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx                /* value for a held lock */
ssetlock:
        pushfl
        cli
        xchgl   %ecx, (%eax)            /* compete */
        testl   %ecx, %ecx
        jz      sgotit                  /* it was clear, return */
        popfl                           /* restore previous eflags while waiting */
swait:
        pause
        cmpl    $0, (%eax)              /* wait to empty */
        jne     swait                   /* still set... */
        jmp     ssetlock                /* empty again, try once more */
sgotit:
        popl    _ss_eflags              /* save the old eflags */
        ret

#else /* SL_DEBUG */

ENTRY(ss_lock)
        movl    4(%esp), %edx           /* get the address of the lock */
ssetlock:
        movl    _cpu_lockid, %ecx       /* add cpu id portion */
        incl    %ecx                    /* add lock portion */
        pushfl
        cli
        movl    $0, %eax
        lock
        cmpxchgl %ecx, (%edx)           /* compete */
        jz      sgotit                  /* it was clear, return */
        pushl   %eax                    /* save what we exchanged */
        decl    %eax                    /* remove lock portion */
        cmpl    _cpu_lockid, %eax       /* do we hold it? */
        je      sbad_slock              /* yes, that's not good... */
        addl    $4, %esp                /* clear the stack */
        popfl
swait:
        pause
        cmpl    $0, (%edx)              /* wait to empty */
        jne     swait                   /* still set... */
        jmp     ssetlock                /* empty again, try once more */
sgotit:
        popl    _ss_eflags              /* save the old eflags */
sgotit2:
        ret

        ALIGN_TEXT
sbad_slock:
        /* %eax (current lock) is already on the stack */
        pushl   %edx
        pushl   _cpuid
        pushl   $sbsl1
        call    _panic

sbsl1:  .asciz  "rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */

/*
 * void ss_unlock(struct simplelock *lkp)
 */
ENTRY(ss_unlock)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)              /* clear the simple lock */
        testl   $PSL_I, _ss_eflags
        jz      ss_unlock2
        sti
ss_unlock2:
        ret

#endif

/*
 * These versions of simple_lock do not contain calls to profiling code.
 * Thus they can be called from the profiling code.
 */

/*
 * void s_lock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_lock_np)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $1, %ecx
1:
        xchgl   %ecx, (%eax)
        testl   %ecx, %ecx
        jz      3f
2:
        pause
        cmpl    $0, (%eax)              /* wait to empty */
        jne     2b                      /* still set... */
        jmp     1b                      /* empty again, try once more */
3:
        NON_GPROF_RET

/*
 * void s_unlock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_unlock_np)
        movl    4(%esp), %eax           /* get the address of the lock */
        movl    $0, (%eax)
        NON_GPROF_RET
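
/*
 * Putting it together: typical use of these primitives, sketched in C
 * (illustrative only; machine-independent code normally reaches them
 * through simple_lock()/simple_unlock() wrappers rather than calling
 * the s_lock family directly):
 *
 *      static struct simplelock slock;
 *
 *      s_lock_init(&slock);
 *
 *      s_lock(&slock);                 // spin until acquired
 *      ...critical section...
 *      s_unlock(&slock);
 *
 *      if (s_lock_try(&slock)) {       // non-blocking attempt
 *              ...critical section...
 *              s_unlock(&slock);
 *      }
 */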
