
FreeBSD/Linux Kernel Cross Reference
sys/lib/semaphore-sleepers.c


/*
 * i386 and x86-64 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * Both "sleepers" and the ordering of the contention routines
 * are protected by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care: when we go
 *    from a negative count to a non-negative one, we wake people up.
 *  - when we go from a non-negative count to a negative one, we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize, so that
 *    we cannot lose wakeup events.
 */

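/*
 * Illustrative sketch only, not part of the original file: the real fast
 * paths are the inline down()/up() in <asm/semaphore.h> (hand-written
 * assembly on i386).  They behave roughly like the helpers below, calling
 * the __down()/__up() slow paths in this file only on contention.  The
 * "_sketch" names are hypothetical.
 */
static inline void down_sketch(struct semaphore *sem)
{
        /* A negative result means the semaphore was already held: sleep. */
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);
}

static inline void up_sketch(struct semaphore *sem)
{
        /* A non-positive result after the increment means sleepers may exist. */
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);
}
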
/*
 * Called from the inline up() only when the count shows that tasks may be
 * sleeping on the semaphore: wake one exclusively-queued waiter.
 */
void __up(struct semaphore *sem)
{
        wake_up(&sem->wait);
}

void __sched __down(struct semaphore *sem)
{
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_UNINTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * the wait_queue_head.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_UNINTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);
        tsk->state = TASK_RUNNING;
}

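/*
 * Worked example of the accounting above, for illustration: the semaphore
 * starts with count = 1, task A acquires it (count = 0), then tasks B and
 * C both call down().  Each fast-path decrement parks a -1 in count and
 * each __down() entry bumps sem->sleepers, but every loop iteration folds
 * all parked decrements except the caller's own back into count (the
 * "sleepers - 1" add).  So with B and C asleep the steady state is
 * count = -1 and sem->sleepers = 1, no matter how many tasks wait.  When
 * A calls up(), count returns to 0 and __up() wakes B; B's iteration adds
 * back 0, sees a non-negative count and owns the semaphore, while the
 * wake_up_locked() at the bottom of __down() passes the wakeup on so C
 * can re-assert its own -1 and go back to sleep.
 */
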
int __sched __down_interruptible(struct semaphore *sem)
{
        int retval = 0;
        struct task_struct *tsk = current;
        DECLARE_WAITQUEUE(wait, tsk);
        unsigned long flags;

        tsk->state = TASK_INTERRUPTIBLE;
        spin_lock_irqsave(&sem->wait.lock, flags);
        add_wait_queue_exclusive_locked(&sem->wait, &wait);

        sem->sleepers++;
        for (;;) {
                int sleepers = sem->sleepers;

                /*
                 * With signals pending, this turns into
                 * the trylock failure case - we won't be
                 * sleeping, and we can't get the lock as
                 * it has contention. Just correct the count
                 * and exit.
                 */
                if (signal_pending(current)) {
                        retval = -EINTR;
                        sem->sleepers = 0;
                        atomic_add(sleepers, &sem->count);
                        break;
                }

                /*
                 * Add "everybody else" into it. They aren't
                 * playing, because we own the spinlock in
                 * wait_queue_head. The "-1" is because we're
                 * still hoping to get the semaphore.
                 */
                if (!atomic_add_negative(sleepers - 1, &sem->count)) {
                        sem->sleepers = 0;
                        break;
                }
                sem->sleepers = 1;      /* us - see -1 above */
                spin_unlock_irqrestore(&sem->wait.lock, flags);

                schedule();

                spin_lock_irqsave(&sem->wait.lock, flags);
                tsk->state = TASK_INTERRUPTIBLE;
        }
        remove_wait_queue_locked(&sem->wait, &wait);
        wake_up_locked(&sem->wait);
        spin_unlock_irqrestore(&sem->wait.lock, flags);

        tsk->state = TASK_RUNNING;
        return retval;
}

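/*
 * Usage sketch, not part of the original file: callers go through the
 * inline down_interruptible() in <asm/semaphore.h>, which falls back to
 * __down_interruptible() above only when the semaphore is contended.  A
 * pending signal surfaces as -EINTR.  The function name below is
 * hypothetical.
 */
static int example_interruptible_section(struct semaphore *sem)
{
        int err;

        err = down_interruptible(sem);  /* may sleep; a signal aborts the wait */
        if (err)
                return err;             /* -EINTR: interrupted before acquiring */

        /* ... critical section protected by sem ... */

        up(sem);
        return 0;
}
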
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore *sem)
{
        int sleepers;
        unsigned long flags;

        spin_lock_irqsave(&sem->wait.lock, flags);
        sleepers = sem->sleepers + 1;
        sem->sleepers = 0;

        /*
         * Add "everybody else" and us into it. They aren't
         * playing, because we own the spinlock in the
         * wait_queue_head.
         */
        if (!atomic_add_negative(sleepers, &sem->count)) {
                wake_up_locked(&sem->wait);
        }

        spin_unlock_irqrestore(&sem->wait.lock, flags);
        return 1;
}
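
/*
 * Illustrative sketch of the single-cmpxchg trylock mentioned in the
 * comment above __down_trylock() -- not used here because cmpxchg is
 * unavailable on the 386.  It never lets the count go negative, so no
 * correction pass is needed.  The name is hypothetical and atomic_cmpxchg()
 * is assumed to be available on the target kernel.
 */
static inline int down_trylock_cmpxchg_sketch(struct semaphore *sem)
{
        int old;

        do {
                old = atomic_read(&sem->count);
                if (old <= 0)
                        return 1;       /* contended: report trylock failure */
        } while (atomic_cmpxchg(&sem->count, old, old - 1) != old);

        return 0;                       /* count decremented: lock acquired */
}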

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.