The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_umtx.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2002, Jeffrey Roberson <jeff@freebsd.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice unmodified, this list of conditions, and the following
   10  *    disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/5.3/sys/kern/kern_umtx.c 136588 2004-10-16 08:43:07Z cvs2svn $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/kernel.h>
   32 #include <sys/limits.h>
   33 #include <sys/lock.h>
   34 #include <sys/malloc.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/signalvar.h>
   38 #include <sys/sysent.h>
   39 #include <sys/systm.h>
   40 #include <sys/sysproto.h>
   41 #include <sys/sx.h>
   42 #include <sys/thr.h>
   43 #include <sys/umtx.h>
   44 
/*
 * One wait queue per (process id, umtx address) pair: all threads of a
 * single process sleeping on the same user-space mutex share one umtx_q,
 * which lives in the hash bucket selected by UMTX_HASH().  Entries are
 * created on demand in umtx_insert() and freed by umtx_remove() when the
 * last waiter leaves.
 */
struct umtx_q {
        LIST_ENTRY(umtx_q)      uq_next;        /* Linked list for the hash. */
        TAILQ_HEAD(, thread) uq_tdq;    /* List of threads blocked here. */
        struct umtx     *uq_umtx;       /* Pointer key component. */
        pid_t           uq_pid;         /* Pid key component. */
};
   51 
/* Number of hash buckets for the (pid, umtx) -> umtx_q lookup. */
#define UMTX_QUEUES     128
/*
 * Hash on the pid plus the upper bits of the umtx address; the low 16
 * bits of the pointer are masked off, so umtxes within the same 64KB
 * region of one process land in the same bucket.
 */
#define UMTX_HASH(pid, umtx)                                            \
    (((uintptr_t)pid + ((uintptr_t)umtx & ~65535)) % UMTX_QUEUES)

LIST_HEAD(umtx_head, umtx_q);
/* The bucket array; all access is serialized by umtx_lock. */
static struct umtx_head queues[UMTX_QUEUES];
static MALLOC_DEFINE(M_UMTX, "umtx", "UMTX queue memory");

/* Single global lock protecting every umtx queue. */
static struct mtx umtx_lock;
MTX_SYSINIT(umtx, &umtx_lock, "umtx", MTX_DEF);
   62 
   63 #define UMTX_LOCK()     mtx_lock(&umtx_lock);
   64 #define UMTX_UNLOCK()   mtx_unlock(&umtx_lock);
   65 
/*
 * Sign bit of the user-space owner word: set when at least one thread
 * has had to enter the kernel to wait, telling the user-space unlocker
 * it must make the _umtx_unlock() system call.
 */
#define UMTX_CONTESTED  LONG_MIN

static struct umtx_q *umtx_lookup(struct thread *, struct umtx *umtx);
static struct umtx_q *umtx_insert(struct thread *, struct umtx *umtx);
   70 
   71 static struct umtx_q *
   72 umtx_lookup(struct thread *td, struct umtx *umtx)
   73 {
   74         struct umtx_head *head;
   75         struct umtx_q *uq;
   76         pid_t pid;
   77 
   78         pid = td->td_proc->p_pid;
   79 
   80         head = &queues[UMTX_HASH(td->td_proc->p_pid, umtx)];
   81 
   82         LIST_FOREACH(uq, head, uq_next) {
   83                 if (uq->uq_pid == pid && uq->uq_umtx == umtx)
   84                         return (uq);
   85         }
   86 
   87         return (NULL);
   88 }
   89 
   90 /*
   91  * Insert a thread onto the umtx queue.
   92  */
   93 static struct umtx_q *
   94 umtx_insert(struct thread *td, struct umtx *umtx)
   95 {
   96         struct umtx_head *head;
   97         struct umtx_q *uq;
   98         pid_t pid;
   99 
  100         pid = td->td_proc->p_pid;
  101 
  102         if ((uq = umtx_lookup(td, umtx)) == NULL) {
  103                 struct umtx_q *ins;
  104 
  105                 UMTX_UNLOCK();
  106                 ins = malloc(sizeof(*uq), M_UMTX, M_ZERO | M_WAITOK);
  107                 UMTX_LOCK();
  108 
  109                 /*
  110                  * Some one else could have succeeded while we were blocked
  111                  * waiting on memory.
  112                  */
  113                 if ((uq = umtx_lookup(td, umtx)) == NULL) {
  114                         head = &queues[UMTX_HASH(pid, umtx)];
  115                         uq = ins;
  116                         uq->uq_pid = pid;
  117                         uq->uq_umtx = umtx;
  118                         LIST_INSERT_HEAD(head, uq, uq_next);
  119                         TAILQ_INIT(&uq->uq_tdq);
  120                 } else
  121                         free(ins, M_UMTX);
  122         }
  123 
  124         /*
  125          * Insert us onto the end of the TAILQ.
  126          */
  127         TAILQ_INSERT_TAIL(&uq->uq_tdq, td, td_umtx);
  128 
  129         return (uq);
  130 }
  131 
  132 static void
  133 umtx_remove(struct umtx_q *uq, struct thread *td)
  134 {
  135         TAILQ_REMOVE(&uq->uq_tdq, td, td_umtx);
  136 
  137         if (TAILQ_EMPTY(&uq->uq_tdq)) {
  138                 LIST_REMOVE(uq, uq_next);
  139                 free(uq, M_UMTX);
  140         }
  141 }
  142 
/*
 * _umtx_lock system call: acquire a user-space mutex, sleeping in the
 * kernel if it is held by another thread.  The fast (uncontested) path
 * is expected to be taken in userland; this call is the slow path.
 *
 * Returns 0 on acquisition, EFAULT if the userland address is bad, or
 * the msleep() error (e.g. on a caught signal) otherwise.
 */
int
_umtx_lock(struct thread *td, struct _umtx_lock_args *uap)
    /* struct umtx *umtx */
{
        struct umtx_q *uq;
        struct umtx *umtx;
        intptr_t owner;
        intptr_t old;
        int error;

        uq = NULL;

        /*
         * Care must be exercised when dealing with this structure.  It
         * can fault on any access.
         */
        umtx = uap->umtx;

        for (;;) {
                /*
                 * Try the uncontested case.  This should be done in userland.
                 * casuptr() CASes the user word and returns its previous
                 * value, or -1 if the access faulted.
                 */
                owner = casuptr((intptr_t *)&umtx->u_owner,
                    UMTX_UNOWNED, td->td_tid);

                /* The address was invalid. */
                if (owner == -1)
                        return (EFAULT);

                /* The acquire succeeded. */
                if (owner == UMTX_UNOWNED)
                        return (0);

                /* If no one owns it but it is contested try to acquire it. */
                if (owner == UMTX_CONTESTED) {
                        /* Take ownership but keep the contested bit set. */
                        owner = casuptr((intptr_t *)&umtx->u_owner,
                            UMTX_CONTESTED, td->td_tid | UMTX_CONTESTED);

                        /* The address was invalid. */
                        if (owner == -1)
                                return (EFAULT);

                        if (owner == UMTX_CONTESTED)
                                return (0);

                        /* If this failed the lock has changed, restart. */
                        continue;
                }

                /* Queue ourselves as a waiter before publishing contention. */
                UMTX_LOCK();
                uq = umtx_insert(td, umtx);
                UMTX_UNLOCK();

                /*
                 * Set the contested bit so that a release in user space
                 * knows to use the system call for unlock.  If this fails
                 * either some one else has acquired the lock or it has been
                 * released.
                 */
                old = casuptr((intptr_t *)&umtx->u_owner, owner,
                    owner | UMTX_CONTESTED);

                /* The address was invalid. */
                if (old == -1) {
                        UMTX_LOCK();
                        umtx_remove(uq, td);
                        UMTX_UNLOCK();
                        return (EFAULT);
                }

                /*
                 * We set the contested bit, sleep. Otherwise the lock changed
                 * and we need to retry or we lost a race to the thread
                 * unlocking the umtx.
                 *
                 * TDF_UMTXWAKEUP is set by _umtx_unlock() under the proc
                 * lock before it calls wakeup(td); testing it under the
                 * same lock closes the race where the wakeup arrives
                 * between the CAS above and the msleep() here.
                 */
                PROC_LOCK(td->td_proc);
                if (old == owner && (td->td_flags & TDF_UMTXWAKEUP) == 0)
                        error = msleep(td, &td->td_proc->p_mtx,
                            td->td_priority | PCATCH, "umtx", 0);
                else
                        error = 0;
                /* Consume any pending wakeup flag for the next iteration. */
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_UMTXWAKEUP;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(td->td_proc);

                UMTX_LOCK();
                umtx_remove(uq, td);
                UMTX_UNLOCK();

                /*
                 * If we caught a signal we might have to retry or exit 
                 * immediately.
                 */
                if (error)
                        return (error);
        }

        /* NOTREACHED: the loop above only exits via return. */
        return (0);
}
  244 
/*
 * _umtx_unlock system call: release a contested user-space mutex and
 * wake one waiter if any.  Only reached when the contested bit is set
 * in the owner word (uncontested unlocks complete in userland).
 *
 * Returns 0 on success, EFAULT on a bad userland address, EPERM if the
 * caller does not own the umtx, or EINVAL on state mismatches.
 */
int
_umtx_unlock(struct thread *td, struct _umtx_unlock_args *uap)
    /* struct umtx *umtx */
{
        struct thread *blocked;
        struct umtx *umtx;
        struct umtx_q *uq;
        intptr_t owner;
        intptr_t old;

        umtx = uap->umtx;

        /*
         * Make sure we own this mtx.
         *
         * XXX Need a {fu,su}ptr this is not correct on arch where
         * sizeof(intptr_t) != sizeof(long).
         */
        if ((owner = fuword(&umtx->u_owner)) == -1)
                return (EFAULT);

        if ((owner & ~UMTX_CONTESTED) != td->td_tid)
                return (EPERM);

        /* We should only ever be in here for contested locks */
        if ((owner & UMTX_CONTESTED) == 0)
                return (EINVAL);
        blocked = NULL;

        /*
         * When unlocking the umtx, it must be marked as unowned if
         * there is zero or one thread only waiting for it.
         * Otherwise, it must be marked as contested.
         *
         * NOTE: the lookup is keyed on the caller's pid, so only
         * waiters from the owner's own process are considered here.
         */
        UMTX_LOCK();
        uq = umtx_lookup(td, umtx);
        if (uq == NULL ||
            (uq != NULL && (blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
            TAILQ_NEXT(blocked, td_umtx) == NULL)) {
                /* Zero or one waiter: hand the umtx back unowned. */
                UMTX_UNLOCK();
                old = casuptr((intptr_t *)&umtx->u_owner, owner,
                    UMTX_UNOWNED);
                if (old == -1)
                        return (EFAULT);
                if (old != owner)
                        return (EINVAL);

                /*
                 * Recheck the umtx queue to make sure another thread
                 * didn't put itself on it after it was unlocked.
                 */
                UMTX_LOCK();
                uq = umtx_lookup(td, umtx);
                if (uq != NULL &&
                    ((blocked = TAILQ_FIRST(&uq->uq_tdq)) != NULL &&
                    TAILQ_NEXT(blocked, td_umtx) != NULL)) {
                        /*
                         * Multiple waiters appeared meanwhile: restore
                         * the contested marker so the next unlock also
                         * comes through the kernel.
                         */
                        UMTX_UNLOCK();
                        old = casuptr((intptr_t *)&umtx->u_owner,
                            UMTX_UNOWNED, UMTX_CONTESTED);
                } else {
                        UMTX_UNLOCK();
                }
        } else {
                /* More than one waiter: leave the umtx marked contested. */
                UMTX_UNLOCK();
                old = casuptr((intptr_t *)&umtx->u_owner,
                    owner, UMTX_CONTESTED);
                if (old != -1 && old != owner)
                        return (EINVAL);
        }

        if (old == -1)
                return (EFAULT);

        /*
         * If there is a thread waiting on the umtx, wake it up.
         * TDF_UMTXWAKEUP tells the sleeper in _umtx_lock() that the
         * wakeup was deliberate (vs. a spurious one).
         */
        if (blocked != NULL) {
                PROC_LOCK(blocked->td_proc);
                mtx_lock_spin(&sched_lock);
                blocked->td_flags |= TDF_UMTXWAKEUP;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(blocked->td_proc);
                wakeup(blocked);
        }

        return (0);
}

Cache object: 53b2675c29a9a1d25e5e0729775d4965


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.