FreeBSD/Linux Kernel Cross Reference
sys/iokit/IOKit/IOLocks.h
/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef __IOKIT_IOLOCKS_H
#define __IOKIT_IOLOCKS_H

#ifndef KERNEL
#error IOLocks.h is for kernel use only
#endif

#include <sys/appleapiopts.h>

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOTypes.h>

#ifdef __cplusplus
extern "C" {
#endif

#include <libkern/locks.h>
#include <machine/machine_routines.h>

extern lck_grp_t        *IOLockGroup;

/*
 * Mutex lock operations
 */

#ifdef  XNU_KERNEL_PRIVATE
typedef lck_mtx_t       IOLock;
#else
typedef struct _IOLock  IOLock;
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IOLockAlloc
    @abstract Allocates and initializes a mutex.
    @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOLock * IOLockAlloc( void );

/*! @function IOLockFree
    @abstract Frees a mutex.
    @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void    IOLockFree( IOLock * lock);

/*! @function IOLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Accessor to the Mach mutex.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IOLockGetMachLock( IOLock * lock);

/*! @function IOLockLock
    @abstract Lock a mutex.
    @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void    IOLockLock( IOLock * lock)
{
    lck_mtx_lock(lock);
}
#else
void    IOLockLock( IOLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
void    IOLockLock( IOLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IOLockTryLock
    @abstract Attempt to lock a mutex.
    @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false.
    @param lock Pointer to the allocated lock.
    @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOLockTryLock( IOLock * lock)
{
    return(lck_mtx_try_lock(lock));
}
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */
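
/*
 * Usage sketch (editorial addition, not part of the original header):
 * IOLockTryLock() lets a caller back off instead of blocking, for example
 * to respect a lock ordering. The names lockA and lockB are hypothetical.
 */
#if 0   /* illustrative only */
static boolean_t
ExampleLockBoth( IOLock * lockA, IOLock * lockB )
{
    IOLockLock(lockA);
    if (!IOLockTryLock(lockB)) {
        /* lockB is busy; release lockA and report failure rather than
         * risk a lock-order deadlock with a thread holding lockB. */
        IOLockUnlock(lockA);
        return false;
    }
    return true;                /* caller now holds both locks */
}
#endif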

/*! @function IOLockUnlock
    @abstract Unlock a mutex.
    @discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void    IOLockUnlock( IOLock * lock)
{
    lck_mtx_unlock(lock);
}
#else
void    IOLockUnlock( IOLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
void    IOLockUnlock( IOLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IOLockSleep
    @abstract Sleep with mutex unlock and relock.
    @discussion Prepare to sleep, unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the locked lock.
    @param event The event to sleep on.
    @param interType How the sleep can be interrupted.
    @result The wait-result value indicating how the thread was awakened. */
int     IOLockSleep( IOLock * lock, void *event, UInt32 interType);

int     IOLockSleepDeadline( IOLock * lock, void *event,
                                AbsoluteTime deadline, UInt32 interType);

void    IOLockWakeup(IOLock * lock, void *event, bool oneThread);
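
/*
 * Usage sketch (editorial addition, not part of the original header): a
 * minimal condition wait built from IOLockSleep()/IOLockWakeup().
 * IOLockSleep() drops the mutex while the thread sleeps and re-takes it
 * before returning, so the condition must be re-checked in a loop. The
 * structure and function names are hypothetical; THREAD_UNINTERRUPTIBLE is
 * one of the Mach wait-interrupt types.
 */
#if 0   /* illustrative only */
struct ExampleEvent {
    IOLock * lock;              /* protects `ready` */
    bool     ready;             /* the condition waited on */
};

static void
ExampleWait( struct ExampleEvent * ev )
{
    IOLockLock(ev->lock);
    while (!ev->ready)          /* guard against spurious wakeups */
        IOLockSleep(ev->lock, &ev->ready, THREAD_UNINTERRUPTIBLE);
    ev->ready = false;          /* consume the event */
    IOLockUnlock(ev->lock);
}

static void
ExampleSignal( struct ExampleEvent * ev )
{
    IOLockLock(ev->lock);
    ev->ready = true;
    IOLockWakeup(ev->lock, &ev->ready, true /* oneThread */);
    IOLockUnlock(ev->lock);
}
#endif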

#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

typedef enum {
    kIOLockStateUnlocked        = 0,
    kIOLockStateLocked          = 1
} IOLockState;

void    IOLockInitWithState( IOLock * lock, IOLockState state);
#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked)

static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock);        }
static __inline__ boolean_t IOTryLock(  IOLock * lock) { return(IOLockTryLock(lock)); }
static __inline__ void IOUnlock(   IOLock * lock) { IOLockUnlock(lock);      }

#endif /* __APPLE_API_OBSOLETE */

/*
 * Recursive lock operations
 */

typedef struct _IORecursiveLock IORecursiveLock;

/*! @function IORecursiveLockAlloc
    @abstract Allocates and initializes a recursive lock.
    @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks.
    @result Pointer to the allocated lock, or zero on failure. */

IORecursiveLock * IORecursiveLockAlloc( void );

/*! @function IORecursiveLockFree
    @abstract Frees a recursive lock.
    @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void            IORecursiveLockFree( IORecursiveLock * lock);

/*! @function IORecursiveLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Accessor to the Mach mutex.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock);

/*! @function IORecursiveLockLock
    @abstract Lock a recursive lock.
    @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock. */

void            IORecursiveLockLock( IORecursiveLock * lock);

/*! @function IORecursiveLockTryLock
    @abstract Attempt to lock a recursive lock.
    @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock.
    @result True if the lock is now locked by the caller, otherwise false. */

boolean_t       IORecursiveLockTryLock( IORecursiveLock * lock);

/*! @function IORecursiveLockUnlock
    @abstract Unlock a recursive lock.
    @discussion Undo one call to IORecursiveLockLock; if the lock is now unlocked, wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

void            IORecursiveLockUnlock( IORecursiveLock * lock);

/*! @function IORecursiveLockHaveLock
    @abstract Check if a recursive lock is held by the calling thread.
    @discussion Returns true if the lock is held by the calling thread; returns false if the lock is unlocked or held by another thread.
    @param lock Pointer to the allocated lock.
    @result True if the calling thread holds the lock, otherwise false. */

boolean_t       IORecursiveLockHaveLock( const IORecursiveLock * lock);

extern int      IORecursiveLockSleep( IORecursiveLock *_lock,
                                      void *event, UInt32 interType);
extern void     IORecursiveLockWakeup( IORecursiveLock *_lock,
                                       void *event, bool oneThread);
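
/*
 * Usage sketch (editorial addition, not part of the original header): a
 * recursive lock allows re-entry by the owning thread, so a public entry
 * point can call an internal helper that also takes the lock. Names are
 * hypothetical.
 */
#if 0   /* illustrative only */
static IORecursiveLock * gExampleLock;  /* created with IORecursiveLockAlloc() */

static void
ExampleHelper( void )
{
    IORecursiveLockLock(gExampleLock);  /* re-entry: same thread, no deadlock */
    /* ... work that also needs the lock ... */
    IORecursiveLockUnlock(gExampleLock);
}

static void
ExampleEntry( void )
{
    IORecursiveLockLock(gExampleLock);
    ExampleHelper();                    /* safe: the lock is recursive */
    IORecursiveLockUnlock(gExampleLock);
}
#endif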

/*
 * Complex (read/write) lock operations
 */

#ifdef  XNU_KERNEL_PRIVATE
typedef lck_rw_t                IORWLock;
#else
typedef struct _IORWLock        IORWLock;
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockAlloc
    @abstract Allocates and initializes a read/write lock.
    @discussion Allocates a read/write lock in general purpose memory, and initializes it. Read/write locks provide for multiple readers and one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IORWLock * IORWLockAlloc( void );

/*! @function IORWLockFree
    @abstract Frees a read/write lock.
    @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void    IORWLockFree( IORWLock * lock);

/*! @function IORWLockGetMachLock
    @abstract Accessor to a Mach read/write lock.
    @discussion Accessor to the Mach read/write lock.
    @param lock Pointer to the allocated lock. */

lck_rw_t * IORWLockGetMachLock( IORWLock * lock);

/*! @function IORWLockRead
    @abstract Lock a read/write lock for read.
    @discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void    IORWLockRead( IORWLock * lock)
{
    lck_rw_lock_shared( lock);
}
#else
void    IORWLockRead( IORWLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
void    IORWLockRead( IORWLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockWrite
    @abstract Lock a read/write lock for write.
    @discussion Lock the lock for write, allowing one writer exclusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void    IORWLockWrite( IORWLock * lock)
{
    lck_rw_lock_exclusive( lock);
}
#else
void    IORWLockWrite( IORWLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
void    IORWLockWrite( IORWLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockUnlock
    @abstract Unlock a read/write lock.
    @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void    IORWLockUnlock( IORWLock * lock)
{
    lck_rw_done( lock);
}
#else
void    IORWLockUnlock( IORWLock * lock);
#endif  /* !IOLOCKS_CPP */
#else
void    IORWLockUnlock( IORWLock * lock);
#endif  /* XNU_KERNEL_PRIVATE */
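
/*
 * Usage sketch (editorial addition, not part of the original header):
 * readers share the lock while a writer gets exclusive access; both paths
 * release it with the same IORWLockUnlock() call. Names are hypothetical.
 */
#if 0   /* illustrative only */
struct ExampleTable {
    IORWLock * lock;            /* protects `value` */
    int        value;
};

static int
ExampleRead( struct ExampleTable * t )
{
    int v;
    IORWLockRead(t->lock);      /* shared: concurrent readers allowed */
    v = t->value;
    IORWLockUnlock(t->lock);
    return v;
}

static void
ExampleWrite( struct ExampleTable * t, int v )
{
    IORWLockWrite(t->lock);     /* exclusive: waits for readers and writers */
    t->value = v;
    IORWLockUnlock(t->lock);
}
#endif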

#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

static __inline__ void IOReadLock( IORWLock * lock)   { IORWLockRead(lock);   }
static __inline__ void IOWriteLock(  IORWLock * lock) { IORWLockWrite(lock);  }
static __inline__ void IORWUnlock(   IORWLock * lock) { IORWLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */

/*
 * Simple locks. Cannot block while holding a simple lock.
 */

#ifdef  KERNEL_PRIVATE
typedef lck_spin_t              IOSimpleLock;
#else
typedef struct _IOSimpleLock    IOSimpleLock;
#endif  /* KERNEL_PRIVATE */

/*! @function IOSimpleLockAlloc
    @abstract Allocates and initializes a spin lock.
    @discussion Allocates a spin lock in general purpose memory, and initializes it. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOSimpleLock * IOSimpleLockAlloc( void );

/*! @function IOSimpleLockFree
    @abstract Frees a spin lock.
    @discussion Frees a lock allocated with IOSimpleLockAlloc.
    @param lock Pointer to the lock. */

void IOSimpleLockFree( IOSimpleLock * lock );

/*! @function IOSimpleLockGetMachLock
    @abstract Accessor to a Mach spin lock.
    @discussion Accessor to the Mach spin lock.
    @param lock Pointer to the allocated lock. */

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock);

/*! @function IOSimpleLockInit
    @abstract Initialize a spin lock.
    @discussion Initialize an embedded spin lock, to the unlocked state.
    @param lock Pointer to the lock. */

void IOSimpleLockInit( IOSimpleLock * lock );

/*! @function IOSimpleLockLock
    @abstract Lock a spin lock.
    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
    @param lock Pointer to the lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockLock( IOSimpleLock * lock )
{
    lck_spin_lock( lock );
}
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif  /* !IOLOCKS_CPP */
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IOSimpleLockTryLock
    @abstract Attempt to lock a spin lock.
    @discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock.
    @param lock Pointer to the lock.
    @result True if the lock was unlocked and is now locked by the caller, otherwise false. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock )
{
    return( lck_spin_try_lock( lock ) );
}
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif  /* !IOLOCKS_CPP */
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif  /* XNU_KERNEL_PRIVATE */

/*! @function IOSimpleLockUnlock
    @abstract Unlock a spin lock.
    @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock. */

#ifdef  XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockUnlock( IOSimpleLock * lock )
{
    lck_spin_unlock( lock );
}
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif  /* !IOLOCKS_CPP */
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif  /* XNU_KERNEL_PRIVATE */

typedef long int IOInterruptState;

/*! @function IOSimpleLockLockDisableInterrupt
    @abstract Lock a spin lock.
    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
    @param lock Pointer to the lock. */

static __inline__
IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock )
{
    IOInterruptState    state = ml_set_interrupts_enabled( false );
    IOSimpleLockLock( lock );
    return( state );
}

/*! @function IOSimpleLockUnlockEnableInterrupt
    @abstract Unlock a spin lock, and restore interrupt state.
    @discussion Unlock the lock, and restore preemption and interrupts to the state they were in when the lock was taken. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock.
    @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt(). */

static __inline__
void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock,
                                        IOInterruptState state )
{
    IOSimpleLockUnlock( lock );
    ml_set_interrupts_enabled( state );
}
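
/*
 * Usage sketch (editorial addition, not part of the original header):
 * sharing data between thread context and an interrupt handler.
 * Thread-context code must take the lock with interrupts disabled, or an
 * interrupt arriving on the same CPU could spin forever on the lock it
 * already holds. Names are hypothetical.
 */
#if 0   /* illustrative only */
struct ExampleDevice {
    IOSimpleLock * lock;        /* protects `pending` */
    UInt32         pending;
};

static void
ExampleInterruptHandler( struct ExampleDevice * dev )
{
    /* Interrupts are already disabled in the handler; a plain lock suffices. */
    IOSimpleLockLock(dev->lock);
    dev->pending++;
    IOSimpleLockUnlock(dev->lock);
}

static UInt32
ExampleThreadDrain( struct ExampleDevice * dev )
{
    UInt32 n;
    IOInterruptState is = IOSimpleLockLockDisableInterrupt(dev->lock);
    n = dev->pending;
    dev->pending = 0;
    IOSimpleLockUnlockEnableInterrupt(dev->lock, is);
    return n;
}
#endif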

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* !__IOKIT_IOLOCKS_H */