FreeBSD/Linux Kernel Cross Reference
sys/dev/sound/clone.c


    1 /*-
    2  * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  * $FreeBSD$
   27  */
   28 
   29 #include <sys/param.h>
   30 #include <sys/systm.h>
   31 #include <sys/conf.h>
   32 #include <sys/kernel.h>
   33 #include <sys/malloc.h>
   34 #include <sys/proc.h>
   35 
   36 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
   37 #include <dev/sound/pcm/sound.h>
   38 #endif
   39 
   40 #include <dev/sound/clone.h>
   41 
   42 /*
   43  * So here we go again, another clonedevs manager. Unlike the default
   44  * clonedevs, this clone manager is designed to withstand abusive behavior
   45  * (such as 'while : ; do ls /dev/whatever ; done', etc.), to reuse objects
   46  * once they reach a certain expiration threshold, and to provide an
   47  * aggressive garbage collector, a transparent device allocator and
   48  * concurrency handling across multiple threads/procs. Due to the limited
   49  * information given by the dev_clone EVENTHANDLER, we don't have many
   50  * clues whether the caller wants a real open() or is simply making fun of
   51  * us with things like stat(), mtime() etc. Assuming that 1) the time
   52  * window between the dev_clone EH and the real open() is small enough and
   53  * 2) mtime()/stat() etc. always look like a half-way / stalled operation,
   54  * we can decide whether a new cdev must be created, an old (expired) cdev
   55  * can be reused or an existing cdev can be shared.
   56  *
   57  * Most of the operations and logic are generic enough to be applied in
   58  * other places (such as if_tap, snp, etc.).  Perhaps this could be
   59  * rearranged to complement clone_*(). However, since this is still
   60  * specific to the sound driver (and a proof of concept of how it can be done),
   61  * si_drv2 keeps a pointer to the clone list entry to avoid an expensive lookup.
   62  */
   63 
   64 /* clone entry */
   65 struct snd_clone_entry {
   66         TAILQ_ENTRY(snd_clone_entry) link;
   67         struct snd_clone *parent;
   68         struct cdev *devt;
   69         struct timespec tsp;
   70         uint32_t flags;
   71         pid_t pid;
   72         int unit;
   73 };
   74 
   75 /* clone manager */
   76 struct snd_clone {
   77         TAILQ_HEAD(link_head, snd_clone_entry) head;
   78         struct timespec tsp;
   79         int refcount;
   80         int size;
   81         int typemask;
   82         int maxunit;
   83         int deadline;
   84         uint32_t flags;
   85 };
   86 
   87 #ifdef SND_DIAGNOSTIC
   88 #define SND_CLONE_ASSERT(x, y)          do {                    \
   89         if (!(x))                                               \
   90                 panic y;                                        \
   91 } while(0)
   92 #else
   93 #define SND_CLONE_ASSERT(x...)          KASSERT(x)
   94 #endif
   95 
   96 /*
   97  * Shamelessly ripped off from vfs_subr.c.
   98  * We need at least 1/HZ precision for the default timestamps.
   99  */
  100 enum { SND_TSP_SEC, SND_TSP_HZ, SND_TSP_USEC, SND_TSP_NSEC };
  101 
  102 static int snd_timestamp_precision = SND_TSP_HZ;
  103 TUNABLE_INT("hw.snd.timestamp_precision", &snd_timestamp_precision);
  104 
  105 void
  106 snd_timestamp(struct timespec *tsp)
  107 {
  108         struct timeval tv;
  109 
  110         switch (snd_timestamp_precision) {
  111         case SND_TSP_SEC:
  112                 tsp->tv_sec = time_second;
  113                 tsp->tv_nsec = 0;
  114                 break;
  115         case SND_TSP_HZ:
  116                 getnanouptime(tsp);
  117                 break;
  118         case SND_TSP_USEC:
  119                 microuptime(&tv);
  120                 TIMEVAL_TO_TIMESPEC(&tv, tsp);
  121                 break;
  122         case SND_TSP_NSEC:
  123                 nanouptime(tsp);
  124                 break;
  125         default:
  126                 snd_timestamp_precision = SND_TSP_HZ;
  127                 getnanouptime(tsp);
  128                 break;
  129         }
  130 }
  131 
  132 #if defined(SND_DIAGNOSTIC) || defined(SND_DEBUG)
  133 static int
  134 sysctl_hw_snd_timestamp_precision(SYSCTL_HANDLER_ARGS)
  135 {
  136         int err, val;
  137 
  138         val = snd_timestamp_precision;
  139         err = sysctl_handle_int(oidp, &val, 0, req);
  140         if (err == 0 && req->newptr != NULL) {
  141                 switch (val) {
  142                 case SND_TSP_SEC:
  143                 case SND_TSP_HZ:
  144                 case SND_TSP_USEC:
  145                 case SND_TSP_NSEC:
  146                         snd_timestamp_precision = val;
  147                         break;
  148                 default:
  149                         break;
  150                 }
  151         }
  152 
  153         return (err);
  154 }
  155 SYSCTL_PROC(_hw_snd, OID_AUTO, timestamp_precision, CTLTYPE_INT | CTLFLAG_RW,
  156     0, sizeof(int), sysctl_hw_snd_timestamp_precision, "I",
  157     "timestamp precision (0=s 1=hz 2=us 3=ns)");
  158 #endif
  159 
  160 /*
  161  * snd_clone_create() : Return an opaque, newly allocated clone manager.
  162  */
  163 struct snd_clone *
  164 snd_clone_create(int typemask, int maxunit, int deadline, uint32_t flags)
  165 {
  166         struct snd_clone *c;
  167 
  168         SND_CLONE_ASSERT(!(typemask & ~SND_CLONE_MAXUNIT),
  169             ("invalid typemask: 0x%08x", typemask));
  170         SND_CLONE_ASSERT(maxunit == -1 ||
  171             !(maxunit & ~(~typemask & SND_CLONE_MAXUNIT)),
  172             ("maxunit overflow: typemask=0x%08x maxunit=%d",
  173             typemask, maxunit));
  174         SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
  175             ("invalid clone flags=0x%08x", flags));
  176 
  177         c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
  178         c->refcount = 0;
  179         c->size = 0;
  180         c->typemask = typemask;
  181         c->maxunit = (maxunit == -1) ? (~typemask & SND_CLONE_MAXUNIT) :
  182             maxunit;
  183         c->deadline = deadline;
  184         c->flags = flags;
  185         snd_timestamp(&c->tsp);
  186         TAILQ_INIT(&c->head);
  187 
  188         return (c);
  189 }
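
A minimal usage sketch (an editorial addition, not taken from the sound driver
itself): a hypothetical caller creates and enables a clone manager.  The
typemask value is a placeholder chosen only to satisfy the assertions above
(assuming SND_CLONE_MAXUNIT spans the low 24 bits, per clone.h), the deadline
is in milliseconds as implied by SND_CLONE_ELAPSED()/SND_CLONE_EXPIRED()
further below, and the flag combination is merely illustrative.

/*
 * Hypothetical sketch, not part of this file: create and enable a manager.
 * "example_clones", the typemask and the deadline are placeholders.
 */
static struct snd_clone *example_clones;

static void
example_clone_init(void)
{
        example_clones = snd_clone_create(
            0x00f00000,         /* placeholder typemask (high unit bits) */
            -1,                 /* maxunit: derive it from ~typemask */
            60000,              /* deadline: 60 seconds, in milliseconds */
            SND_CLONE_GC_ENABLE | SND_CLONE_GC_UNREF | SND_CLONE_WAITOK);
        (void)snd_clone_enable(example_clones);
}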
  190 
  191 int
  192 snd_clone_busy(struct snd_clone *c)
  193 {
  194         struct snd_clone_entry *ce;
  195 
  196         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  197 
  198         if (c->size == 0)
  199                 return (0);
  200 
  201         TAILQ_FOREACH(ce, &c->head, link) {
  202                 if ((ce->flags & SND_CLONE_BUSY) ||
  203                     (ce->devt != NULL && ce->devt->si_threadcount != 0))
  204                         return (EBUSY);
  205         }
  206 
  207         return (0);
  208 }
  209 
  210 /*
  211  * snd_clone_enable()/disable() : Resume/suspend clone allocation through
  212  * snd_clone_alloc(). Nothing else is affected by this.
  213  */
  214 int
  215 snd_clone_enable(struct snd_clone *c)
  216 {
  217         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  218 
  219         if (c->flags & SND_CLONE_ENABLE)
  220                 return (EINVAL);
  221 
  222         c->flags |= SND_CLONE_ENABLE;
  223 
  224         return (0);
  225 }
  226 
  227 int
  228 snd_clone_disable(struct snd_clone *c)
  229 {
  230         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  231 
  232         if (!(c->flags & SND_CLONE_ENABLE))
  233                 return (EINVAL);
  234 
  235         c->flags &= ~SND_CLONE_ENABLE;
  236 
  237         return (0);
  238 }
  239 
  240 /*
  241  * Getters / Setters. Not worth explaining :)
  242  */
  243 int
  244 snd_clone_getsize(struct snd_clone *c)
  245 {
  246         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  247 
  248         return (c->size);
  249 }
  250 
  251 int
  252 snd_clone_getmaxunit(struct snd_clone *c)
  253 {
  254         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  255 
  256         return (c->maxunit);
  257 }
  258 
  259 int
  260 snd_clone_setmaxunit(struct snd_clone *c, int maxunit)
  261 {
  262         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  263         SND_CLONE_ASSERT(maxunit == -1 ||
  264             !(maxunit & ~(~c->typemask & SND_CLONE_MAXUNIT)),
  265             ("maxunit overflow: typemask=0x%08x maxunit=%d",
  266             c->typemask, maxunit));
  267 
  268         c->maxunit = (maxunit == -1) ? (~c->typemask & SND_CLONE_MAXUNIT) :
  269             maxunit;
  270 
  271         return (c->maxunit);
  272 }
  273 
  274 int
  275 snd_clone_getdeadline(struct snd_clone *c)
  276 {
  277         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  278 
  279         return (c->deadline);
  280 }
  281 
  282 int
  283 snd_clone_setdeadline(struct snd_clone *c, int deadline)
  284 {
  285         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  286 
  287         c->deadline = deadline;
  288 
  289         return (c->deadline);
  290 }
  291 
  292 int
  293 snd_clone_gettime(struct snd_clone *c, struct timespec *tsp)
  294 {
  295         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  296         SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
  297 
  298         *tsp = c->tsp;
  299 
  300         return (0);
  301 }
  302 
  303 uint32_t
  304 snd_clone_getflags(struct snd_clone *c)
  305 {
  306         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  307 
  308         return (c->flags);
  309 }
  310 
  311 uint32_t
  312 snd_clone_setflags(struct snd_clone *c, uint32_t flags)
  313 {
  314         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  315         SND_CLONE_ASSERT(!(flags & ~SND_CLONE_MASK),
  316             ("invalid clone flags=0x%08x", flags));
  317 
  318         c->flags = flags;
  319 
  320         return (c->flags);
  321 }
  322 
  323 int
  324 snd_clone_getdevtime(struct cdev *dev, struct timespec *tsp)
  325 {
  326         struct snd_clone_entry *ce;
  327 
  328         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  329         SND_CLONE_ASSERT(tsp != NULL, ("NULL timespec"));
  330 
  331         ce = dev->si_drv2;
  332         if (ce == NULL)
  333                 return (ENODEV);
  334 
  335         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  336 
  337         *tsp = ce->tsp;
  338 
  339         return (0);
  340 }
  341 
  342 uint32_t
  343 snd_clone_getdevflags(struct cdev *dev)
  344 {
  345         struct snd_clone_entry *ce;
  346 
  347         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  348 
  349         ce = dev->si_drv2;
  350         if (ce == NULL)
  351                 return (0xffffffff);
  352 
  353         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  354 
  355         return (ce->flags);
  356 }
  357 
  358 uint32_t
  359 snd_clone_setdevflags(struct cdev *dev, uint32_t flags)
  360 {
  361         struct snd_clone_entry *ce;
  362 
  363         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  364         SND_CLONE_ASSERT(!(flags & ~SND_CLONE_DEVMASK),
  365             ("invalid clone dev flags=0x%08x", flags));
  366 
  367         ce = dev->si_drv2;
  368         if (ce == NULL)
  369                 return (0xffffffff);
  370 
  371         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  372 
  373         ce->flags = flags;
  374 
  375         return (ce->flags);
  376 }
  377 
  378 /* Elapsed time conversion to ms */
  379 #define SND_CLONE_ELAPSED(x, y)                                         \
  380         ((((x)->tv_sec - (y)->tv_sec) * 1000) +                         \
  381         (((y)->tv_nsec > (x)->tv_nsec) ?                                \
  382         (((1000000000L + (x)->tv_nsec -                                 \
  383         (y)->tv_nsec) / 1000000) - 1000) :                              \
  384         (((x)->tv_nsec - (y)->tv_nsec) / 1000000)))
  385 
  386 #define SND_CLONE_EXPIRED(x, y, z)                                      \
  387         ((x)->deadline < 1 ||                                           \
  388         ((y)->tv_sec - (z)->tv_sec) > ((x)->deadline / 1000) ||         \
  389         SND_CLONE_ELAPSED(y, z) > (x)->deadline)
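
A worked example of the millisecond conversion, added as an editorial note
(the timestamps are arbitrary):

/*
 * Example: with x = { 5, 200000000 } and y = { 3, 900000000 }, i.e. 5.2 s
 * versus 3.9 s, SND_CLONE_ELAPSED(x, y) evaluates to
 *   (5 - 3) * 1000 + ((1000000000 + 200000000 - 900000000) / 1000000 - 1000)
 *   = 2000 + (300 - 1000) = 1300 ms,
 * matching the 1.3 second difference.  The second branch only runs when y's
 * nanoseconds exceed x's and effectively borrows one second from the first
 * term.
 */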
  390 
  391 /*
  392  * snd_clone_gc() : Garbage collector for stalled, expired objects. Refer to
  393  * clone.h for explanations on GC settings.
  394  */
  395 int
  396 snd_clone_gc(struct snd_clone *c)
  397 {
  398         struct snd_clone_entry *ce, *tce;
  399         struct timespec now;
  400         int pruned;
  401 
  402         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  403 
  404         if (!(c->flags & SND_CLONE_GC_ENABLE) || c->size == 0)
  405                 return (0);
  406 
  407         snd_timestamp(&now);
  408 
  409         /*
  410          * Bail out if the last clone invocation is still within the
  411          * deadline threshold.
  412          */
  413         if ((c->flags & SND_CLONE_GC_EXPIRED) &&
  414             !SND_CLONE_EXPIRED(c, &now, &c->tsp))
  415                 return (0);
  416 
  417         pruned = 0;
  418 
  419         /*
  420          * Visit each object in reverse order. If an object is still being
  421          * referenced by a valid open(), skip it. Look for expired objects
  422          * and either revoke their clone invocation status or mercilessly
  423          * throw them away.
  424          */
  425         TAILQ_FOREACH_REVERSE_SAFE(ce, &c->head, link_head, link, tce) {
  426                 if (!(ce->flags & SND_CLONE_BUSY) &&
  427                     (!(ce->flags & SND_CLONE_INVOKE) ||
  428                     SND_CLONE_EXPIRED(c, &now, &ce->tsp))) {
  429                         if ((c->flags & SND_CLONE_GC_REVOKE) ||
  430                             ce->devt->si_threadcount != 0) {
  431                                 ce->flags &= ~SND_CLONE_INVOKE;
  432                                 ce->pid = -1;
  433                         } else {
  434                                 TAILQ_REMOVE(&c->head, ce, link);
  435                                 destroy_dev(ce->devt);
  436                                 free(ce, M_DEVBUF);
  437                                 c->size--;
  438                         }
  439                         pruned++;
  440                 }
  441         }
  442 
  443         /* return total pruned objects */
  444         return (pruned);
  445 }
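
A hedged sketch of how a caller might opt in to the garbage-collector
behaviors checked above.  The flag names are the ones already used in this
file (defined in clone.h); the helper and the particular combination are
illustrative only.

/*
 * Hypothetical helper, not part of this file: enable automatic GC from
 * snd_clone_unref(), run it only once the last reference goes away, and
 * prefer revoking an entry's invoke status over destroying its cdev.
 */
static void
example_clone_gc_policy(struct snd_clone *c)
{
        uint32_t flags;

        flags = snd_clone_getflags(c);
        flags |= SND_CLONE_GC_ENABLE | SND_CLONE_GC_UNREF |
            SND_CLONE_GC_LASTREF | SND_CLONE_GC_REVOKE;
        (void)snd_clone_setflags(c, flags);
}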
  446 
  447 void
  448 snd_clone_destroy(struct snd_clone *c)
  449 {
  450         struct snd_clone_entry *ce, *tmp;
  451 
  452         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  453 
  454         ce = TAILQ_FIRST(&c->head);
  455         while (ce != NULL) {
  456                 tmp = TAILQ_NEXT(ce, link);
  457                 if (ce->devt != NULL)
  458                         destroy_dev(ce->devt);
  459                 free(ce, M_DEVBUF);
  460                 ce = tmp;
  461         }
  462 
  463         free(c, M_DEVBUF);
  464 }
  465 
  466 /*
  467  * snd_clone_acquire() : The vital part of concurrency management. Must be
  468  * called somewhere at the beginning of the open() handler. ENODEV is not
  469  * really fatal since it just tells the caller that this is not a cloned
  470  * device.  EBUSY is *real*, don't forget that!
  471  */
  472 int
  473 snd_clone_acquire(struct cdev *dev)
  474 {
  475         struct snd_clone_entry *ce;
  476 
  477         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  478 
  479         ce = dev->si_drv2;
  480         if (ce == NULL)
  481                 return (ENODEV);
  482 
  483         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  484 
  485         ce->flags &= ~SND_CLONE_INVOKE;
  486 
  487         if (ce->flags & SND_CLONE_BUSY)
  488                 return (EBUSY);
  489 
  490         ce->flags |= SND_CLONE_BUSY;
  491 
  492         return (0);
  493 }
  494 
  495 /*
  496  * snd_clone_release() : Release busy status. Must be called somewhere at
  497  * the end of the close() handler, or somewhere after a failed open().
  498  */
  499 int
  500 snd_clone_release(struct cdev *dev)
  501 {
  502         struct snd_clone_entry *ce;
  503 
  504         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  505 
  506         ce = dev->si_drv2;
  507         if (ce == NULL)
  508                 return (ENODEV);
  509 
  510         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  511 
  512         ce->flags &= ~SND_CLONE_INVOKE;
  513 
  514         if (!(ce->flags & SND_CLONE_BUSY))
  515                 return (EBADF);
  516 
  517         ce->flags &= ~SND_CLONE_BUSY;
  518         ce->pid = -1;
  519 
  520         return (0);
  521 }
  522 
  523 /*
  524  * snd_clone_ref/unref() : Garbage collector reference counter. To make the
  525  * garbage collector run automatically, the sequence must be something like
  526  * this (both in open() and close() handlers):
  527  *
  528  *  open() - 1) snd_clone_acquire()
  529  *           2) .... check check ... if failed, snd_clone_release()
  530  *           3) Success. Call snd_clone_ref()
  531  *
  532  * close() - 1) .... check check check ....
  533  *           2) Success. snd_clone_release()
  534  *           3) snd_clone_unref(). The garbage collector will run at this
  535  *              point if this is the last referenced object.
  536  */
  537 int
  538 snd_clone_ref(struct cdev *dev)
  539 {
  540         struct snd_clone_entry *ce;
  541         struct snd_clone *c;
  542 
  543         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  544 
  545         ce = dev->si_drv2;
  546         if (ce == NULL)
  547                 return (0);
  548 
  549         c = ce->parent;
  550         SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
  551         SND_CLONE_ASSERT(c->refcount >= 0, ("refcount < 0"));
  552 
  553         return (++c->refcount);
  554 }
  555 
  556 int
  557 snd_clone_unref(struct cdev *dev)
  558 {
  559         struct snd_clone_entry *ce;
  560         struct snd_clone *c;
  561 
  562         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  563 
  564         ce = dev->si_drv2;
  565         if (ce == NULL)
  566                 return (0);
  567 
  568         c = ce->parent;
  569         SND_CLONE_ASSERT(c != NULL, ("NULL parent"));
  570         SND_CLONE_ASSERT(c->refcount > 0, ("refcount <= 0"));
  571 
  572         c->refcount--;
  573 
  574         /* 
  575          * Run the automatic garbage collector, if needed.
  576          */
  577         if ((c->flags & SND_CLONE_GC_UNREF) &&
  578             (!(c->flags & SND_CLONE_GC_LASTREF) ||
  579             (c->refcount == 0 && (c->flags & SND_CLONE_GC_LASTREF))))
  580                 (void)snd_clone_gc(c);
  581 
  582         return (c->refcount);
  583 }
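
A hedged sketch of the open()/close() sequence described in the comment above
snd_clone_ref().  The handlers are hypothetical and the device-specific work
is elided; the real sound driver handlers do considerably more.

/*
 * Hypothetical cdevsw handlers, not part of this file, following the
 * acquire/ref ... release/unref sequence described above.
 */
static int
example_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
        int err;

        err = snd_clone_acquire(dev);
        if (err == EBUSY)
                return (err);   /* the *real* error: somebody else owns it */
        /* ENODEV only means "not a cloned device"; carry on. */

        err = 0;
        /* ... device-specific open work would go here, setting err ... */
        if (err != 0) {
                (void)snd_clone_release(dev);   /* failed open */
                return (err);
        }

        (void)snd_clone_ref(dev);
        return (0);
}

static int
example_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
        /* ... device-specific close work would go here ... */

        (void)snd_clone_release(dev);
        (void)snd_clone_unref(dev);     /* may run snd_clone_gc() */

        return (0);
}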
  584 
  585 void
  586 snd_clone_register(struct snd_clone_entry *ce, struct cdev *dev)
  587 {
  588         SND_CLONE_ASSERT(ce != NULL, ("NULL snd_clone_entry"));
  589         SND_CLONE_ASSERT(dev != NULL, ("NULL dev"));
  590         SND_CLONE_ASSERT(dev->si_drv2 == NULL, ("dev->si_drv2 not NULL"));
  591         SND_CLONE_ASSERT((ce->flags & SND_CLONE_ALLOC) == SND_CLONE_ALLOC,
  592             ("invalid clone alloc flags=0x%08x", ce->flags));
  593         SND_CLONE_ASSERT(ce->devt == NULL, ("ce->devt not NULL"));
  594         SND_CLONE_ASSERT(ce->unit == dev2unit(dev),
  595             ("invalid unit ce->unit=0x%08x dev2unit=0x%08x",
  596             ce->unit, dev2unit(dev)));
  597 
  598         SND_CLONE_ASSERT(ce->parent != NULL, ("NULL parent"));
  599 
  600         dev->si_drv2 = ce;
  601         ce->devt = dev;
  602         ce->flags &= ~SND_CLONE_ALLOC;
  603         ce->flags |= SND_CLONE_INVOKE;
  604 }
  605 
  606 struct snd_clone_entry *
  607 snd_clone_alloc(struct snd_clone *c, struct cdev **dev, int *unit, int tmask)
  608 {
  609         struct snd_clone_entry *ce, *after, *bce, *cce, *nce, *tce;
  610         struct timespec now;
  611         int cunit, allocunit;
  612         pid_t curpid;
  613 
  614         SND_CLONE_ASSERT(c != NULL, ("NULL snd_clone"));
  615         SND_CLONE_ASSERT(dev != NULL, ("NULL dev pointer"));
  616         SND_CLONE_ASSERT((c->typemask & tmask) == tmask,
  617             ("invalid tmask: typemask=0x%08x tmask=0x%08x",
  618             c->typemask, tmask));
  619         SND_CLONE_ASSERT(unit != NULL, ("NULL unit pointer"));
  620         SND_CLONE_ASSERT(*unit == -1 || !(*unit & (c->typemask | tmask)),
  621             ("typemask collision: typemask=0x%08x tmask=0x%08x *unit=%d",
  622             c->typemask, tmask, *unit));
  623 
  624         if (!(c->flags & SND_CLONE_ENABLE) ||
  625             (*unit != -1 && *unit > c->maxunit))
  626                 return (NULL);
  627 
  628         ce = NULL;
  629         after = NULL;
  630         bce = NULL;     /* "b"usy candidate */
  631         cce = NULL;     /* "c"urthread/proc candidate */
  632         nce = NULL;     /* "n"ull, totally unbusy candidate */
  633         tce = NULL;     /* Last "t"ry candidate */
  634         cunit = 0;
  635         allocunit = (*unit == -1) ? 0 : *unit;
  636         curpid = curthread->td_proc->p_pid;
  637 
  638         snd_timestamp(&now);
  639 
  640         TAILQ_FOREACH(ce, &c->head, link) {
  641                 /*
  642                  * Sort incrementally according to device type.
  643                  */
  644                 if (tmask > (ce->unit & c->typemask)) {
  645                         if (cunit == 0)
  646                                 after = ce;
  647                         continue;
  648                 } else if (tmask < (ce->unit & c->typemask))
  649                         break;
  650 
  651                 /*
  652                  * Shoot.. this is where the grumpiness begins. Just
  653                  * return immediately.
  654                  */
  655                 if (*unit != -1 && *unit == (ce->unit & ~tmask))
  656                         goto snd_clone_alloc_out;
  657 
  658                 cunit++;
  659                 /*
  660                  * Similar device type. Sort incrementally according
  661                  * to allocation unit. While here, look for a free slot
  662                  * and possible collisions with new / future allocations.
  663                  */
  664                 if (*unit == -1 && (ce->unit & ~tmask) == allocunit)
  665                         allocunit++;
  666                 if ((ce->unit & ~tmask) < allocunit)
  667                         after = ce;
  668                 /*
  669                  * Clone logic:
  670                  *   1. Look for a non-busy entry, but keep track of the
  671                  *      best possible busy cdev.
  672                  *   2. Look for the best (oldest referenced) entry that is
  673                  *      in the same process / thread.
  674                  *   3. Look for the best (oldest referenced), absolutely
  675                  *      free entry.
  676                  *   4. Lastly, look for the best (oldest referenced) of
  677                  *      any entries that don't fit anything above.
  678                  */
  679                 if (ce->flags & SND_CLONE_BUSY) {
  680                         if (ce->devt != NULL && (bce == NULL ||
  681                             timespeccmp(&ce->tsp, &bce->tsp, <)))
  682                                 bce = ce;
  683                         continue;
  684                 }
  685                 if (ce->pid == curpid &&
  686                     (cce == NULL || timespeccmp(&ce->tsp, &cce->tsp, <)))
  687                         cce = ce;
  688                 else if (!(ce->flags & SND_CLONE_INVOKE) &&
  689                     (nce == NULL || timespeccmp(&ce->tsp, &nce->tsp, <)))
  690                         nce = ce;
  691                 else if (tce == NULL || timespeccmp(&ce->tsp, &tce->tsp, <))
  692                         tce = ce;
  693         }
  694         if (*unit != -1)
  695                 goto snd_clone_alloc_new;
  696         else if (cce != NULL) {
  697                 /* Same proc entry found, go for it */
  698                 ce = cce;
  699                 goto snd_clone_alloc_out;
  700         } else if (nce != NULL) {
  701                 /*
  702                  * Next, try an absolutely free entry. If the calculated
  703                  * allocunit is smaller, create a new entry instead.
  704                  */
  705                 if (allocunit < (nce->unit & ~tmask))
  706                         goto snd_clone_alloc_new;
  707                 ce = nce;
  708                 goto snd_clone_alloc_out;
  709         } else if (allocunit > c->maxunit) {
  710                 /*
  711                  * Maximum allowable unit reached. Try returning any
  712                  * available cdev and hope for the best. If the lookup is
  713                  * done for things like stat(), mtime() etc., things should
  714                  * be ok. Otherwise, the open() handler should do further
  715                  * checks and decide which error code to return.
  716                  */
  717                 if (tce != NULL) {
  718                         ce = tce;
  719                         goto snd_clone_alloc_out;
  720                 } else if (bce != NULL) {
  721                         ce = bce;
  722                         goto snd_clone_alloc_out;
  723                 }
  724                 return (NULL);
  725         }
  726 
  727 snd_clone_alloc_new:
  728         /*
  729          * No free entries found, and we still haven't reached the maximum
  730          * allowable unit. Allocate and set up a minimal unique entry with
  731          * busy status so nobody will monkey with this new entry. The unit
  732          * magic is set right here to avoid collisions with other contesting
  733          * handlers. The caller must be careful here to maintain its own
  734          * synchronization, as long as it does not conflict with malloc(9)
  735          * operations.
  736          *
  737          * That said, go figure.
  738          */
  739         ce = malloc(sizeof(*ce), M_DEVBUF,
  740             ((c->flags & SND_CLONE_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
  741         if (ce == NULL) {
  742                 if (*unit != -1)
  743                         return (NULL);
  744                 /*
  745                  * We're being dense, ignorance is bliss,
  746                  * Super Regulatory Measure (TM).. TRY AGAIN!
  747                  */
  748                 if (nce != NULL) {
  749                         ce = nce;
  750                         goto snd_clone_alloc_out;
  751                 } else if (tce != NULL) {
  752                         ce = tce;
  753                         goto snd_clone_alloc_out;
  754                 } else if (bce != NULL) {
  755                         ce = bce;
  756                         goto snd_clone_alloc_out;
  757                 }
  758                 return (NULL);
  759         }
  760         /* Set up the new entry */
  761         ce->parent = c;
  762         ce->unit = tmask | allocunit;
  763         ce->pid = curpid;
  764         ce->tsp = now;
  765         ce->flags |= SND_CLONE_ALLOC;
  766         if (after != NULL) {
  767                 TAILQ_INSERT_AFTER(&c->head, after, ce, link);
  768         } else {
  769                 TAILQ_INSERT_HEAD(&c->head, ce, link);
  770         }
  771         c->size++;
  772         c->tsp = now;
  773         /*
  774          * Save the new allocation unit for the caller, which will be
  775          * used by make_dev().
  776          */
  777         *unit = allocunit;
  778 
  779         return (ce);
  780 
  781 snd_clone_alloc_out:
  782         /*
  783          * Set, mark and timestamp the entry if this is a truly free
  784          * entry.  Leave a busy entry alone.
  785          */
  786         if (!(ce->flags & SND_CLONE_BUSY)) {
  787                 ce->pid = curpid;
  788                 ce->tsp = now;
  789                 ce->flags |= SND_CLONE_INVOKE;
  790         }
  791         c->tsp = now;
  792         *dev = ce->devt;
  793 
  794         return (NULL);
  795 }
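
Finally, a hedged sketch of a dev_clone event handler driving
snd_clone_alloc() and snd_clone_register(), based only on the contract
visible in this file: a non-NULL return means a new entry was created and the
caller must create a cdev whose unit matches the entry (snd_clone_register()
asserts ce->unit == dev2unit(dev)); a NULL return with *dev filled in means an
existing cdev can be reused or shared.  The handler name, device name, cdevsw,
type bit, and the assumption that make_dev()'s unit argument is what
dev2unit() later reports are all illustrative; the real sound driver handlers
are more involved.

/*
 * Hypothetical dev_clone EVENTHANDLER body, not part of this file.
 * "example_clones" is the manager from the earlier sketch; EXAMPLE_TYPE and
 * example_cdevsw are placeholders.
 */
#define EXAMPLE_TYPE    0x00800000      /* placeholder type bit in typemask */

static struct cdevsw example_cdevsw = {
        .d_version =    D_VERSION,
        .d_open =       example_open,   /* from the earlier sketch */
        .d_close =      example_close,  /* from the earlier sketch */
        .d_name =       "example_audio",
};

static void
example_clone_handler(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
        struct snd_clone_entry *ce;
        int unit;

        if (*dev != NULL || strcmp(name, "example_audio") != 0)
                return;

        unit = -1;                      /* let the manager pick the unit */
        ce = snd_clone_alloc(example_clones, dev, &unit, EXAMPLE_TYPE);
        if (ce != NULL) {
                /*
                 * New entry: create the cdev with the combined unit so that
                 * snd_clone_register()'s dev2unit() assertion holds, then
                 * attach it to the entry (si_drv2).
                 */
                *dev = make_dev(&example_cdevsw, EXAMPLE_TYPE | unit,
                    UID_ROOT, GID_WHEEL, 0666, "example_audio%d", unit);
                snd_clone_register(ce, *dev);
        }
        /* else: *dev may already point at a reusable or shared cdev. */

        if (*dev != NULL)
                dev_ref(*dev);
}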

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.