The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_taskqueue.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2000 Doug Rabson
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  *
   26  *      $FreeBSD: releng/5.0/sys/kern/subr_taskqueue.c 101154 2002-08-01 13:37:22Z jhb $
   27  */
   28 
   29 #include <sys/param.h>
   30 #include <sys/systm.h>
   31 #include <sys/bus.h>
   32 #include <sys/interrupt.h>
   33 #include <sys/kernel.h>
   34 #include <sys/lock.h>
   35 #include <sys/malloc.h>
   36 #include <sys/mutex.h>
   37 #include <sys/taskqueue.h>
   38 
   39 static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
   40 
   41 static STAILQ_HEAD(taskqueue_list, taskqueue) taskqueue_queues;
   42 
   43 static void     *taskqueue_ih;
   44 static struct mtx taskqueue_queues_mutex;
   45 
/*
 * A task queue: a priority-ordered list of pending tasks plus the
 * callback used to kick off processing when new work arrives.
 */
struct taskqueue {
        STAILQ_ENTRY(taskqueue) tq_link;        /* entry on global taskqueue_queues list */
        STAILQ_HEAD(, task)     tq_queue;       /* pending tasks, highest priority first */
        const char              *tq_name;       /* queue name (string is not copied) */
        taskqueue_enqueue_fn    tq_enqueue;     /* called (locked) when a task is added */
        void                    *tq_context;    /* opaque argument for tq_enqueue */
        int                     tq_draining;    /* non-zero while queue is being freed */
        struct mtx              tq_mutex;       /* protects tq_queue and tq_draining */
};
   55 
static void     init_taskqueue_list(void *data);

/*
 * One-time boot initialization: set up the mutex guarding the global
 * list of task queues and initialize the list itself.  Registered via
 * the SYSINIT below so it runs before any queue can be created.
 */
static void
init_taskqueue_list(void *data __unused)
{

        mtx_init(&taskqueue_queues_mutex, "taskqueue list", NULL, MTX_DEF);
        STAILQ_INIT(&taskqueue_queues);
}
SYSINIT(taskqueue_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_taskqueue_list,
    NULL);
   67 
   68 struct taskqueue *
   69 taskqueue_create(const char *name, int mflags,
   70                  taskqueue_enqueue_fn enqueue, void *context)
   71 {
   72         struct taskqueue *queue;
   73 
   74         queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
   75         if (!queue)
   76                 return 0;
   77 
   78         STAILQ_INIT(&queue->tq_queue);
   79         queue->tq_name = name;
   80         queue->tq_enqueue = enqueue;
   81         queue->tq_context = context;
   82         queue->tq_draining = 0;
   83         mtx_init(&queue->tq_mutex, "taskqueue", NULL, MTX_DEF);
   84 
   85         mtx_lock(&taskqueue_queues_mutex);
   86         STAILQ_INSERT_TAIL(&taskqueue_queues, queue, tq_link);
   87         mtx_unlock(&taskqueue_queues_mutex);
   88 
   89         return queue;
   90 }
   91 
/*
 * Tear down a task queue: mark it draining so taskqueue_enqueue()
 * refuses new work, run any tasks still pending, unlink it from the
 * global list, and release its resources.
 *
 * Calling this twice on the same queue is a bug (caught by the
 * KASSERT on tq_draining).
 */
void
taskqueue_free(struct taskqueue *queue)
{

        mtx_lock(&queue->tq_mutex);
        KASSERT(queue->tq_draining == 0, ("free'ing a draining taskqueue"));
        queue->tq_draining = 1;
        mtx_unlock(&queue->tq_mutex);

        /* Flush tasks that were queued before draining was set. */
        taskqueue_run(queue);

        mtx_lock(&taskqueue_queues_mutex);
        STAILQ_REMOVE(&taskqueue_queues, queue, taskqueue, tq_link);
        mtx_unlock(&taskqueue_queues_mutex);

        mtx_destroy(&queue->tq_mutex);
        free(queue, M_TASKQUEUE);
}
  110 
  111 /*
  112  * Returns with the taskqueue locked.
  113  */
  114 struct taskqueue *
  115 taskqueue_find(const char *name)
  116 {
  117         struct taskqueue *queue;
  118 
  119         mtx_lock(&taskqueue_queues_mutex);
  120         STAILQ_FOREACH(queue, &taskqueue_queues, tq_link) {
  121                 mtx_lock(&queue->tq_mutex);
  122                 if (!strcmp(queue->tq_name, name)) {
  123                         mtx_unlock(&taskqueue_queues_mutex);
  124                         return queue;
  125                 }
  126                 mtx_unlock(&queue->tq_mutex);
  127         }
  128         mtx_unlock(&taskqueue_queues_mutex);
  129         return 0;
  130 }
  131 
  132 int
  133 taskqueue_enqueue(struct taskqueue *queue, struct task *task)
  134 {
  135         struct task *ins;
  136         struct task *prev;
  137 
  138         mtx_lock(&queue->tq_mutex);
  139 
  140         /*
  141          * Don't allow new tasks on a queue which is being freed.
  142          */
  143         if (queue->tq_draining) {
  144                 mtx_unlock(&queue->tq_mutex);
  145                 return EPIPE;
  146         }
  147 
  148         /*
  149          * Count multiple enqueues.
  150          */
  151         if (task->ta_pending) {
  152                 task->ta_pending++;
  153                 mtx_unlock(&queue->tq_mutex);
  154                 return 0;
  155         }
  156 
  157         /*
  158          * Optimise the case when all tasks have the same priority.
  159          */
  160         prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
  161         if (!prev || prev->ta_priority >= task->ta_priority) {
  162                 STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
  163         } else {
  164                 prev = 0;
  165                 for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
  166                      prev = ins, ins = STAILQ_NEXT(ins, ta_link))
  167                         if (ins->ta_priority < task->ta_priority)
  168                                 break;
  169 
  170                 if (prev)
  171                         STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
  172                 else
  173                         STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
  174         }
  175 
  176         task->ta_pending = 1;
  177         if (queue->tq_enqueue)
  178                 queue->tq_enqueue(queue->tq_context);
  179 
  180         mtx_unlock(&queue->tq_mutex);
  181 
  182         return 0;
  183 }
  184 
  185 void
  186 taskqueue_run(struct taskqueue *queue)
  187 {
  188         struct task *task;
  189         int pending;
  190 
  191         mtx_lock(&queue->tq_mutex);
  192         while (STAILQ_FIRST(&queue->tq_queue)) {
  193                 /*
  194                  * Carefully remove the first task from the queue and
  195                  * zero its pending count.
  196                  */
  197                 task = STAILQ_FIRST(&queue->tq_queue);
  198                 STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
  199                 pending = task->ta_pending;
  200                 task->ta_pending = 0;
  201                 mtx_unlock(&queue->tq_mutex);
  202 
  203                 task->ta_func(task->ta_context, pending);
  204 
  205                 mtx_lock(&queue->tq_mutex);
  206         }
  207         mtx_unlock(&queue->tq_mutex);
  208 }
  209 
/*
 * Enqueue callback for the standard "swi" taskqueue: schedule the
 * software interrupt whose handler cookie is stored in taskqueue_ih.
 */
static void
taskqueue_swi_enqueue(void *context)
{
        swi_sched(taskqueue_ih, 0);
}
  215 
/*
 * Software-interrupt handler: drain all tasks pending on the global
 * taskqueue_swi queue.
 */
static void
taskqueue_swi_run(void *dummy)
{
        taskqueue_run(taskqueue_swi);
}
  221 
/*
 * Define the system-wide "swi" taskqueue, serviced by a software
 * interrupt thread; swi_add() registers taskqueue_swi_run as the
 * handler and stores its cookie in taskqueue_ih for swi_sched().
 */
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, 0,
                 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ, 0,
                     &taskqueue_ih));

Cache object: f0ee4007fe86457c4e2129dc6fc2e9b9


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.