FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kcont.c


/* $NetBSD: kern_kcont.c,v 1.13 2005/12/24 19:12:23 perry Exp $ */

/*
 * Copyright 2003 Jonathan Stone.
 * All rights reserved.
 * Copyright (c) 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jonathan Stone.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_kcont.c,v 1.13 2005/12/24 19:12:23 perry Exp $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <lib/libkern/libkern.h>

#include <machine/intr.h>       /* IPL_*, and schedsoftnet() */
                                /* XXX: schedsoftnet() should die. */

#include <sys/kcont.h>

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
/*
 * Software-interrupt priority continuation queues.
 */
static kcq_t kcq_softnet;
static kcq_t kcq_softclock;
static kcq_t kcq_softserial;

static void *kc_si_softnet;
static void *kc_si_softclock;
static void *kc_si_softserial;
#endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */

/*
 * Pool from which struct kc objects are allocated.
 */
POOL_INIT(kc_pool, sizeof(struct kc), 0, 0, 0, "kcpl", NULL);

/*
 * Process-context continuation queue.
 */
static kcq_t kcq_process_ctxt;

/*
 * Insert a fully-formed struct kc into, or remove one from, the
 * kc_queue of some kernel object (e.g., a struct buf).
 * For fine-grained SMP, both enqueueing and dequeueing will
 * need a locking mechanism.
 */
static inline void
kcont_enqueue_atomic(kcq_t *kcq, struct kc *kc)
{
        int s;

        s = splvm();
        SIMPLEQ_INSERT_TAIL(kcq, kc, kc_next);
        splx(s);
}

static inline struct kc *
kcont_dequeue_atomic(kcq_t *kcq)
{
        struct kc *kc;
        int s;

        s = splvm();
        kc = SIMPLEQ_FIRST(kcq);
        if (kc != NULL) {
                SIMPLEQ_REMOVE_HEAD(kcq, kc_next);
                SIMPLEQ_NEXT(kc, kc_next) = NULL;
        }
        splx(s);
        return kc;
}

/*
 * Construct a continuation object from pre-allocated memory.
 * Used by functions that are about to call an asynchronous operation,
 * to build a continuation to be called once the operation completes.
 */
static inline struct kc *
kc_set(struct kc *kc, void (*func)(void *, void *, int),
    void *env_arg, int ipl)
{

        kc->kc_fn = func;
        kc->kc_env_arg = env_arg;
        kc->kc_ipl = ipl;
        kc->kc_flags = 0;
#ifdef DEBUG
        kc->kc_obj = NULL;
        kc->kc_status = -1;
        SIMPLEQ_NEXT(kc, kc_next) = NULL;
#endif
        return kc;
}

/*
 * Request a continuation.  Caller provides space for the struct kc.
 */
struct kc *
kcont(struct kc *kc, void (*func)(void *, void *, int),
    void *env_arg, int continue_ipl)
{

        /* Just save the arguments in the kcont. */
        return kc_set(kc, func, env_arg, continue_ipl);
}
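
/*
 * Usage sketch (editorial addition, not part of the original file):
 * a caller that owns the storage for its continuation.  The names
 * struct my_softc, my_io_done, and my_start_io are hypothetical.
 */
#ifdef KCONT_EXAMPLE
struct my_softc {
        struct kc sc_kc;        /* caller-provided continuation storage */
        kcq_t sc_kcq;           /* this object's completion queue */
};

/* Continuation function: called as (*kc_fn)(obj, env_arg, status). */
static void
my_io_done(void *obj, void *env_arg, int status)
{
        struct my_softc *sc = env_arg;

        (void)sc;
        /* ... continue processing once the async operation completes ... */
}

static void
my_start_io(struct my_softc *sc)
{
        /* Build the continuation in caller-owned memory... */
        struct kc *kc = kcont(&sc->sc_kc, my_io_done, sc, KC_IPL_IMMED);

        /* ...park it where the completion path will run it... */
        kcont_enqueue(&sc->sc_kcq, kc);

        /* ...then kick off the asynchronous operation. */
}
#endif /* KCONT_EXAMPLE */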

/*
 * Request a malloc'ed/auto-freed continuation.  The kcont framework
 * mallocs the struct kc, and initializes it with the caller-supplied args.
 * Once the asynchronous operation completes and the continuation function
 * has been called, the kcont framework will free the struct kc
 * immediately after the continuation function returns.
 */
struct kc *
kcont_malloc(int malloc_flags,
    void (*func)(void *, void *, int),
    void *env_arg, int continue_ipl)
{
        struct kc *kc;
        int pool_flags;

        pool_flags = (malloc_flags & M_NOWAIT) ? 0 : PR_WAITOK;
        pool_flags |= (malloc_flags & M_CANFAIL) ? PR_LIMITFAIL : 0;

        kc = pool_get(&kc_pool, pool_flags);
        if (kc == NULL)
                return kc;
        kc = kc_set(kc, func, env_arg, continue_ipl);
        kc->kc_flags |= KC_AUTOFREE;    /* mark for auto-free after the callback runs */
        return kc;
}
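
/*
 * Usage sketch (editorial addition, not part of the original file):
 * a fire-and-forget continuation.  kcont_malloc() allocates the
 * struct kc from kc_pool and the framework frees it after the
 * callback returns.  my_cleanup and my_request are hypothetical names.
 */
#ifdef KCONT_EXAMPLE
static void
my_cleanup(void *obj, void *env_arg, int status)
{
        /* Runs exactly once; the struct kc is auto-freed on return. */
}

static int
my_request(kcq_t *kcq, void *env)
{
        struct kc *kc;

        /* M_NOWAIT: do not sleep; the allocation may fail. */
        kc = kcont_malloc(M_NOWAIT, my_cleanup, env, KC_IPL_IMMED);
        if (kc == NULL)
                return ENOMEM;
        kcont_enqueue(kcq, kc);
        return 0;
}
#endif /* KCONT_EXAMPLE */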

/*
 * Dispatch a dequeued continuation which requested deferral
 * into the appropriate lower-priority queue.
 * Public API entry to defer execution of a pre-built kcont.
 */
void
kcont_defer(struct kc *kc, void *obj, int status)
{
        /*
         * The IPL at which access to the object is synchronized is
         * above the continuation's requested callback IPL (e.g., the
         * continuation wants IPL_SOFTNET but the object holding it
         * incurred the wakeup()able event whilst at IPL_BIO).
         * Defer this kc to a lower-priority kc queue,
         * to be serviced slightly later at a lower IPL.
         */

        /*
         * If we already deferred this kcont, don't clobber
         * the previously-saved kc_obj and kc_status.
         * (The obj/status arguments passed in by kcont_run() should
         * be the same as kc_obj/kc_status, but don't rely on that.)
         */
        if ((kc->kc_flags & KC_DEFERRED) == 0) {
                kc->kc_flags |= KC_DEFERRED;
                kc->kc_obj = obj;
                kc->kc_status = status;
        }

        switch (kc->kc_ipl) {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
        case KC_IPL_DEFER_SOFTCLOCK:
                kcont_enqueue_atomic(&kcq_softclock, kc);
                softintr_schedule(kc_si_softclock);
                break;
        case KC_IPL_DEFER_SOFTNET:
                kcont_enqueue_atomic(&kcq_softnet, kc);
                softintr_schedule(kc_si_softnet);
                break;
        case KC_IPL_DEFER_SOFTSERIAL:
                kcont_enqueue_atomic(&kcq_softserial, kc);
                softintr_schedule(kc_si_softserial);
                break;

#else /* !__HAVE_GENERIC_SOFT_INTERRUPTS */
        /* What to do? For now, punt to process context. */
        case KC_IPL_DEFER_SOFTCLOCK:
        case KC_IPL_DEFER_SOFTSERIAL:
        case KC_IPL_DEFER_SOFTNET:
                /*FALLTHROUGH*/
#endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */

        case KC_IPL_DEFER_PROCESS:
                kcont_enqueue_atomic(&kcq_process_ctxt, kc);
                wakeup(&kcq_process_ctxt);
                break;
        default:
                KASSERT(0);
        }
}

void
kcont_defer_malloc(int mallocflags,
    void (*func)(void *, void *, int),
    void *obj, void *env_arg, int status, int ipl)
{
        struct kc *kc;

        kc = kcont_malloc(mallocflags, func, env_arg, ipl);
        if (kc != NULL)
                kcont_defer(kc, obj, status);
        /* XXX on M_NOWAIT allocation failure, the deferral is silently dropped. */
}
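
/*
 * Usage sketch (editorial addition, not part of the original file):
 * deferring follow-up work from a hardware interrupt handler.  The
 * handler runs at high IPL, but the follow-up only needs IPL_SOFTNET,
 * so it is queued for the softnet soft interrupt instead of running
 * in place.  my_intr and my_softnet_work are hypothetical names.
 */
#ifdef KCONT_EXAMPLE
static void
my_softnet_work(void *obj, void *env_arg, int status)
{
        /* Runs later at IPL_SOFTNET (or in process context as fallback). */
}

static int
my_intr(void *arg)
{
        /* M_NOWAIT: interrupt context must not sleep. */
        kcont_defer_malloc(M_NOWAIT, my_softnet_work,
            arg /* obj */, NULL /* env_arg */, 0 /* status */,
            KC_IPL_DEFER_SOFTNET);
        return 1;
}
#endif /* KCONT_EXAMPLE */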

/*
 * Enqueue a pre-existing kcont onto a struct kcq completion queue
 * of some pre-existing kernel object.
 */
void
kcont_enqueue(kcq_t *kcq, struct kc *kc)
{

        kcont_enqueue_atomic(kcq, kc);
}

/*
 * Run through a list of continuations, calling (or handing off)
 * continuation functions.
 * If the caller-provided IPL is at or below the requested callback
 * IPL, deliver the callback.
 * If the caller-provided IPL is above the requested callback IPL,
 * re-enqueue the continuation to a lower-priority queue.
 */
void
kcont_run(kcq_t *kcq, void *obj, int status, int curipl)
{
        struct kc *kc;

        while ((kc = kcont_dequeue_atomic(kcq)) != NULL) {

                /* If execution of kc was already deferred, restore context. */
                if (kc->kc_flags & KC_DEFERRED) {
                        KASSERT(obj == NULL);
                        obj = kc->kc_obj;
                        status = kc->kc_status;
                }

                /* Check whether to execute now or to defer. */
                if (kc->kc_ipl == KC_IPL_IMMED || curipl <= kc->kc_ipl) {
                        int saved_flags = kc->kc_flags; /* XXX see below */

                        /* Satisfy our raison d'être. */
                        (*kc->kc_fn)(obj, kc->kc_env_arg, status);

                        /*
                         * We must not touch (*kc) after calling
                         * (*kc->kc_fn), unless we were specifically
                         * asked to free it.  The memory for (*kc) may
                         * be a sub-field of some other object (for example,
                         * of kc->kc_env_arg) and (*kc_fn)() may already
                         * have freed it by the time we get here.  So save
                         * kc->kc_flags (above) and use that saved copy
                         * to test for auto-free.
                         */
                        if (saved_flags & KC_AUTOFREE)
                                pool_put(&kc_pool, kc);
                } else {
                        kcont_defer(kc, obj, status);
                }
        }
}
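
/*
 * Usage sketch (editorial addition, not part of the original file):
 * an object's completion path.  The notifier passes the object, a
 * status code, and the IPL it is currently running at; kcont_run()
 * invokes the continuations that can run at this level and defers
 * the rest via kcont_defer().  my_event_done is a hypothetical name,
 * and struct my_softc is the hypothetical object from the kcont()
 * sketch above.
 */
#ifdef KCONT_EXAMPLE
static void
my_event_done(struct my_softc *sc, int error)
{
        /* We are at softnet level here, mirroring the trampolines below. */
        kcont_run(&sc->sc_kcq, sc, error, KC_IPL_DEFER_SOFTNET);
}
#endif /* KCONT_EXAMPLE */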

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
/*
 * Trampolines for processing software-interrupt kcont queues.
 */
static void
kcont_run_softclock(void *arg)
{

        kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTCLOCK);
}

static void
kcont_run_softnet(void *arg)
{

        kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTNET);
}

static void
kcont_run_softserial(void *arg)
{

        kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTSERIAL);
}
#endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */

/*
 * Main entry point for the kcont worker kthread: execute
 * continuations that requested deferral to process context.
 */
static void
kcont_worker(void *arg)
{
        int status;

        (void)arg;      /* kill GCC warning */

        while (1) {
                /* Wake at least once per second, in case a wakeup was missed. */
                status = ltsleep(&kcq_process_ctxt, PCATCH, "kcont", hz, NULL);
                if (status != 0 && status != EWOULDBLOCK)
                        break;
                kcont_run(&kcq_process_ctxt, NULL, 0, KC_IPL_DEFER_PROCESS);
        }
        kthread_exit(0);
}

static void
kcont_create_worker(void *arg)
{
        if (kthread_create1(kcont_worker, NULL, NULL, "kcont"))
                panic("fork kcont");
}

/*
 * Initialize the kcont subsystem.
 */
void
kcont_init(void)
{

#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
        /*
         * Initialize a kc_queue and a soft interrupt handler for each
         * soft-interrupt deferral level.  (If soft interrupts are not
         * available, deferrals fall back all the way to process context.)
         */
        SIMPLEQ_INIT(&kcq_softclock);
        kc_si_softclock = softintr_establish(IPL_SOFTCLOCK,
            kcont_run_softclock, &kcq_softclock);

        SIMPLEQ_INIT(&kcq_softnet);
        kc_si_softnet = softintr_establish(IPL_SOFTNET,
            kcont_run_softnet, &kcq_softnet);

        SIMPLEQ_INIT(&kcq_softserial);
        kc_si_softserial = softintr_establish(IPL_SOFTSERIAL,
            kcont_run_softserial, &kcq_softserial);
#endif  /* __HAVE_GENERIC_SOFT_INTERRUPTS */

        /*
         * Create a kc_queue for process-context continuations, and
         * a worker kthread to service it.  (With fine-grained SMP
         * locking there should be at least one worker kthread per CPU.)
         */
        SIMPLEQ_INIT(&kcq_process_ctxt);
        kthread_create(kcont_create_worker, NULL);
}
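
/*
 * Life cycle summary (editorial addition, not part of the original
 * file): a continuation is built by kcont()/kcont_malloc(), parked on
 * an object's kcq_t with kcont_enqueue(), and run by kcont_run() when
 * the object's event completes.  Continuations whose requested IPL
 * cannot be satisfied are re-queued by kcont_defer() to a soft
 * interrupt queue, or to the process-context worker created above;
 * KC_AUTOFREE continuations are returned to kc_pool after their
 * callback runs.
 */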
