The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kcont.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* $NetBSD: kern_kcont.c,v 1.9 2004/03/27 00:42:38 jonathan Exp $ */
    2 
    3 /*
    4  * Copyright 2003 Jonathan Stone.
    5  * All rights reserved.
    6  * Copyright (c) 2004 The NetBSD Foundation, Inc.
    7  * All rights reserved.
    8  *
    9  * This code is derived from software contributed to The NetBSD Foundation
   10  * by Jonathan Stone.
   11  *
   12  * Redistribution and use in source and binary forms, with or without
   13  * modification, are permitted provided that the following conditions
   14  * are met:
   15  * 1. Redistributions of source code must retain the above copyright
   16  *    notice, this list of conditions and the following disclaimer.
   17  * 2. Redistributions in binary form must reproduce the above copyright
   18  *    notice, this list of conditions and the following disclaimer in the
   19  *    documentation and/or other materials provided with the distribution.
   20  * 3. Neither the name of The NetBSD Foundation nor the names of its
   21  *    contributors may be used to endorse or promote products derived
   22  *    from this software without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   25  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   26  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   27  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   34  * POSSIBILITY OF SUCH DAMAGE.
   35  */
   36 
   37 /*
   38  */
   39 #include <sys/cdefs.h>
   40 __KERNEL_RCSID(0, "$NetBSD: kern_kcont.c,v 1.9 2004/03/27 00:42:38 jonathan Exp $ ");
   41 
   42 #include <sys/types.h>
   43 #include <sys/param.h>
   44 #include <sys/queue.h>
   45 #include <sys/errno.h>
   46 #include <sys/malloc.h>
   47 #include <sys/kernel.h>
   48 #include <sys/pool.h>
   49 #include <sys/kthread.h>
   50 #include <sys/proc.h>
   51 #include <sys/systm.h>
   52 #include <lib/libkern/libkern.h>
   53 
   54 #include <machine/intr.h>       /* IPL_*, and schedsoftnet() */
   55                                 /* XXX: schedsofnet() should die. */
   56 
   57 #include <sys/kcont.h>
   58 
   59 
   60 /* Accessors for struct kc_queue */
   61 static __inline struct kc *kc_set(struct kc *,
   62         void (*func)(void *, void *, int),
   63         void *env_arg, int ipl);
   64 
   65 static __inline void kcont_enqueue_atomic(kcq_t *kcq, struct kc *kc);
   66 static __inline struct kc *kcont_dequeue_atomic(kcq_t *kcq);
   67 
   68 
   69 static void     kcont_worker(void * /*arg*/);
   70 static void     kcont_create_worker(void *);
   71 
   72 
   73 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
   74 /*
   75  * Software-interrupt priority continuation queues.
   76  */
   77 static kcq_t kcq_softnet;
   78 static kcq_t kcq_softclock;
   79 static kcq_t kcq_softserial;
   80 
   81 static void *kc_si_softnet;
   82 static void *kc_si_softclock;
   83 static void *kc_si_softserial;
   84 #endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */
   85 
   86 /*
   87  * Pool allocator structure.
   88  */
   89 static struct pool kc_pool;
   90 
   91 /*
   92  * Process-context continuation queue.
   93  */
   94 static kcq_t kcq_process_ctxt;
   95 
   96 
   97 /*
   98  * Insert/Remove a fully-formed struct kc * into the kc_queue *
   99  * of some kernel object (e.g., a struct buf *).
  100  * For fine-grained SMP, both enqueueing and dequeueing will
  101  * need a locking mechanism.
  102  */
  103 static __inline void
  104 kcont_enqueue_atomic(kcq_t *kcq, struct kc *kc)
  105 {
  106         int s;
  107 
  108         s = splvm();
  109         SIMPLEQ_INSERT_TAIL(kcq, kc, kc_next);
  110         splx(s);
  111 }
  112 
  113 static __inline struct kc *
  114 kcont_dequeue_atomic(kcq_t *kcq)
  115 {
  116         struct kc *kc;
  117         int s;
  118 
  119         s = splvm();
  120         kc = SIMPLEQ_FIRST(kcq);
  121         if (kc != NULL) {
  122                 SIMPLEQ_REMOVE_HEAD(kcq, kc_next);
  123                 SIMPLEQ_NEXT(kc, kc_next) = NULL;
  124         }
  125         splx(s);
  126         return kc;
  127 }
  128 
  129 /*
  130  * Construct a continuation object from pre-allocated memory.
  131  * Used by functions that are about call an asynchronous operation,
  132  * to build a continuation to be called once the operation completes.
  133  */
/*
 * Initialize a caller-supplied struct kc with the given callback,
 * environment argument, and requested callback IPL.  Returns its
 * first argument for call-chaining convenience.
 * Under DEBUG, poison the deferral fields (kc_obj/kc_status/link)
 * so stale values are easier to spot.
 */
static __inline struct kc *
kc_set(struct kc *kc, void (*func)(void *, void *, int),
    void *env_arg, int ipl)
{

	kc->kc_fn = func;
	kc->kc_env_arg = env_arg;
	kc->kc_ipl = ipl;
	kc->kc_flags = 0;
#ifdef DEBUG
	kc->kc_obj = NULL;
	kc->kc_status = -1;
	SIMPLEQ_NEXT(kc, kc_next) = NULL;
#endif
	return kc;
}
  150 
  151 /*
  152  * Request a continuation.  Caller provides space for the struct kc *.
  153  */
/*
 * Public constructor for a continuation in caller-provided storage.
 * No allocation is performed; this merely records func/env_arg and
 * the IPL at which the caller wants the callback delivered.
 */
struct kc *
kcont(struct kc *kc, void (*func)(void *, void *, int),
    void *env_arg, int continue_ipl)
{

	/* Just save the arguments in the kcont *. */
	return kc_set(kc, func, env_arg, continue_ipl);
}
  162 
  163 /*
  164  * Request a malloc'ed/auto-freed continuation. The kcont framework
  165  * mallocs the struct kc, and initializes it with the caller-supplied args.
  166  * Once the asynchronous operation completes and the continuation function
  167  * has been called, the kcont framework will free the struct kc *
  168  * immediately after the continuation function returns.
  169  */
  170 struct kc *
  171 kcont_malloc(int malloc_flags,
  172     void (*func)(void *, void *, int),
  173     void *env_arg, int continue_ipl)
  174 {
  175         struct kc *kc;
  176         int pool_flags;
  177 
  178         pool_flags = (malloc_flags & M_NOWAIT) ? 0 : PR_WAITOK;
  179         pool_flags |= (malloc_flags & M_CANFAIL) ? PR_LIMITFAIL : 0;
  180 
  181         kc = pool_get(&kc_pool, pool_flags);
  182         if (kc == NULL)
  183                 return kc;
  184         return kc_set(kc, func, env_arg, continue_ipl);
  185 }
  186 
  187 /*
  188  * Dispatch a dequeued continuation which requested deferral
  189  * into the appropriate lower-priority queue.
  190  * Public API entry to defer execution of a pre-built kcont.
  191  */
/*
 * Hand off a continuation to the queue matching its requested
 * callback IPL (soft interrupt or process context), and schedule
 * that queue's service routine.
 */
void
kcont_defer(struct kc *kc, void *obj, int status)
{
	/*
	 * IPL at which to synchronize access to object is
	 * above the continuer's requested callback IPL,
	 * (e.g., continuer wants IPL_SOFTNET but the object
	 * holding this continuation incurred the wakeup()able
	 * event whilst at IPL_BIO).
	 * Defer this kc to a lower-priority kc queue,
	 * to be serviced slightly later at a lower IPL.
	 */

	/*
	 * If we already deferred this kcont, don't clobber
	 * the previously-saved kc_object and kc_status.
	 * (The obj/status arguments passed in by kcont_run() should
	 * be the same as kc_object/kc_status, but don't rely on that.)
	 */
	if ((kc->kc_flags & KC_DEFERRED) == 0) {
		kc->kc_flags |= KC_DEFERRED;
		kc->kc_obj = obj;
		kc->kc_status = status;
	}

	/* Route to the queue matching the requested callback IPL. */
	switch (kc->kc_ipl) {
#ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
	case KC_IPL_DEFER_SOFTCLOCK:
		kcont_enqueue_atomic(&kcq_softclock, kc);
		softintr_schedule(kc_si_softclock);
		break;
	case KC_IPL_DEFER_SOFTNET:
		kcont_enqueue_atomic(&kcq_softnet, kc);
		softintr_schedule(kc_si_softnet);
		break;
	case KC_IPL_DEFER_SOFTSERIAL:
		kcont_enqueue_atomic(&kcq_softserial, kc);
		softintr_schedule(kc_si_softserial);
		break;

#else /* !__HAVE_GENERIC_SOFT_INTERRUPTS */
	/* No soft interrupts available; punt to process context. */
	case KC_IPL_DEFER_SOFTCLOCK:
	case KC_IPL_DEFER_SOFTSERIAL:
	case KC_IPL_DEFER_SOFTNET:
		/*FALLTHROUGH*/
#endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */

	case KC_IPL_DEFER_PROCESS:
		kcont_enqueue_atomic(&kcq_process_ctxt, kc);
		wakeup(&kcq_process_ctxt);
		break;
	default:
		/* Unknown IPL request: caller error. */
		KASSERT(0);
	}
}
  248 
  249 void
  250 kcont_defer_malloc(int mallocflags,
  251     void (*func)(void *, void *, int),
  252     void *obj, void *env_arg, int status, int ipl)
  253 {
  254         struct kc *kc;
  255 
  256         kc = kcont_malloc(mallocflags, func, env_arg, ipl);
  257         if (kc != NULL)
  258                 kcont_defer(kc, obj, status);
  259 }
  260 
  261 /*
  262  * Enqueue a pre-existing kcont onto a struct kcq completion queue
  263  * of some pre-existing kernel object.
  264  */
/*
 * Public wrapper to attach a prepared continuation to an object's
 * completion queue; the queue owner will later drain it with
 * kcont_run().
 */
void
kcont_enqueue(kcq_t *kcq, struct kc *kc)
{

	kcont_enqueue_atomic(kcq, kc);
}
  271 
  272 
  273 /*
  274  * Run through a list of continuations, calling (or handing off)
  275  * continuation functions.
  276  * If the caller-provided IPL is the same as the requested IPL,
  277  * deliver the callback.
  278  * If the caller-provided IPL is higher than the requested
  279  * callback IPL, re-enqueue the continuation to a lower-priority queue.
  280  */
/*
 * Drain a continuation queue.  Each dequeued kc is either called
 * directly (when curipl is at or below the kc's requested callback
 * IPL, or the kc asked for immediate delivery) or re-deferred via
 * kcont_defer() to a lower-priority queue.
 */
void
kcont_run(kcq_t *kcq, void *obj, int status, int curipl)
{
	struct kc *kc;

	while ((kc = kcont_dequeue_atomic(kcq)) != NULL) {

		/* If execution of kc was already deferred, restore context. */
		if (kc->kc_flags & KC_DEFERRED) {
			/* Deferral queues carry obj/status in the kc itself. */
			KASSERT(obj == NULL);
			obj = kc->kc_obj;
			status = kc->kc_status;
		}

		/* Check whether to execute now or to defer. */
		if (kc->kc_ipl == KC_IPL_IMMED || curipl <= kc->kc_ipl) {
			int saved_flags = kc->kc_flags; /* XXX see below */

			/* Satisfy our raison d'e^tre */
			(*kc->kc_fn)(obj, kc->kc_env_arg, status);

			/*
			 * We must not touch (*kc) after calling
			 * (*kc->kc_fn), unless we were specifically
			 * asked to free it.  The memory for (*kc) may
			 * be a sub-field of some other object (for example,
			 * of kc->kc_env_arg) and (*kc_fn)() may already
			 * have freed it by the time we get here.  So save
			 * kc->kc_flags (above) and use that saved copy
			 * to test for auto-free.
			 */
			if (saved_flags & KC_AUTOFREE)
				pool_put(&kc_pool, kc);
		} else {
			kcont_defer(kc, obj, status);
		}
	}
}
  319 
  320 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
  321 /*
  322  * Trampolines for processing software-interrupt kcont queues.
  323  */
/* Soft-interrupt trampoline: drain the softclock kcont queue. */
static void
kcont_run_softclock(void *arg)
{

	kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTCLOCK);
}
  330 
/* Soft-interrupt trampoline: drain the softnet kcont queue. */
static void
kcont_run_softnet(void *arg)
{

	kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTNET);
}
  337 
/* Soft-interrupt trampoline: drain the softserial kcont queue. */
static void
kcont_run_softserial(void *arg)
{

	kcont_run((struct kcqueue *)arg, NULL, 0, KC_IPL_DEFER_SOFTSERIAL);
}
  344 #endif /* __HAVE_GENERIC_SOFT_INTERRUPTS */
  345 
  346 
/*
 * Deferred-creation hook (run via kthread_create() once process
 * creation is possible at boot): fork the kcont worker kthread.
 * arg is unused.
 */
static void
kcont_create_worker(void *arg)
{
	if (kthread_create1(kcont_worker, NULL, NULL, "kcont"))
		panic("fork kcont");
}
  353 
  354 /*
  355  * Main entrypoint for kcont worker kthreads to execute
  356  * a continuation which requested deferral to process context.
  357  */
/*
 * Main entrypoint for kcont worker kthreads to execute
 * a continuation which requested deferral to process context.
 * Sleeps on the process-context queue (waking at least once per
 * second via the hz timeout) and drains it on each wakeup; exits
 * if interrupted by a signal (PCATCH).
 */
static void
kcont_worker(void *arg)
{
	int status;

	(void)arg;	/* kill GCC warning */

	while (1) {
		status = ltsleep(&kcq_process_ctxt, PCATCH, "kcont", hz, NULL);
		if (status != 0 && status != EWOULDBLOCK)
			break;
		kcont_run(&kcq_process_ctxt, NULL, 0, KC_IPL_DEFER_PROCESS);
	}
	kthread_exit(0);
}
  373 
  374 
  375 /*
  376  * Initialize kcont subsystem.
  377  */
  378 void
  379 kcont_init(void)
  380 {
  381 
  382 #ifdef __HAVE_GENERIC_SOFT_INTERRUPTS
  383         /*
  384          * Initialize kc_queue and callout for soft-int deferred
  385          * continuations. (If not available, deferrals fall back
  386          * to deferring all the way to process context).
  387          */
  388         SIMPLEQ_INIT(&kcq_softclock);
  389         kc_si_softclock = softintr_establish(IPL_SOFTCLOCK,
  390             kcont_run_softclock, &kcq_softnet);
  391 
  392         SIMPLEQ_INIT(&kcq_softnet);
  393         kc_si_softnet = softintr_establish(IPL_SOFTNET,
  394             kcont_run_softnet, &kcq_softnet);
  395 
  396         SIMPLEQ_INIT(&kcq_softserial);
  397         kc_si_softserial = softintr_establish(IPL_SOFTSERIAL,
  398             kcont_run_softserial, &kcq_softserial);
  399 #endif  /* __HAVE_GENERIC_SOFT_INTERRUPTS */
  400 
  401         pool_init(&kc_pool, sizeof(struct kc), 0, 0, 0, "kcpl", NULL);
  402 
  403         /*
  404          * Create kc_queue for process-context continuations, and
  405          * a worker kthread to process the queue. (Fine-grained SMP
  406          * locking should have at least one worker kthread per CPU).
  407          */
  408         SIMPLEQ_INIT(&kcq_process_ctxt);
  409         kthread_create(kcont_create_worker, NULL);
  410 }

Cache object: 28723985a8b38ed0efc0003e02b11b4f


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.