The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kthread.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/11.1/sys/kern/kern_kthread.c 304883 2016-08-27 09:23:20Z kib $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/cpuset.h>
   33 #include <sys/kthread.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/resourcevar.h>
   38 #include <sys/rwlock.h>
   39 #include <sys/signalvar.h>
   40 #include <sys/sx.h>
   41 #include <sys/umtx.h>
   42 #include <sys/unistd.h>
   43 #include <sys/wait.h>
   44 #include <sys/sched.h>
   45 #include <vm/vm.h>
   46 #include <vm/vm_extern.h>
   47 
   48 #include <machine/stdarg.h>
   49 
   50 /*
   51  * Start a kernel process.  This is called after a fork() call in
   52  * mi_startup() in the file kern/init_main.c.
   53  *
   54  * This function is used to start "internal" daemons and intended
   55  * to be called from SYSINIT().
   56  */
   57 void
   58 kproc_start(const void *udata)
   59 {
   60         const struct kproc_desc *kp = udata;
   61         int error;
   62 
   63         error = kproc_create((void (*)(void *))kp->func, NULL,
   64                     kp->global_procpp, 0, 0, "%s", kp->arg0);
   65         if (error)
   66                 panic("kproc_start: %s: error %d", kp->arg0, error);
   67 }
   68 
/*
 * Create a kernel process/thread/whatever.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * newpp is the return value pointing to the thread's struct proc.
 * flags are flags to fork1 (in unistd.h)
 * pages is passed to fork1() via fr_pages (kernel stack size selector;
 *   0 selects the default -- confirm against fork1()).
 * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
 *
 * Returns 0 on success or an errno value from fork1().
 */
int
kproc_create(void (*func)(void *), void *arg,
    struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
	struct fork_req fr;
	int error;
	va_list ap;
	struct thread *td;
	struct proc *p2;

	/* fork1() relies on proc0's stats area being initialized. */
	if (!proc0.p_stats)
		panic("kproc_create called too soon");

	bzero(&fr, sizeof(fr));
	/*
	 * RFSTOPPED keeps the new thread off the run queue until all of
	 * the setup below is complete.
	 */
	fr.fr_flags = RFMEM | RFFDG | RFPROC | RFSTOPPED | flags;
	fr.fr_pages = pages;
	fr.fr_procp = &p2;
	error = fork1(&thread0, &fr);
	if (error)
		return error;

	/* save a global descriptor, if desired */
	if (newpp != NULL)
		*newpp = p2;

	/* this is a non-swapped system process */
	PROC_LOCK(p2);
	td = FIRST_THREAD_IN_PROC(p2);
	p2->p_flag |= P_SYSTEM | P_KPROC;
	td->td_pflags |= TDP_KTHREAD;
	mtx_lock(&p2->p_sigacts->ps_mtx);
	/* Do not leave zombie children for anyone to reap. */
	p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
	mtx_unlock(&p2->p_sigacts->ps_mtx);
	PROC_UNLOCK(p2);

	/* set up arg0 for 'ps', et al */
	va_start(ap, fmt);
	vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
	va_end(ap);
	/* give the initial thread the same name as the process */
	va_start(ap, fmt);
	vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
	va_end(ap);
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/* call the processes' main()... */
	cpu_fork_kthread_handler(td, func, arg);

	/* Avoid inheriting affinity from a random parent. */
	cpuset_setthread(td->td_tid, cpuset_root);
	thread_lock(td);
	TD_SET_CAN_RUN(td);
	sched_prio(td, PVM);
	sched_user_prio(td, PUSER);

	/*
	 * Delay putting it on the run queue until now.  A caller passing
	 * RFSTOPPED keeps the thread stopped and must schedule it itself.
	 */
	if (!(flags & RFSTOPPED))
		sched_add(td, SRQ_BORING);
	thread_unlock(td);

	return 0;
}
  143 
/*
 * Terminate the calling kernel process with exit code ecode.
 * Does not return: ends in exit1().
 */
void
kproc_exit(int ecode)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	/*
	 * Reparent curthread from proc0 to init so that the zombie
	 * is harvested.
	 */
	sx_xlock(&proctree_lock);
	PROC_LOCK(p);
	proc_reparent(p, initproc);
	PROC_UNLOCK(p);
	sx_xunlock(&proctree_lock);

	/*
	 * Wakeup anyone waiting for us to exit.
	 */
	wakeup(p);

	/* Buh-bye! */
	exit1(td, ecode, 0);
}
  171 
/*
 * Advise a kernel process to suspend (or resume) in its main loop.
 * Participation is voluntary: the target must poll via
 * kproc_suspend_check().  timo bounds the wait (msleep ticks; 0 means
 * wait forever).  Returns 0, EINVAL for a non-kernel process, or the
 * msleep() error (e.g. EWOULDBLOCK on timeout).
 */
int
kproc_suspend(struct proc *p, int timo)
{
	/*
	 * Make sure this is indeed a system process and we can safely
	 * use the p_siglist field.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_KPROC) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	/*
	 * The SIGSTOP is posted directly on p_siglist; it is noticed by
	 * kproc_suspend_check(), not delivered as a normal signal.
	 */
	SIGADDSET(p->p_siglist, SIGSTOP);
	wakeup(p);
	/* PDROP releases the proc lock when msleep() returns. */
	return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
}
  192 
/*
 * Resume a kernel process previously suspended with kproc_suspend().
 * Returns 0, or EINVAL if p is not a kernel process.
 */
int
kproc_resume(struct proc *p)
{
	/*
	 * Make sure this is indeed a system process and we can safely
	 * use the p_siglist field.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & P_KPROC) == 0) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	/* Clear the stop request and wake the target sleeping in
	 * kproc_suspend_check(). */
	SIGDELSET(p->p_siglist, SIGSTOP);
	PROC_UNLOCK(p);
	wakeup(&p->p_siglist);
	return (0);
}
  210 
/*
 * Called by a kernel process in its main loop to honor a pending
 * kproc_suspend() request.  Sleeps until the SIGSTOP posted on
 * p_siglist is cleared by kproc_resume().
 */
void
kproc_suspend_check(struct proc *p)
{
	PROC_LOCK(p);
	while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
		/* Acknowledge to the waiter blocked in kproc_suspend(). */
		wakeup(&p->p_siglist);
		msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
	}
	PROC_UNLOCK(p);
}
  221 
  222 
  223 /*
  224  * Start a kernel thread.  
  225  *
  226  * This function is used to start "internal" daemons and intended
  227  * to be called from SYSINIT().
  228  */
  229 
  230 void
  231 kthread_start(const void *udata)
  232 {
  233         const struct kthread_desc       *kp = udata;
  234         int error;
  235 
  236         error = kthread_add((void (*)(void *))kp->func, NULL,
  237                     NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
  238         if (error)
  239                 panic("kthread_start: %s: error %d", kp->arg0, error);
  240 }
  241 
/*
 * Create a kernel thread.  It shares its address space
 * with proc0 - ie: kernel only.
 *
 * func is the function to start.
 * arg is the parameter to pass to function on first startup.
 * p is the process to attach the thread to; NULL means proc0.
 * newtdp is the return value pointing to the thread's struct thread.
 *  ** XXX fix this --> flags are flags to fork1 (in unistd.h) 
 * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.).
 *
 * Returns 0 on success or ENOMEM if a thread cannot be allocated.
 */
int
kthread_add(void (*func)(void *), void *arg, struct proc *p,
    struct thread **newtdp, int flags, int pages, const char *fmt, ...)
{
	va_list ap;
	struct thread *newtd, *oldtd;

	/* Thread allocation needs proc0 to be fully set up. */
	if (!proc0.p_stats)
		panic("kthread_add called too soon");

	/* If no process supplied, put it on proc0 */
	if (p == NULL)
		p = &proc0;

	/* Initialize our new td  */
	newtd = thread_alloc(pages);
	if (newtd == NULL)
		return (ENOMEM);

	PROC_LOCK(p);
	oldtd = FIRST_THREAD_IN_PROC(p);

	/* Zero the scratch region, then inherit the copied region from oldtd. */
	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	newtd->td_sleeptimo = 0;
	bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	/* set up arg0 for 'ps', et al */
	va_start(ap, fmt);
	vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
	va_end(ap);

	newtd->td_proc = p;  /* needed for cpu_copy_thread */
	/* might be further optimized for kthread */
	cpu_copy_thread(newtd, oldtd);
	/* put the designated function(arg) as the resume context */
	cpu_fork_kthread_handler(newtd, func, arg);

	newtd->td_pflags |= TDP_KTHREAD;
	/* Take references on the process's copy-on-write data (see
	 * thread_cow_get_proc()). */
	thread_cow_get_proc(newtd, p);

	/* this code almost the same as create_thread() in kern_thr.c */
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	thread_lock(oldtd);
	/* let the scheduler know about these things. */
	sched_fork_thread(oldtd, newtd);
	TD_SET_CAN_RUN(newtd);
	thread_unlock(oldtd);
	PROC_UNLOCK(p);

	/* Make the new thread findable by tid. */
	tidhash_add(newtd);

	/* Avoid inheriting affinity from a random parent. */
	cpuset_setthread(newtd->td_tid, cpuset_root);

	/*
	 * Delay putting it on the run queue until now.  With RFSTOPPED
	 * the caller is responsible for scheduling the thread.
	 */
	if (!(flags & RFSTOPPED)) {
		thread_lock(newtd);
		sched_add(newtd, SRQ_BORING);
		thread_unlock(newtd);
	}
	if (newtdp)
		*newtdp = newtd;
	return 0;
}
  319 
/*
 * Terminate the calling kernel thread.  If it is the last thread in
 * its process, the whole process is torn down via kproc_exit().
 * Does not return.
 */
void
kthread_exit(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;

	/* A module may be waiting for us to exit. */
	wakeup(td);

	/*
	 * The last exiting thread in a kernel process must tear down
	 * the whole process.
	 */
	rw_wlock(&tidhash_lock);
	PROC_LOCK(p);
	if (p->p_numthreads == 1) {
		PROC_UNLOCK(p);
		rw_wunlock(&tidhash_lock);
		/* kproc_exit() reparents to init and exits; no return. */
		kproc_exit(0);
	}
	/* Unhash ourselves while still holding tidhash_lock. */
	LIST_REMOVE(td, td_hash);
	rw_wunlock(&tidhash_lock);
	umtx_thread_exit(td);
	tdsigcleanup(td);
	/* thread_exit() is entered with the proc spinlock held and the
	 * proc lock held (from above); it does not return. */
	PROC_SLOCK(p);
	thread_exit();
}
  350 
/*
 * Advise a kernel thread to suspend (or resume) in its main loop.
 * Participation is voluntary: the target must poll via
 * kthread_suspend_check().  timo bounds the msleep() wait (0 means
 * forever).  Returns 0, EINVAL for a non-kthread, or the msleep()
 * error (e.g. EWOULDBLOCK on timeout).
 */
int
kthread_suspend(struct thread *td, int timo)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	/*
	 * The caller of the primitive should have already checked that the
	 * thread is up and running, thus not being blocked by other
	 * conditions.
	 */
	PROC_LOCK(p);
	thread_lock(td);
	td->td_flags |= TDF_KTH_SUSP;
	thread_unlock(td);
	/* PDROP releases the proc lock when msleep() returns. */
	return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
	    timo));
}
  382 
/*
 * Resume a thread previously put asleep with kthread_suspend().
 * Returns 0, or EINVAL if td is not a kernel thread.
 */
int
kthread_resume(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * td_pflags should not be read by any thread other than
	 * curthread, but as long as this flag is invariant during the
	 * thread's lifetime, it is OK to check its state.
	 */
	if ((td->td_pflags & TDP_KTHREAD) == 0)
		return (EINVAL);

	PROC_LOCK(p);
	thread_lock(td);
	/* Clear the suspend request under both locks (see the double-lock
	 * note in kthread_suspend_check()). */
	td->td_flags &= ~TDF_KTH_SUSP;
	thread_unlock(td);
	wakeup(&td->td_flags);
	PROC_UNLOCK(p);
	return (0);
}
  409 
/*
 * Used by the thread to poll as to whether it should yield/sleep
 * and notify the caller that is has happened.  Must be called from a
 * kernel thread (TDP_KTHREAD); panics otherwise.  Sleeps until
 * kthread_resume() clears TDF_KTH_SUSP.
 */
void
kthread_suspend_check(void)
{
	struct proc *p;
	struct thread *td;

	td = curthread;
	p = td->td_proc;

	if ((td->td_pflags & TDP_KTHREAD) == 0)
		panic("%s: curthread is not a valid kthread", __func__);

	/*
	 * As long as the double-lock protection is used when accessing the
	 * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
	 * is fine.
	 */
	PROC_LOCK(p);
	while (td->td_flags & TDF_KTH_SUSP) {
		/* Acknowledge to the waiter blocked in kthread_suspend(). */
		wakeup(&td->td_flags);
		msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
	}
	PROC_UNLOCK(p);
}
  438 
  439 int
  440 kproc_kthread_add(void (*func)(void *), void *arg,
  441             struct proc **procptr, struct thread **tdptr,
  442             int flags, int pages, const char *procname, const char *fmt, ...) 
  443 {
  444         int error;
  445         va_list ap;
  446         char buf[100];
  447         struct thread *td;
  448 
  449         if (*procptr == NULL) {
  450                 error = kproc_create(func, arg,
  451                         procptr, flags, pages, "%s", procname);
  452                 if (error)
  453                         return (error);
  454                 td = FIRST_THREAD_IN_PROC(*procptr);
  455                 if (tdptr)
  456                         *tdptr = td;
  457                 va_start(ap, fmt);
  458                 vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  459                 va_end(ap);
  460 #ifdef KTR
  461                 sched_clear_tdname(td);
  462 #endif
  463                 return (0); 
  464         }
  465         va_start(ap, fmt);
  466         vsnprintf(buf, sizeof(buf), fmt, ap);
  467         va_end(ap);
  468         error = kthread_add(func, arg, *procptr,
  469                     tdptr, flags, pages, "%s", buf);
  470         return (error);
  471 }

Cache object: a4352d48d846588ad7f9fea153ac4f34


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.