FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kthread.c


    1 /*-
    2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/10.4/sys/kern/kern_kthread.c 304905 2016-08-27 11:45:05Z kib $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/cpuset.h>
   33 #include <sys/kthread.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/resourcevar.h>
   38 #include <sys/rwlock.h>
   39 #include <sys/signalvar.h>
   40 #include <sys/sx.h>
   41 #include <sys/umtx.h>
   42 #include <sys/unistd.h>
   43 #include <sys/wait.h>
   44 #include <sys/sched.h>
   45 #include <vm/vm.h>
   46 #include <vm/vm_extern.h>
   47 
   48 #include <machine/stdarg.h>
   49 
   50 /*
   51  * Start a kernel process.  This is called after a fork() call in
   52  * mi_startup() in the file kern/init_main.c.
   53  *
    54  * This function is used to start "internal" daemons and is intended
   55  * to be called from SYSINIT().
   56  */
   57 void
   58 kproc_start(udata)
   59         const void *udata;
   60 {
   61         const struct kproc_desc *kp = udata;
   62         int error;
   63 
   64         error = kproc_create((void (*)(void *))kp->func, NULL,
   65                     kp->global_procpp, 0, 0, "%s", kp->arg0);
   66         if (error)
   67                 panic("kproc_start: %s: error %d", kp->arg0, error);
   68 }
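/*
 * Illustrative sketch, not part of kern_kthread.c: how a subsystem might
 * start a kernel process via kproc_start() from SYSINIT().  The names
 * "my_daemon", "my_proc", and "my_kp" are hypothetical placeholders.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>		/* SYSINIT() */
#include <sys/kthread.h>
#include <sys/proc.h>

static struct proc *my_proc;

static void
my_daemon(void)
{
	for (;;) {
		/* ... do periodic work ... */
		tsleep(&my_proc, PPAUSE, "mydmn", hz);
	}
}

static struct kproc_desc my_kp = {
	"mydaemon",		/* arg0: process name shown by ps(1) */
	my_daemon,		/* func: entry point */
	&my_proc		/* global_procpp: where to store the proc pointer */
};
SYSINIT(mydaemon, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &my_kp);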
   69 
   70 /*
    71  * Create a kernel process/thread/whatever.  It shares its address space
    72  * with proc0 - i.e., kernel only.
    73  *
    74  * func is the function to start.
    75  * arg is the parameter to pass to the function on first startup.
    76  * newpp is the return value pointing to the new process's struct proc.
    77  * flags are flags passed to fork1() (see unistd.h).
    78  * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
   79  */
   80 int
   81 kproc_create(void (*func)(void *), void *arg,
   82     struct proc **newpp, int flags, int pages, const char *fmt, ...)
   83 {
   84         int error;
   85         va_list ap;
   86         struct thread *td;
   87         struct proc *p2;
   88 
   89         if (!proc0.p_stats)
   90                 panic("kproc_create called too soon");
   91 
   92         error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
   93             pages, &p2, NULL, 0);
   94         if (error)
   95                 return error;
   96 
   97         /* save a global descriptor, if desired */
   98         if (newpp != NULL)
   99                 *newpp = p2;
  100 
  101         /* this is a non-swapped system process */
  102         PROC_LOCK(p2);
  103         td = FIRST_THREAD_IN_PROC(p2);
  104         p2->p_flag |= P_SYSTEM | P_KTHREAD;
  105         td->td_pflags |= TDP_KTHREAD;
  106         mtx_lock(&p2->p_sigacts->ps_mtx);
  107         p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
  108         mtx_unlock(&p2->p_sigacts->ps_mtx);
  109         PROC_UNLOCK(p2);
  110 
  111         /* set up arg0 for 'ps', et al */
  112         va_start(ap, fmt);
  113         vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
  114         va_end(ap);
   115         /* likewise set up the thread name for 'ps', et al */
  116         va_start(ap, fmt);
  117         vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  118         va_end(ap);
  119 #ifdef KTR
  120         sched_clear_tdname(td);
  121 #endif
  122 
   123         /* call the process's main()... */
  124         cpu_set_fork_handler(td, func, arg);
  125 
  126         /* Avoid inheriting affinity from a random parent. */
  127         cpuset_setthread(td->td_tid, cpuset_root);
  128         thread_lock(td);
  129         TD_SET_CAN_RUN(td);
  130         sched_prio(td, PVM);
  131         sched_user_prio(td, PUSER);
  132 
  133         /* Delay putting it on the run queue until now. */
  134         if (!(flags & RFSTOPPED))
  135                 sched_add(td, SRQ_BORING); 
  136         thread_unlock(td);
  137 
  138         return 0;
  139 }
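/*
 * Illustrative sketch, not part of kern_kthread.c: creating a kernel
 * process directly with kproc_create() and tearing it down with
 * kproc_exit().  "my_worker", "my_workerproc", and "my_worker_stop" are
 * hypothetical placeholders.
 */
static struct proc *my_workerproc;
static int my_worker_stop;

static void
my_worker(void *arg)
{
	while (!my_worker_stop) {
		/* ... service queued requests ... */
		tsleep(&my_workerproc, PPAUSE, "mywrk", hz);
	}
	kproc_exit(0);		/* reparents to init and exits; never returns */
}

static int
my_worker_start(void)
{
	/* 0 flags and 0 pages select the defaults; the name is for ps(1). */
	return (kproc_create(my_worker, NULL, &my_workerproc, 0, 0,
	    "myworker"));
}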
  140 
  141 void
  142 kproc_exit(int ecode)
  143 {
  144         struct thread *td;
  145         struct proc *p;
  146 
  147         td = curthread;
  148         p = td->td_proc;
  149 
  150         /*
  151          * Reparent curthread from proc0 to init so that the zombie
  152          * is harvested.
  153          */
  154         sx_xlock(&proctree_lock);
  155         PROC_LOCK(p);
  156         proc_reparent(p, initproc);
  157         PROC_UNLOCK(p);
  158         sx_xunlock(&proctree_lock);
  159 
  160         /*
  161          * Wakeup anyone waiting for us to exit.
   162          * Wake up anyone waiting for us to exit.
  163         wakeup(p);
  164 
  165         /* Buh-bye! */
  166         exit1(td, W_EXITCODE(ecode, 0));
  167 }
  168 
  169 /*
  170  * Advise a kernel process to suspend (or resume) in its main loop.
  171  * Participation is voluntary.
  172  */
  173 int
  174 kproc_suspend(struct proc *p, int timo)
  175 {
  176         /*
  177          * Make sure this is indeed a system process and we can safely
  178          * use the p_siglist field.
  179          */
  180         PROC_LOCK(p);
  181         if ((p->p_flag & P_KTHREAD) == 0) {
  182                 PROC_UNLOCK(p);
  183                 return (EINVAL);
  184         }
  185         SIGADDSET(p->p_siglist, SIGSTOP);
  186         wakeup(p);
  187         return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
  188 }
  189 
  190 int
  191 kproc_resume(struct proc *p)
  192 {
  193         /*
  194          * Make sure this is indeed a system process and we can safely
  195          * use the p_siglist field.
  196          */
  197         PROC_LOCK(p);
  198         if ((p->p_flag & P_KTHREAD) == 0) {
  199                 PROC_UNLOCK(p);
  200                 return (EINVAL);
  201         }
  202         SIGDELSET(p->p_siglist, SIGSTOP);
  203         PROC_UNLOCK(p);
  204         wakeup(&p->p_siglist);
  205         return (0);
  206 }
  207 
  208 void
  209 kproc_suspend_check(struct proc *p)
  210 {
  211         PROC_LOCK(p);
  212         while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
  213                 wakeup(&p->p_siglist);
  214                 msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
  215         }
  216         PROC_UNLOCK(p);
  217 }
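/*
 * Illustrative sketch, not part of kern_kthread.c: the cooperative suspend
 * protocol for kernel processes.  The daemon polls kproc_suspend_check()
 * at a safe point in its loop; another thread parks it and later releases
 * it.  "my_kproc" and the function names are hypothetical.
 */
static struct proc *my_kproc;

static void
my_kproc_main(void)
{
	for (;;) {
		kproc_suspend_check(my_kproc);	/* blocks while SIGSTOP is queued */
		/* ... one unit of work ... */
		tsleep(&my_kproc, PPAUSE, "mykp", hz);
	}
}

static void
my_kproc_quiesce(void)
{
	/* Ask the daemon to park; give up after ten seconds. */
	if (kproc_suspend(my_kproc, 10 * hz) == 0) {
		/* ... the daemon is now idle in kproc_suspend_check() ... */
		kproc_resume(my_kproc);
	}
}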
  218 
  219 
  220 /*
  221  * Start a kernel thread.  
  222  *
   223  * This function is used to start "internal" daemons and is intended
  224  * to be called from SYSINIT().
  225  */
  226 
  227 void
  228 kthread_start(udata)
  229         const void *udata;
  230 {
  231         const struct kthread_desc       *kp = udata;
  232         int error;
  233 
  234         error = kthread_add((void (*)(void *))kp->func, NULL,
  235                     NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
  236         if (error)
  237                 panic("kthread_start: %s: error %d", kp->arg0, error);
  238 }
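/*
 * Illustrative sketch, not part of kern_kthread.c: starting a kernel
 * thread (attached to proc0) from SYSINIT() via kthread_start().  The
 * names "my_thr", "my_thr_td", and "my_thr_kd" are hypothetical.
 */
static struct thread *my_thr_td;

static void
my_thr(void)
{
	for (;;) {
		/* ... background work ... */
		tsleep(&my_thr_td, PPAUSE, "mythr", hz);
	}
}

static struct kthread_desc my_thr_kd = {
	"mythr",		/* arg0: thread name for ps -H, top -H */
	my_thr,			/* func: entry point */
	&my_thr_td		/* global_threadpp: where to store the thread pointer */
};
SYSINIT(mythr, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kthread_start, &my_thr_kd);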
  239 
  240 /*
   241  * Create a kernel thread.  It shares its address space
   242  * with proc0 - i.e., kernel only.
   243  *
   244  * func is the function to start.
   245  * arg is the parameter to pass to the function on first startup.
   246  * newtdp is the return value pointing to the new thread's struct thread.
   247  * flags is checked only for RFSTOPPED, which delays placing the thread on the run queue.
   248  * fmt and following will be *printf'd into (*newtdp)->td_name (for ps, etc.).
  249  */
  250 int
  251 kthread_add(void (*func)(void *), void *arg, struct proc *p,
  252     struct thread **newtdp, int flags, int pages, const char *fmt, ...)
  253 {
  254         va_list ap;
  255         struct thread *newtd, *oldtd;
  256 
  257         if (!proc0.p_stats)
  258                 panic("kthread_add called too soon");
  259 
  260         /* If no process supplied, put it on proc0 */
  261         if (p == NULL)
  262                 p = &proc0;
  263 
  264         /* Initialize our new td  */
  265         newtd = thread_alloc(pages);
  266         if (newtd == NULL)
  267                 return (ENOMEM);
  268 
  269         PROC_LOCK(p);
  270         oldtd = FIRST_THREAD_IN_PROC(p);
  271 
  272         bzero(&newtd->td_startzero,
  273             __rangeof(struct thread, td_startzero, td_endzero));
  274         newtd->td_su = NULL;
  275         newtd->td_sleeptimo = 0;
  276         bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
  277             __rangeof(struct thread, td_startcopy, td_endcopy));
  278 
  279         /* set up arg0 for 'ps', et al */
  280         va_start(ap, fmt);
  281         vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
  282         va_end(ap);
  283 
  284         newtd->td_proc = p;  /* needed for cpu_set_upcall */
  285 
   286         /* XXX: this could probably be optimised; on x86 (and likely */
   287         /* other architectures too) the copied state is full of junk, */
   288         /* and the function needs a better name. */
  289         cpu_set_upcall(newtd, oldtd);
  290         /* put the designated function(arg) as the resume context */
  291         cpu_set_fork_handler(newtd, func, arg);
  292 
  293         newtd->td_pflags |= TDP_KTHREAD;
  294         newtd->td_ucred = crhold(p->p_ucred);
  295 
   296         /* this code is almost the same as create_thread() in kern_thr.c */
  297         p->p_flag |= P_HADTHREADS;
  298         thread_link(newtd, p);
  299         thread_lock(oldtd);
  300         /* let the scheduler know about these things. */
  301         sched_fork_thread(oldtd, newtd);
  302         TD_SET_CAN_RUN(newtd);
  303         thread_unlock(oldtd);
  304         PROC_UNLOCK(p);
  305 
  306         tidhash_add(newtd);
  307 
  308         /* Avoid inheriting affinity from a random parent. */
  309         cpuset_setthread(newtd->td_tid, cpuset_root);
  310 
  311         /* Delay putting it on the run queue until now. */
  312         if (!(flags & RFSTOPPED)) {
  313                 thread_lock(newtd);
  314                 sched_add(newtd, SRQ_BORING); 
  315                 thread_unlock(newtd);
  316         }
  317         if (newtdp)
  318                 *newtdp = newtd;
  319         return 0;
  320 }
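/*
 * Illustrative sketch, not part of kern_kthread.c: a driver adding an I/O
 * thread with kthread_add() and ending it with kthread_exit().
 * "my_softc", "my_io_thread", and "my_attach" are hypothetical.
 */
struct my_softc {
	struct thread	*sc_td;
	int		 sc_unit;
	int		 sc_detaching;
};

static void
my_io_thread(void *arg)
{
	struct my_softc *sc = arg;

	while (!sc->sc_detaching) {
		/* ... move data for this unit ... */
		tsleep(sc, PPAUSE, "myio", hz);
	}
	kthread_exit();		/* tears down the whole proc if this is its last thread */
}

static int
my_attach(struct my_softc *sc)
{
	/* Passing p == NULL attaches the new thread to proc0 ("kernel"). */
	return (kthread_add(my_io_thread, sc, NULL, &sc->sc_td, 0, 0,
	    "myio%d", sc->sc_unit));
}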
  321 
  322 void
  323 kthread_exit(void)
  324 {
  325         struct proc *p;
  326         struct thread *td;
  327 
  328         td = curthread;
  329         p = td->td_proc;
  330 
  331         /* A module may be waiting for us to exit. */
  332         wakeup(td);
  333 
  334         /*
  335          * The last exiting thread in a kernel process must tear down
  336          * the whole process.
  337          */
  338         rw_wlock(&tidhash_lock);
  339         PROC_LOCK(p);
  340         if (p->p_numthreads == 1) {
  341                 PROC_UNLOCK(p);
  342                 rw_wunlock(&tidhash_lock);
  343                 kproc_exit(0);
  344         }
  345         LIST_REMOVE(td, td_hash);
  346         rw_wunlock(&tidhash_lock);
  347         umtx_thread_exit(td);
  348         tdsigcleanup(td);
  349         PROC_SLOCK(p);
  350         thread_exit();
  351 }
  352 
  353 /*
   354  * Advise a kernel thread to suspend (or resume) in its main loop.
  355  * Participation is voluntary.
  356  */
  357 int
  358 kthread_suspend(struct thread *td, int timo)
  359 {
  360         struct proc *p;
  361 
  362         p = td->td_proc;
  363 
  364         /*
  365          * td_pflags should not be read by any thread other than
  366          * curthread, but as long as this flag is invariant during the
  367          * thread's lifetime, it is OK to check its state.
  368          */
  369         if ((td->td_pflags & TDP_KTHREAD) == 0)
  370                 return (EINVAL);
  371 
  372         /*
  373          * The caller of the primitive should have already checked that the
  374          * thread is up and running, thus not being blocked by other
  375          * conditions.
  376          */
  377         PROC_LOCK(p);
  378         thread_lock(td);
  379         td->td_flags |= TDF_KTH_SUSP;
  380         thread_unlock(td);
  381         return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
  382             timo));
  383 }
  384 
  385 /*
   386  * Resume a thread previously put to sleep with kthread_suspend().
  387  */
  388 int
  389 kthread_resume(struct thread *td)
  390 {
  391         struct proc *p;
  392 
  393         p = td->td_proc;
  394 
  395         /*
  396          * td_pflags should not be read by any thread other than
  397          * curthread, but as long as this flag is invariant during the
  398          * thread's lifetime, it is OK to check its state.
  399          */
  400         if ((td->td_pflags & TDP_KTHREAD) == 0)
  401                 return (EINVAL);
  402 
  403         PROC_LOCK(p);
  404         thread_lock(td);
  405         td->td_flags &= ~TDF_KTH_SUSP;
  406         thread_unlock(td);
  407         wakeup(&td->td_flags);
  408         PROC_UNLOCK(p);
  409         return (0);
  410 }
  411 
  412 /*
   413  * Used by the thread to poll whether it should yield/sleep
   414  * and to notify the caller that it has happened.
  415  */
  416 void
   417 kthread_suspend_check(void)
  418 {
  419         struct proc *p;
  420         struct thread *td;
  421 
  422         td = curthread;
  423         p = td->td_proc;
  424 
  425         if ((td->td_pflags & TDP_KTHREAD) == 0)
  426                 panic("%s: curthread is not a valid kthread", __func__);
  427 
  428         /*
  429          * As long as the double-lock protection is used when accessing the
  430          * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
  431          * is fine.
  432          */
  433         PROC_LOCK(p);
  434         while (td->td_flags & TDF_KTH_SUSP) {
  435                 wakeup(&td->td_flags);
  436                 msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
  437         }
  438         PROC_UNLOCK(p);
  439 }
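/*
 * Illustrative sketch, not part of kern_kthread.c: cooperative suspension
 * of a single kernel thread.  The thread polls kthread_suspend_check() in
 * its loop; a controller parks it through its struct thread pointer.
 * "my_td" and the function names are hypothetical.
 */
static struct thread *my_td;

static void
my_thread_loop(void *arg)
{
	for (;;) {
		kthread_suspend_check();	/* blocks while TDF_KTH_SUSP is set */
		/* ... do work ... */
		tsleep(&my_td, PPAUSE, "myloop", hz);
	}
}

static void
my_thread_pause_resume(void)
{
	/* Park the thread (wait at most five seconds), then release it. */
	if (kthread_suspend(my_td, 5 * hz) == 0)
		kthread_resume(my_td);
}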
  440 
  441 int
  442 kproc_kthread_add(void (*func)(void *), void *arg,
  443             struct proc **procptr, struct thread **tdptr,
  444             int flags, int pages, const char *procname, const char *fmt, ...) 
  445 {
  446         int error;
  447         va_list ap;
  448         char buf[100];
  449         struct thread *td;
  450 
   451         if (*procptr == NULL) {
  452                 error = kproc_create(func, arg,
  453                         procptr, flags, pages, "%s", procname);
  454                 if (error)
  455                         return (error);
  456                 td = FIRST_THREAD_IN_PROC(*procptr);
  457                 if (tdptr)
  458                         *tdptr = td;
  459                 va_start(ap, fmt);
  460                 vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  461                 va_end(ap);
  462 #ifdef KTR
  463                 sched_clear_tdname(td);
  464 #endif
  465                 return (0); 
  466         }
  467         va_start(ap, fmt);
  468         vsnprintf(buf, sizeof(buf), fmt, ap);
  469         va_end(ap);
  470         error = kthread_add(func, arg, *procptr,
  471                     tdptr, flags, pages, "%s", buf);
  472         return (error);
  473 }
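/*
 * Illustrative sketch, not part of kern_kthread.c: kproc_kthread_add()
 * creates the process on first use and adds further threads to it on
 * later calls.  "my_pool_proc", "my_pool_worker", and "my_pool_start"
 * are hypothetical.
 */
static struct proc *my_pool_proc;

static void
my_pool_worker(void *arg)
{
	for (;;) {
		kthread_suspend_check();
		/* ... take work off a shared queue ... */
		tsleep(&my_pool_proc, PPAUSE, "mypool", hz);
	}
}

static int
my_pool_start(int nworkers)
{
	struct thread *td;
	int error, i;

	for (i = 0; i < nworkers; i++) {
		/*
		 * The first call creates the "mypool" process; subsequent
		 * calls only add a "mypool_N" thread to it.
		 */
		error = kproc_kthread_add(my_pool_worker, NULL,
		    &my_pool_proc, &td, 0, 0, "mypool", "mypool_%d", i);
		if (error != 0)
			return (error);
	}
	return (0);
}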
