FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kthread.c


    1 /*-
    2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/11.0/sys/kern/kern_kthread.c 301961 2016-06-16 12:05:44Z kib $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/cpuset.h>
   33 #include <sys/kthread.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/resourcevar.h>
   38 #include <sys/rwlock.h>
   39 #include <sys/signalvar.h>
   40 #include <sys/sx.h>
   41 #include <sys/umtx.h>
   42 #include <sys/unistd.h>
   43 #include <sys/wait.h>
   44 #include <sys/sched.h>
   45 #include <vm/vm.h>
   46 #include <vm/vm_extern.h>
   47 
   48 #include <machine/stdarg.h>
   49 
   50 /*
   51  * Start a kernel process.  This is called after a fork() call in
   52  * mi_startup() in the file kern/init_main.c.
   53  *
   54  * This function is used to start "internal" daemons and intended
   55  * to be called from SYSINIT().
   56  */
   57 void
   58 kproc_start(const void *udata)
   59 {
   60         const struct kproc_desc *kp = udata;
   61         int error;
   62 
   63         error = kproc_create((void (*)(void *))kp->func, NULL,
   64                     kp->global_procpp, 0, 0, "%s", kp->arg0);
   65         if (error)
   66                 panic("kproc_start: %s: error %d", kp->arg0, error);
   67 }
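
A hypothetical sketch of how kproc_start() is normally reached: a subsystem
declares a struct kproc_desc and registers it with SYSINIT(), which invokes
kproc_start() during boot.  The names exampled, example_main and exampleproc
are placeholders, not part of this file, and the sketch assumes the usual
kernel headers (including <sys/kernel.h> for SYSINIT()).

static struct proc *exampleproc;

static void
example_main(void)
{
        /* the daemon's main loop would live here */
}

static struct kproc_desc example_kp = {
        "exampled",             /* arg0: process name shown by ps(1) */
        example_main,           /* func: "main" of the kernel process */
        &exampleproc            /* global_procpp: where to save the proc ptr */
};
SYSINIT(example_kproc, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start,
    &example_kp);
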
   68 
   69 /*
   70  * Create a kernel process/thread/whatever.  It shares its address space
   71  * with proc0 - ie: kernel only.
   72  *
   73  * func is the function to start.
   74  * arg is the parameter to pass to function on first startup.
    75  * newpp is the return value pointing to the new process's struct proc.
   76  * flags are flags to fork1 (in unistd.h)
   77  * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
   78  */
   79 int
   80 kproc_create(void (*func)(void *), void *arg,
   81     struct proc **newpp, int flags, int pages, const char *fmt, ...)
   82 {
   83         struct fork_req fr;
   84         int error;
   85         va_list ap;
   86         struct thread *td;
   87         struct proc *p2;
   88 
   89         if (!proc0.p_stats)
   90                 panic("kproc_create called too soon");
   91 
   92         bzero(&fr, sizeof(fr));
   93         fr.fr_flags = RFMEM | RFFDG | RFPROC | RFSTOPPED | flags;
   94         fr.fr_pages = pages;
   95         fr.fr_procp = &p2;
   96         error = fork1(&thread0, &fr);
   97         if (error)
   98                 return error;
   99 
  100         /* save a global descriptor, if desired */
  101         if (newpp != NULL)
  102                 *newpp = p2;
  103 
  104         /* this is a non-swapped system process */
  105         PROC_LOCK(p2);
  106         td = FIRST_THREAD_IN_PROC(p2);
  107         p2->p_flag |= P_SYSTEM | P_KPROC;
  108         td->td_pflags |= TDP_KTHREAD;
  109         mtx_lock(&p2->p_sigacts->ps_mtx);
  110         p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
  111         mtx_unlock(&p2->p_sigacts->ps_mtx);
  112         PROC_UNLOCK(p2);
  113 
  114         /* set up arg0 for 'ps', et al */
  115         va_start(ap, fmt);
  116         vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
  117         va_end(ap);
   118         /* set up the thread name for 'ps', et al */
  119         va_start(ap, fmt);
  120         vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  121         va_end(ap);
  122 #ifdef KTR
  123         sched_clear_tdname(td);
  124 #endif
  125 
  126         /* call the processes' main()... */
  127         cpu_fork_kthread_handler(td, func, arg);
  128 
  129         /* Avoid inheriting affinity from a random parent. */
  130         cpuset_setthread(td->td_tid, cpuset_root);
  131         thread_lock(td);
  132         TD_SET_CAN_RUN(td);
  133         sched_prio(td, PVM);
  134         sched_user_prio(td, PUSER);
  135 
  136         /* Delay putting it on the run queue until now. */
  137         if (!(flags & RFSTOPPED))
  138                 sched_add(td, SRQ_BORING); 
  139         thread_unlock(td);
  140 
  141         return 0;
  142 }
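
A minimal sketch of calling kproc_create() directly, assuming the headers
included by this file; example_worker, example_init and exampleproc are
illustrative names only.

static struct proc *exampleproc;

static void
example_worker(void *arg)
{
        /* ... perform the daemon's work ... */

        /* a kernel process terminates itself with kproc_exit() */
        kproc_exit(0);
}

static void
example_init(void)
{
        int error;

        error = kproc_create(example_worker, NULL, &exampleproc,
            0, 0, "exampled");
        if (error != 0)
                printf("exampled: kproc_create failed: %d\n", error);
}
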
  143 
  144 void
  145 kproc_exit(int ecode)
  146 {
  147         struct thread *td;
  148         struct proc *p;
  149 
  150         td = curthread;
  151         p = td->td_proc;
  152 
  153         /*
  154          * Reparent curthread from proc0 to init so that the zombie
  155          * is harvested.
  156          */
  157         sx_xlock(&proctree_lock);
  158         PROC_LOCK(p);
  159         proc_reparent(p, initproc);
  160         PROC_UNLOCK(p);
  161         sx_xunlock(&proctree_lock);
  162 
  163         /*
  164          * Wakeup anyone waiting for us to exit.
  165          */
  166         wakeup(p);
  167 
  168         /* Buh-bye! */
  169         exit1(td, ecode, 0);
  170 }
  171 
  172 /*
  173  * Advise a kernel process to suspend (or resume) in its main loop.
  174  * Participation is voluntary.
  175  */
  176 int
  177 kproc_suspend(struct proc *p, int timo)
  178 {
  179         /*
  180          * Make sure this is indeed a system process and we can safely
  181          * use the p_siglist field.
  182          */
  183         PROC_LOCK(p);
  184         if ((p->p_flag & P_KPROC) == 0) {
  185                 PROC_UNLOCK(p);
  186                 return (EINVAL);
  187         }
  188         SIGADDSET(p->p_siglist, SIGSTOP);
  189         wakeup(p);
  190         return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
  191 }
  192 
  193 int
  194 kproc_resume(struct proc *p)
  195 {
  196         /*
  197          * Make sure this is indeed a system process and we can safely
  198          * use the p_siglist field.
  199          */
  200         PROC_LOCK(p);
  201         if ((p->p_flag & P_KPROC) == 0) {
  202                 PROC_UNLOCK(p);
  203                 return (EINVAL);
  204         }
  205         SIGDELSET(p->p_siglist, SIGSTOP);
  206         PROC_UNLOCK(p);
  207         wakeup(&p->p_siglist);
  208         return (0);
  209 }
  210 
  211 void
  212 kproc_suspend_check(struct proc *p)
  213 {
  214         PROC_LOCK(p);
  215         while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
  216                 wakeup(&p->p_siglist);
  217                 msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
  218         }
  219         PROC_UNLOCK(p);
  220 }
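
A sketch of the voluntary suspension protocol with hypothetical names: the
kernel process polls kproc_suspend_check() in its main loop, while some other
thread asks it to pause and later to continue.  The sketch assumes the usual
kernel headers (pause() and hz come from <sys/systm.h> and <sys/kernel.h>).

static struct proc *exampleproc;        /* saved by kproc_create() */

/* inside the kernel process */
static void
example_mainloop(void *arg)
{
        for (;;) {
                kproc_suspend_check(exampleproc);
                /* ... do one unit of work ... */
                pause("exwait", hz);
        }
}

/* from a controlling thread */
static void
example_pause_and_resume(void)
{
        int error;

        error = kproc_suspend(exampleproc, 5 * hz);
        if (error == EWOULDBLOCK)
                printf("exampled did not suspend within 5 seconds\n");
        /* ... */
        kproc_resume(exampleproc);
}
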
  221 
  222 
  223 /*
  224  * Start a kernel thread.  
  225  *
  226  * This function is used to start "internal" daemons and intended
  227  * to be called from SYSINIT().
  228  */
  229 
  230 void
  231 kthread_start(const void *udata)
  232 {
  233         const struct kthread_desc       *kp = udata;
  234         int error;
  235 
  236         error = kthread_add((void (*)(void *))kp->func, NULL,
  237                     NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
  238         if (error)
  239                 panic("kthread_start: %s: error %d", kp->arg0, error);
  240 }
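
The SYSINIT() hookup for kthread_start() mirrors the kproc_desc sketch above,
but yields a thread inside proc0; examplethr and example_thread_main are
placeholder names, and <sys/kernel.h> is assumed for SYSINIT().

static struct thread *examplethr;

static void
example_thread_main(void)
{
        /* thread main loop; runs as a thread of proc0 */
}

static struct kthread_desc example_ktd = {
        "examplethr",           /* arg0: thread name for ps(1)/top(1) */
        example_thread_main,    /* func */
        &examplethr             /* global_threadpp */
};
SYSINIT(example_kthread, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kthread_start,
    &example_ktd);
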
  241 
  242 /*
  243  * Create a kernel thread.  It shares its address space
  244  * with proc0 - ie: kernel only.
  245  *
  246  * func is the function to start.
  247  * arg is the parameter to pass to function on first startup.
  248  * newtdp is the return value pointing to the thread's struct thread.
   249  * flags: only RFSTOPPED is honored here; if set, the thread is not put on the run queue (see unistd.h).
  250  * fmt and following will be *printf'd into (*newtd)->td_name (for ps, etc.).
  251  */
  252 int
  253 kthread_add(void (*func)(void *), void *arg, struct proc *p,
  254     struct thread **newtdp, int flags, int pages, const char *fmt, ...)
  255 {
  256         va_list ap;
  257         struct thread *newtd, *oldtd;
  258 
  259         if (!proc0.p_stats)
  260                 panic("kthread_add called too soon");
  261 
  262         /* If no process supplied, put it on proc0 */
  263         if (p == NULL)
  264                 p = &proc0;
  265 
  266         /* Initialize our new td  */
  267         newtd = thread_alloc(pages);
  268         if (newtd == NULL)
  269                 return (ENOMEM);
  270 
  271         PROC_LOCK(p);
  272         oldtd = FIRST_THREAD_IN_PROC(p);
  273 
  274         bzero(&newtd->td_startzero,
  275             __rangeof(struct thread, td_startzero, td_endzero));
  276         bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
  277             __rangeof(struct thread, td_startcopy, td_endcopy));
  278 
  279         /* set up arg0 for 'ps', et al */
  280         va_start(ap, fmt);
  281         vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
  282         va_end(ap);
  283 
  284         newtd->td_proc = p;  /* needed for cpu_copy_thread */
  285         /* might be further optimized for kthread */
  286         cpu_copy_thread(newtd, oldtd);
  287         /* put the designated function(arg) as the resume context */
  288         cpu_fork_kthread_handler(newtd, func, arg);
  289 
  290         newtd->td_pflags |= TDP_KTHREAD;
  291         thread_cow_get_proc(newtd, p);
  292 
   293         /* this code is almost the same as create_thread() in kern_thr.c */
  294         p->p_flag |= P_HADTHREADS;
  295         thread_link(newtd, p);
  296         thread_lock(oldtd);
  297         /* let the scheduler know about these things. */
  298         sched_fork_thread(oldtd, newtd);
  299         TD_SET_CAN_RUN(newtd);
  300         thread_unlock(oldtd);
  301         PROC_UNLOCK(p);
  302 
  303         tidhash_add(newtd);
  304 
  305         /* Avoid inheriting affinity from a random parent. */
  306         cpuset_setthread(newtd->td_tid, cpuset_root);
  307 
  308         /* Delay putting it on the run queue until now. */
  309         if (!(flags & RFSTOPPED)) {
  310                 thread_lock(newtd);
  311                 sched_add(newtd, SRQ_BORING); 
  312                 thread_unlock(newtd);
  313         }
  314         if (newtdp)
  315                 *newtdp = newtd;
  316         return 0;
  317 }
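
A sketch of adding a worker thread to an existing kernel process (or to proc0
when p is NULL); example_td, example_loop, example_shutdown and the name
"exworker" are illustrative, and the usual kernel headers (pause(), hz) are
assumed.

static struct thread *example_td;
static int example_shutdown;

static void
example_loop(void *arg)
{
        while (!example_shutdown) {
                kthread_suspend_check();
                /* ... do work ... */
                pause("exwork", hz);
        }
        /* a kernel thread must terminate itself with kthread_exit() */
        kthread_exit();
}

static int
example_start_thread(void)
{

        return (kthread_add(example_loop, NULL, NULL /* proc0 */, &example_td,
            0, 0, "exworker"));
}
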
  318 
  319 void
  320 kthread_exit(void)
  321 {
  322         struct proc *p;
  323 
  324         p = curthread->td_proc;
  325 
  326         /* A module may be waiting for us to exit. */
  327         wakeup(curthread);
  328 
  329         /*
  330          * The last exiting thread in a kernel process must tear down
  331          * the whole process.
  332          */
  333         rw_wlock(&tidhash_lock);
  334         PROC_LOCK(p);
  335         if (p->p_numthreads == 1) {
  336                 PROC_UNLOCK(p);
  337                 rw_wunlock(&tidhash_lock);
  338                 kproc_exit(0);
  339         }
  340         LIST_REMOVE(curthread, td_hash);
  341         rw_wunlock(&tidhash_lock);
  342         umtx_thread_exit(curthread);
  343         PROC_SLOCK(p);
  344         thread_exit();
  345 }
  346 
  347 /*
   348  * Advise a kernel thread to suspend (or resume) in its main loop.
  349  * Participation is voluntary.
  350  */
  351 int
  352 kthread_suspend(struct thread *td, int timo)
  353 {
  354         struct proc *p;
  355 
  356         p = td->td_proc;
  357 
  358         /*
  359          * td_pflags should not be read by any thread other than
  360          * curthread, but as long as this flag is invariant during the
  361          * thread's lifetime, it is OK to check its state.
  362          */
  363         if ((td->td_pflags & TDP_KTHREAD) == 0)
  364                 return (EINVAL);
  365 
  366         /*
  367          * The caller of the primitive should have already checked that the
  368          * thread is up and running, thus not being blocked by other
  369          * conditions.
  370          */
  371         PROC_LOCK(p);
  372         thread_lock(td);
  373         td->td_flags |= TDF_KTH_SUSP;
  374         thread_unlock(td);
  375         return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
  376             timo));
  377 }
  378 
  379 /*
  380  * Resume a thread previously put asleep with kthread_suspend().
  381  */
  382 int
  383 kthread_resume(struct thread *td)
  384 {
  385         struct proc *p;
  386 
  387         p = td->td_proc;
  388 
  389         /*
  390          * td_pflags should not be read by any thread other than
  391          * curthread, but as long as this flag is invariant during the
  392          * thread's lifetime, it is OK to check its state.
  393          */
  394         if ((td->td_pflags & TDP_KTHREAD) == 0)
  395                 return (EINVAL);
  396 
  397         PROC_LOCK(p);
  398         thread_lock(td);
  399         td->td_flags &= ~TDF_KTH_SUSP;
  400         thread_unlock(td);
  401         wakeup(&td->td_flags);
  402         PROC_UNLOCK(p);
  403         return (0);
  404 }
  405 
  406 /*
   407  * Used by the thread to poll whether it should yield/sleep
   408  * and notify the caller that it has happened.
  409  */
  410 void
  411 kthread_suspend_check(void)
  412 {
  413         struct proc *p;
  414         struct thread *td;
  415 
  416         td = curthread;
  417         p = td->td_proc;
  418 
  419         if ((td->td_pflags & TDP_KTHREAD) == 0)
  420                 panic("%s: curthread is not a valid kthread", __func__);
  421 
  422         /*
  423          * As long as the double-lock protection is used when accessing the
  424          * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
  425          * is fine.
  426          */
  427         PROC_LOCK(p);
  428         while (td->td_flags & TDF_KTH_SUSP) {
  429                 wakeup(&td->td_flags);
  430                 msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
  431         }
  432         PROC_UNLOCK(p);
  433 }
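
A sketch of the controlling side, assuming example_td holds a thread pointer
returned earlier by kthread_add() and that the target thread polls
kthread_suspend_check() in its loop as shown above.

static int
example_quiesce(void)
{
        int error;

        /* ask the worker to park itself; wait at most two seconds */
        error = kthread_suspend(example_td, 2 * hz);
        if (error != 0)
                return (error);         /* EINVAL, or EWOULDBLOCK on timeout */

        /* ... the worker is now parked in kthread_suspend_check() ... */

        kthread_resume(example_td);
        return (0);
}
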
  434 
  435 int
  436 kproc_kthread_add(void (*func)(void *), void *arg,
  437             struct proc **procptr, struct thread **tdptr,
  438             int flags, int pages, const char *procname, const char *fmt, ...) 
  439 {
  440         int error;
  441         va_list ap;
  442         char buf[100];
  443         struct thread *td;
  444 
  445         if (*procptr == NULL) {
  446                 error = kproc_create(func, arg,
  447                         procptr, flags, pages, "%s", procname);
  448                 if (error)
  449                         return (error);
  450                 td = FIRST_THREAD_IN_PROC(*procptr);
  451                 if (tdptr)
  452                         *tdptr = td;
  453                 va_start(ap, fmt);
  454                 vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  455                 va_end(ap);
  456 #ifdef KTR
  457                 sched_clear_tdname(td);
  458 #endif
  459                 return (0); 
  460         }
  461         va_start(ap, fmt);
  462         vsnprintf(buf, sizeof(buf), fmt, ap);
  463         va_end(ap);
  464         error = kthread_add(func, arg, *procptr,
  465                     tdptr, flags, pages, "%s", buf);
  466         return (error);
  467 }
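
A sketch of the combined helper with hypothetical names: the first call
creates the process "exampled" and names its first thread from fmt, while the
second call attaches an additional thread to the existing process.
example_loop stands for a worker function like the one in the kthread_add()
sketch above.

static struct proc *examplep;
static struct thread *exampletd[2];

static void
example_start(void)
{
        int error, i;

        for (i = 0; i < 2; i++) {
                error = kproc_kthread_add(example_loop, NULL, &examplep,
                    &exampletd[i], 0, 0, "exampled", "exampled worker %d", i);
                if (error != 0)
                        printf("exampled: cannot start worker %d: %d\n",
                            i, error);
        }
}
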



This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.