FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_kthread.c


    1 /*-
    2  * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  *
   14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   15  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   16  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   17  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   18  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   19  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   20  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   21  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   22  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   24  * SUCH DAMAGE.
   25  */
   26 
   27 #include <sys/cdefs.h>
   28 __FBSDID("$FreeBSD: releng/9.1/sys/kern/kern_kthread.c 233814 2012-04-02 20:34:15Z jhb $");
   29 
   30 #include <sys/param.h>
   31 #include <sys/systm.h>
   32 #include <sys/cpuset.h>
   33 #include <sys/kthread.h>
   34 #include <sys/lock.h>
   35 #include <sys/mutex.h>
   36 #include <sys/proc.h>
   37 #include <sys/resourcevar.h>
   38 #include <sys/rwlock.h>
   39 #include <sys/signalvar.h>
   40 #include <sys/sx.h>
   41 #include <sys/unistd.h>
   42 #include <sys/wait.h>
   43 #include <sys/sched.h>
   44 #include <vm/vm.h>
   45 #include <vm/vm_extern.h>
   46 
   47 #include <machine/stdarg.h>
   48 
   49 /*
   50  * Start a kernel process.  This is called after a fork() call in
   51  * mi_startup() in the file kern/init_main.c.
   52  *
   53  * This function is used to start "internal" daemons and intended
   54  * to be called from SYSINIT().
   55  */
   56 void
   57 kproc_start(udata)
   58         const void *udata;
   59 {
   60         const struct kproc_desc *kp = udata;
   61         int error;
   62 
   63         error = kproc_create((void (*)(void *))kp->func, NULL,
   64                     kp->global_procpp, 0, 0, "%s", kp->arg0);
   65         if (error)
   66                 panic("kproc_start: %s: error %d", kp->arg0, error);
   67 }
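
/*
 * Example (illustrative sketch, not part of this file): the kproc_desc /
 * SYSINIT() pairing that the comment above describes.  The names
 * "exampled", exampled_main and exampled_proc are hypothetical, and a
 * real user would also include <sys/kernel.h> for SYSINIT().
 */
static struct proc *exampled_proc;

static void
exampled_main(void)
{
        /* daemon body; typically a loop that does work and sleeps */
}

static struct kproc_desc exampled_kp = {
        "exampled",             /* arg0: process name shown by ps(1) */
        exampled_main,          /* func: entry point of the daemon */
        &exampled_proc          /* global_procpp: where the proc is saved */
};
SYSINIT(exampled, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start, &exampled_kp);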
   68 
   69 /*
   70  * Create a kernel process/thread/whatever.  It shares its address space
   71  * with proc0 - ie: kernel only.
   72  *
   73  * func is the function to start.
   74  * arg is the parameter to pass to function on first startup.
   75  * newpp is the return value pointing to the thread's struct proc.
   76  * flags are flags to fork1 (in unistd.h)
   77  * fmt and following will be *printf'd into (*newpp)->p_comm (for ps, etc.).
   78  */
   79 int
   80 kproc_create(void (*func)(void *), void *arg,
   81     struct proc **newpp, int flags, int pages, const char *fmt, ...)
   82 {
   83         int error;
   84         va_list ap;
   85         struct thread *td;
   86         struct proc *p2;
   87 
   88         if (!proc0.p_stats)
   89                 panic("kproc_create called too soon");
   90 
   91         error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
   92             pages, &p2, NULL, 0);
   93         if (error)
   94                 return error;
   95 
   96         /* save a global descriptor, if desired */
   97         if (newpp != NULL)
   98                 *newpp = p2;
   99 
  100         /* this is a non-swapped system process */
  101         PROC_LOCK(p2);
  102         td = FIRST_THREAD_IN_PROC(p2);
  103         p2->p_flag |= P_SYSTEM | P_KTHREAD;
  104         td->td_pflags |= TDP_KTHREAD;
  105         mtx_lock(&p2->p_sigacts->ps_mtx);
  106         p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
  107         mtx_unlock(&p2->p_sigacts->ps_mtx);
  108         PROC_UNLOCK(p2);
  109 
  110         /* set up arg0 for 'ps', et al */
  111         va_start(ap, fmt);
  112         vsnprintf(p2->p_comm, sizeof(p2->p_comm), fmt, ap);
  113         va_end(ap);
  114         /* ... and the thread name, also shown by 'ps', et al */
  115         va_start(ap, fmt);
  116         vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  117         va_end(ap);
  118 #ifdef KTR
  119         sched_clear_tdname(td);
  120 #endif
  121 
  122         /* call the process's main()... */
  123         cpu_set_fork_handler(td, func, arg);
  124 
  125         /* Avoid inheriting affinity from a random parent. */
  126         cpuset_setthread(td->td_tid, cpuset_root);
  127         thread_lock(td);
  128         TD_SET_CAN_RUN(td);
  129         sched_prio(td, PVM);
  130         sched_user_prio(td, PUSER);
  131 
  132         /* Delay putting it on the run queue until now. */
  133         if (!(flags & RFSTOPPED))
  134                 sched_add(td, SRQ_BORING); 
  135         thread_unlock(td);
  136 
  137         return 0;
  138 }
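
/*
 * Example (illustrative sketch, not part of this file): a direct
 * kproc_create() call as described by the comment above.  worker_main,
 * worker_proc and the "worker" name are hypothetical.
 */
static struct proc *worker_proc;

static void
worker_main(void *arg __unused)
{
        for (;;) {
                /* do one round of work, then sleep for a second */
                pause("wrkslp", hz);
        }
        /* a kernel process terminates via kproc_exit(), not by returning */
}

static void
worker_start(void)
{
        int error;

        error = kproc_create(worker_main, NULL, &worker_proc,
            0, 0, "worker");
        if (error != 0)
                printf("worker: kproc_create failed: %d\n", error);
}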
  139 
  140 void
  141 kproc_exit(int ecode)
  142 {
  143         struct thread *td;
  144         struct proc *p;
  145 
  146         td = curthread;
  147         p = td->td_proc;
  148 
  149         /*
  150          * Reparent curthread from proc0 to init so that the zombie
  151          * is harvested.
  152          */
  153         sx_xlock(&proctree_lock);
  154         PROC_LOCK(p);
  155         proc_reparent(p, initproc);
  156         PROC_UNLOCK(p);
  157         sx_xunlock(&proctree_lock);
  158 
  159         /*
  160          * Wakeup anyone waiting for us to exit.
  161          */
  162         wakeup(p);
  163 
  164         /* Buh-bye! */
  165         exit1(td, W_EXITCODE(ecode, 0));
  166 }
  167 
  168 /*
  169  * Advise a kernel process to suspend (or resume) in its main loop.
  170  * Participation is voluntary.
  171  */
  172 int
  173 kproc_suspend(struct proc *p, int timo)
  174 {
  175         /*
  176          * Make sure this is indeed a system process and we can safely
  177          * use the p_siglist field.
  178          */
  179         PROC_LOCK(p);
  180         if ((p->p_flag & P_KTHREAD) == 0) {
  181                 PROC_UNLOCK(p);
  182                 return (EINVAL);
  183         }
  184         SIGADDSET(p->p_siglist, SIGSTOP);
  185         wakeup(p);
  186         return msleep(&p->p_siglist, &p->p_mtx, PPAUSE | PDROP, "suspkp", timo);
  187 }
  188 
  189 int
  190 kproc_resume(struct proc *p)
  191 {
  192         /*
  193          * Make sure this is indeed a system process and we can safely
  194          * use the p_siglist field.
  195          */
  196         PROC_LOCK(p);
  197         if ((p->p_flag & P_KTHREAD) == 0) {
  198                 PROC_UNLOCK(p);
  199                 return (EINVAL);
  200         }
  201         SIGDELSET(p->p_siglist, SIGSTOP);
  202         PROC_UNLOCK(p);
  203         wakeup(&p->p_siglist);
  204         return (0);
  205 }
  206 
  207 void
  208 kproc_suspend_check(struct proc *p)
  209 {
  210         PROC_LOCK(p);
  211         while (SIGISMEMBER(p->p_siglist, SIGSTOP)) {
  212                 wakeup(&p->p_siglist);
  213                 msleep(&p->p_siglist, &p->p_mtx, PPAUSE, "kpsusp", 0);
  214         }
  215         PROC_UNLOCK(p);
  216 }
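
/*
 * Example (illustrative sketch, not part of this file): the cooperative
 * suspend protocol for a kernel process.  The daemon polls
 * kproc_suspend_check() in its main loop; another thread asks it to park
 * and later releases it.  worker_proc is the hypothetical pointer saved
 * by the kproc_create() sketch above.
 */
static void
worker_loop(void *arg __unused)
{
        for (;;) {
                kproc_suspend_check(curproc);   /* park here when asked */
                /* ... one iteration of work ... */
                pause("wrkslp", hz);
        }
}

static void
worker_pause_resume(void)
{
        /* Ask the daemon to stop, waiting at most ten seconds for it. */
        if (kproc_suspend(worker_proc, 10 * hz) == 0)
                kproc_resume(worker_proc);      /* let it run again */
}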
  217 
  218 
  219 /*
  220  * Start a kernel thread.  
  221  *
  222  * This function is used to start "internal" daemons and intended
  223  * to be called from SYSINIT().
  224  */
  225 
  226 void
  227 kthread_start(udata)
  228         const void *udata;
  229 {
  230         const struct kthread_desc       *kp = udata;
  231         int error;
  232 
  233         error = kthread_add((void (*)(void *))kp->func, NULL,
  234                     NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0);
  235         if (error)
  236                 panic("kthread_start: %s: error %d", kp->arg0, error);
  237 }
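
/*
 * Example (illustrative sketch, not part of this file): the analogous
 * kthread_desc / SYSINIT() pairing for kthread_start(); the resulting
 * thread is attached to proc0.  cleaner_main and cleaner_td are
 * hypothetical names.
 */
static struct thread *cleaner_td;

static void
cleaner_main(void)
{
        /* thread body */
}

static struct kthread_desc cleaner_kt = {
        "cleaner",              /* arg0: thread name */
        cleaner_main,           /* func: entry point */
        &cleaner_td             /* global_threadpp: where the thread is saved */
};
SYSINIT(cleaner, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kthread_start, &cleaner_kt);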
  238 
  239 /*
  240  * Create a kernel thread.  It shares its address space
  241  * with proc0 - ie: kernel only.
  242  *
  243  * func is the function to start.
  244  * arg is the parameter to pass to function on first startup.
  245  * newtdp is the return value pointing to the thread's struct thread.
  246  *  ** XXX fix this --> flags are flags to fork1 (in unistd.h) 
  247  * fmt and following will be *printf'd into (*newtdp)->td_name (for ps, etc.).
  248  */
  249 int
  250 kthread_add(void (*func)(void *), void *arg, struct proc *p,
  251     struct thread **newtdp, int flags, int pages, const char *fmt, ...)
  252 {
  253         va_list ap;
  254         struct thread *newtd, *oldtd;
  255 
  256         if (!proc0.p_stats)
  257                 panic("kthread_add called too soon");
  258 
  259         /* If no process supplied, put it on proc0 */
  260         if (p == NULL) {
  261                 p = &proc0;
  262                 oldtd = &thread0;
  263         } else {
  264                 oldtd = FIRST_THREAD_IN_PROC(p);
  265         }
  266 
  267         /* Initialize our new td  */
  268         newtd = thread_alloc(pages);
  269         if (newtd == NULL)
  270                 return (ENOMEM);
  271 
  272         bzero(&newtd->td_startzero,
  273             __rangeof(struct thread, td_startzero, td_endzero));
  274 /* XXX check if we should zero. */
  275         bcopy(&oldtd->td_startcopy, &newtd->td_startcopy,
  276             __rangeof(struct thread, td_startcopy, td_endcopy));
  277 
  278         /* set up arg0 for 'ps', et al */
  279         va_start(ap, fmt);
  280         vsnprintf(newtd->td_name, sizeof(newtd->td_name), fmt, ap);
  281         va_end(ap);
  282 
  283         newtd->td_proc = p;  /* needed for cpu_set_upcall */
  284 
  285         /* XXX optimise this probably? */
  286         /* On x86 (and probably the others too) it is way too full of junk */
  287         /* Needs a better name */
  288         cpu_set_upcall(newtd, oldtd);
  289         /* put the designated function(arg) as the resume context */
  290         cpu_set_fork_handler(newtd, func, arg);
  291 
  292         newtd->td_pflags |= TDP_KTHREAD;
  293         newtd->td_ucred = crhold(p->p_ucred);
  294 
  295         /* this code almost the same as create_thread() in kern_thr.c */
  296         PROC_LOCK(p);
  297         p->p_flag |= P_HADTHREADS;
  298         newtd->td_sigmask = oldtd->td_sigmask; /* XXX dubious */
  299         thread_link(newtd, p);
  300         thread_lock(oldtd);
  301         /* let the scheduler know about these things. */
  302         sched_fork_thread(oldtd, newtd);
  303         TD_SET_CAN_RUN(newtd);
  304         thread_unlock(oldtd);
  305         PROC_UNLOCK(p);
  306 
  307         tidhash_add(newtd);
  308 
  309         /* Avoid inheriting affinity from a random parent. */
  310         cpuset_setthread(newtd->td_tid, cpuset_root);
  311 
  312         /* Delay putting it on the run queue until now. */
  313         if (!(flags & RFSTOPPED)) {
  314                 thread_lock(newtd);
  315                 sched_add(newtd, SRQ_BORING); 
  316                 thread_unlock(newtd);
  317         }
  318         if (newtdp)
  319                 *newtdp = newtd;
  320         return 0;
  321 }
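
/*
 * Example (illustrative sketch, not part of this file): attaching an
 * extra thread to an existing kernel process with kthread_add().
 * io_main, io_td and softc are hypothetical; passing NULL instead of
 * worker_proc would attach the thread to proc0.
 */
static struct thread *io_td;

static void
io_main(void *arg)
{
        for (;;) {
                /* service the work queue described by arg */
                pause("iowait", hz);
        }
        /* a kthread terminates via kthread_exit(), not by returning */
}

static void
io_thread_start(void *softc)
{
        int error;

        error = kthread_add(io_main, softc, worker_proc, &io_td,
            0, 0, "io%d", 0);
        if (error != 0)
                printf("io: kthread_add failed: %d\n", error);
}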
  322 
  323 void
  324 kthread_exit(void)
  325 {
  326         struct proc *p;
  327 
  328         p = curthread->td_proc;
  329 
  330         /* A module may be waiting for us to exit. */
  331         wakeup(curthread);
  332 
  333         /*
  334          * The last exiting thread in a kernel process must tear down
  335          * the whole process.
  336          */
  337         rw_wlock(&tidhash_lock);
  338         PROC_LOCK(p);
  339         if (p->p_numthreads == 1) {
  340                 PROC_UNLOCK(p);
  341                 rw_wunlock(&tidhash_lock);
  342                 kproc_exit(0);
  343         }
  344         LIST_REMOVE(curthread, td_hash);
  345         rw_wunlock(&tidhash_lock);
  346         PROC_SLOCK(p);
  347         thread_exit();
  348 }
  349 
  350 /*
  351  * Advise a kernel thread to suspend (or resume) in its main loop.
  352  * Participation is voluntary.
  353  */
  354 int
  355 kthread_suspend(struct thread *td, int timo)
  356 {
  357         struct proc *p;
  358 
  359         p = td->td_proc;
  360 
  361         /*
  362          * td_pflags should not be read by any thread other than
  363          * curthread, but as long as this flag is invariant during the
  364          * thread's lifetime, it is OK to check its state.
  365          */
  366         if ((td->td_pflags & TDP_KTHREAD) == 0)
  367                 return (EINVAL);
  368 
  369         /*
  370          * The caller of the primitive should have already checked that the
  371          * thread is up and running, thus not being blocked by other
  372          * conditions.
  373          */
  374         PROC_LOCK(p);
  375         thread_lock(td);
  376         td->td_flags |= TDF_KTH_SUSP;
  377         thread_unlock(td);
  378         return (msleep(&td->td_flags, &p->p_mtx, PPAUSE | PDROP, "suspkt",
  379             timo));
  380 }
  381 
  382 /*
  383  * Resume a thread previously put asleep with kthread_suspend().
  384  */
  385 int
  386 kthread_resume(struct thread *td)
  387 {
  388         struct proc *p;
  389 
  390         p = td->td_proc;
  391 
  392         /*
  393          * td_pflags should not be read by any thread other than
  394          * curthread, but as long as this flag is invariant during the
  395          * thread's lifetime, it is OK to check its state.
  396          */
  397         if ((td->td_pflags & TDP_KTHREAD) == 0)
  398                 return (EINVAL);
  399 
  400         PROC_LOCK(p);
  401         thread_lock(td);
  402         td->td_flags &= ~TDF_KTH_SUSP;
  403         thread_unlock(td);
  404         wakeup(&td->td_flags);
  405         PROC_UNLOCK(p);
  406         return (0);
  407 }
  408 
  409 /*
  410  * Used by the thread to poll whether it should yield/sleep and to
  411  * notify the caller that it has happened.
  412  */
  413 void
  414 kthread_suspend_check()
  415 {
  416         struct proc *p;
  417         struct thread *td;
  418 
  419         td = curthread;
  420         p = td->td_proc;
  421 
  422         if ((td->td_pflags & TDP_KTHREAD) == 0)
  423                 panic("%s: curthread is not a valid kthread", __func__);
  424 
  425         /*
  426          * As long as the double-lock protection is used when accessing the
  427          * TDF_KTH_SUSP flag, synchronizing the read operation via proc mutex
  428          * is fine.
  429          */
  430         PROC_LOCK(p);
  431         while (td->td_flags & TDF_KTH_SUSP) {
  432                 wakeup(&td->td_flags);
  433                 msleep(&td->td_flags, &p->p_mtx, PPAUSE, "ktsusp", 0);
  434         }
  435         PROC_UNLOCK(p);
  436 }
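
/*
 * Example (illustrative sketch, not part of this file): the per-thread
 * suspend protocol.  The kthread's main loop (e.g. io_main() above)
 * would call kthread_suspend_check() at the top of each iteration; a
 * controlling thread then drives it through the struct thread pointer
 * returned by kthread_add().  io_td is the hypothetical pointer from
 * the earlier sketch.
 */
static void
io_pause_resume(void)
{
        /* Ask the kthread to park, waiting at most ten seconds for it. */
        if (kthread_suspend(io_td, 10 * hz) == 0)
                kthread_resume(io_td);          /* and let it run again */
}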
  437 
  438 int
  439 kproc_kthread_add(void (*func)(void *), void *arg,
  440             struct proc **procptr, struct thread **tdptr,
  441             int flags, int pages, const char *procname, const char *fmt, ...) 
  442 {
  443         int error;
  444         va_list ap;
  445         char buf[100];
  446         struct thread *td;
  447 
  448         if (*procptr == 0) {
  449                 error = kproc_create(func, arg,
  450                         procptr, flags, pages, "%s", procname);
  451                 if (error)
  452                         return (error);
  453                 td = FIRST_THREAD_IN_PROC(*procptr);
  454                 if (tdptr)
  455                         *tdptr = td;
  456                 va_start(ap, fmt);
  457                 vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
  458                 va_end(ap);
  459 #ifdef KTR
  460                 sched_clear_tdname(td);
  461 #endif
  462                 return (0); 
  463         }
  464         va_start(ap, fmt);
  465         vsnprintf(buf, sizeof(buf), fmt, ap);
  466         va_end(ap);
  467         error = kthread_add(func, arg, *procptr,
  468                     tdptr, flags, pages, "%s", buf);
  469         return (error);
  470 }
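
/*
 * Example (illustrative sketch, not part of this file): kproc_kthread_add()
 * creates the process on the first call and only adds a thread on later
 * calls, so several threads can share one named kernel process.
 * pool_main, pool_proc, pool_td and the "pool" names are hypothetical.
 */
static struct proc *pool_proc;
static struct thread *pool_td[4];

static void
pool_main(void *arg)
{
        for (;;) {
                /* work loop for lane (int)(uintptr_t)arg */
                pause("poolwt", hz);
        }
}

static void
pool_start(void)
{
        int error, i;

        for (i = 0; i < 4; i++) {
                error = kproc_kthread_add(pool_main, (void *)(uintptr_t)i,
                    &pool_proc, &pool_td[i], 0, 0, "pool", "pool%d", i);
                if (error != 0)
                        printf("pool: kproc_kthread_add failed: %d\n", error);
        }
}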
