FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_lwp.c
/*	$NetBSD: kern_lwp.c,v 1.29.4.2 2005/11/13 13:56:02 tron Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.29.4.2 2005/11/13 13:56:02 tron Exp $");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <uvm/uvm_extern.h>

struct lwplist alllwp;

#define LWP_DEBUG

#ifdef LWP_DEBUG
int lwp_debug = 0;
#define DPRINTF(x) do { if (lwp_debug) printf x; } while (0)
#else
#define DPRINTF(x)
#endif
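
/*
 * Create a new LWP in the current process.  The new LWP's initial
 * register state is taken from the ucontext supplied by the caller;
 * unless LWP_SUSPENDED is given, the LWP is placed on the run queue
 * immediately.  The new LWP's ID is copied out to *new_lwp.
 */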
/* ARGSUSED */
int
sys__lwp_create(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_create_args /* {
                syscallarg(const ucontext_t *) ucp;
                syscallarg(u_long) flags;
                syscallarg(lwpid_t *) new_lwp;
        } */ *uap = v;
        struct proc *p = l->l_proc;
        struct lwp *l2;
        vaddr_t uaddr;
        boolean_t inmem;
        ucontext_t *newuc;
        int s, error;

        if (p->p_flag & P_SA)
                return EINVAL;

        newuc = pool_get(&lwp_uc_pool, PR_WAITOK);

        error = copyin(SCARG(uap, ucp), newuc, sizeof(*newuc));
        if (error) {
                pool_put(&lwp_uc_pool, newuc);
                return (error);
        }

        /* XXX check against resource limits */

        inmem = uvm_uarea_alloc(&uaddr);
        if (__predict_false(uaddr == 0)) {
                pool_put(&lwp_uc_pool, newuc);
                return (ENOMEM);
        }

        /* XXX flags:
         * __LWP_ASLWP is probably needed for Solaris compat.
         */

        newlwp(l, p, uaddr, inmem,
            SCARG(uap, flags) & LWP_DETACHED,
            NULL, 0, startlwp, newuc, &l2);

        if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0) {
                SCHED_LOCK(s);
                l2->l_stat = LSRUN;
                setrunqueue(l2);
                p->p_nrlwps++;
                SCHED_UNLOCK(s);
        } else {
                l2->l_stat = LSSUSPENDED;
        }

        error = copyout(&l2->l_lid, SCARG(uap, new_lwp),
            sizeof(l2->l_lid));
        if (error)
                return (error);

        return (0);
}


int
sys__lwp_exit(struct lwp *l, void *v, register_t *retval)
{

        lwp_exit(l);
        /* NOTREACHED */
        return (0);
}


int
sys__lwp_self(struct lwp *l, void *v, register_t *retval)
{

        *retval = l->l_lid;

        return (0);
}


int
sys__lwp_getprivate(struct lwp *l, void *v, register_t *retval)
{

        *retval = (uintptr_t) l->l_private;

        return (0);
}


int
sys__lwp_setprivate(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_setprivate_args /* {
                syscallarg(void *) ptr;
        } */ *uap = v;

        l->l_private = SCARG(uap, ptr);

        return (0);
}


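/*
 * Suspend execution of the LWP whose ID is given in the current
 * process.  Suspending ourselves is legal unless every other LWP is
 * already suspended, which would deadlock the process.
 */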
int
sys__lwp_suspend(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_suspend_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        int target_lid;
        struct proc *p = l->l_proc;
        struct lwp *t;
        struct lwp *t2;

        if (p->p_flag & P_SA)
                return EINVAL;

        target_lid = SCARG(uap, target);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL)
                return (ESRCH);

        if (t == l) {
                /*
                 * Check for deadlock, which is only possible
                 * when we're suspending ourselves.
                 */
                LIST_FOREACH(t2, &p->p_lwps, l_sibling) {
                        if ((t2 != l) && (t2->l_stat != LSSUSPENDED))
                                break;
                }

                if (t2 == NULL) /* All other LWPs are suspended */
                        return (EDEADLK);
        }

        return lwp_suspend(l, t);
}

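/*
 * Suspend the target LWP t on behalf of the caller l.  If t is the
 * caller itself, switch away immediately; otherwise move t to
 * LSSUSPENDED according to its current state.
 */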
inline int
lwp_suspend(struct lwp *l, struct lwp *t)
{
        struct proc *p = t->l_proc;
        int s;

        if (t == l) {
                SCHED_LOCK(s);
                l->l_stat = LSSUSPENDED;
                /* XXX NJWLWP check if this makes sense here: */
                p->p_stats->p_ru.ru_nvcsw++;
                mi_switch(l, NULL);
                SCHED_ASSERT_UNLOCKED();
                splx(s);
        } else {
                switch (t->l_stat) {
                case LSSUSPENDED:
                        return (0); /* _lwp_suspend() is idempotent */
                case LSRUN:
                        SCHED_LOCK(s);
                        remrunqueue(t);
                        t->l_stat = LSSUSPENDED;
                        p->p_nrlwps--;
                        SCHED_UNLOCK(s);
                        break;
                case LSSLEEP:
                        t->l_stat = LSSUSPENDED;
                        break;
                case LSIDL:
                case LSZOMB:
                        return (EINTR); /* It's what Solaris does... */
                case LSSTOP:
                        panic("_lwp_suspend: Stopped LWP in running process!");
                        break;
                case LSONPROC:
                        /* XXX multiprocessor LWPs? Implement me! */
                        return (EINVAL);
                }
        }

        return (0);
}


int
sys__lwp_continue(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_continue_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        int s, target_lid;
        struct proc *p = l->l_proc;
        struct lwp *t;

        if (p->p_flag & P_SA)
                return EINVAL;

        target_lid = SCARG(uap, target);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL)
                return (ESRCH);

        SCHED_LOCK(s);
        lwp_continue(t);
        SCHED_UNLOCK(s);

        return (0);
}

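/*
 * Make a suspended LWP runnable again: back onto the run queue if it
 * was runnable when it was suspended, or back to sleep if it was
 * sleeping.  Called with the scheduler locked.
 */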
void
lwp_continue(struct lwp *l)
{

        DPRINTF(("lwp_continue of %d.%d (%s), state %d, wchan %p\n",
            l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm, l->l_stat,
            l->l_wchan));

        if (l->l_stat != LSSUSPENDED)
                return;

        if (l->l_wchan == 0) {
                /* LWP was runnable before being suspended. */
                setrunnable(l);
        } else {
                /* LWP was sleeping before being suspended. */
                l->l_stat = LSSLEEP;
        }
}

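/*
 * Wake the LWP with the given ID out of an interruptible sleep.
 * L_CANCELLED tells ltsleep() that the sleep was cancelled rather
 * than satisfied.
 */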
int
sys__lwp_wakeup(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_wakeup_args /* {
                syscallarg(lwpid_t) target;
        } */ *uap = v;
        lwpid_t target_lid;
        struct lwp *t;
        struct proc *p;
        int error;
        int s;

        p = l->l_proc;
        target_lid = SCARG(uap, target);

        SCHED_LOCK(s);

        LIST_FOREACH(t, &p->p_lwps, l_sibling)
                if (t->l_lid == target_lid)
                        break;

        if (t == NULL) {
                error = ESRCH;
                goto exit;
        }

        if (t->l_stat != LSSLEEP) {
                error = ENODEV;
                goto exit;
        }

        if ((t->l_flag & L_SINTR) == 0) {
                error = EBUSY;
                goto exit;
        }

        /*
         * Tell ltsleep to wake up.
         */
        t->l_flag |= L_CANCELLED;

        setrunnable(t);
        error = 0;
 exit:
        SCHED_UNLOCK(s);

        return error;
}

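/*
 * Wait for an LWP in the current process to exit.  A wait_for of 0
 * waits for any undetached LWP; the departed LWP's ID is copied out
 * to *departed if requested.
 */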
int
sys__lwp_wait(struct lwp *l, void *v, register_t *retval)
{
        struct sys__lwp_wait_args /* {
                syscallarg(lwpid_t) wait_for;
                syscallarg(lwpid_t *) departed;
        } */ *uap = v;
        int error;
        lwpid_t dep;

        error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
        if (error)
                return (error);

        if (SCARG(uap, departed)) {
                error = copyout(&dep, SCARG(uap, departed),
                    sizeof(dep));
                if (error)
                        return (error);
        }

        return (0);
}


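/*
 * Back end of _lwp_wait(): wait for the LWP with ID lid (or for any
 * undetached LWP if lid is 0) to become a zombie, then reap it.
 * Returns EDEADLK if the wait could never be satisfied.
 */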
int
lwp_wait1(struct lwp *l, lwpid_t lid, lwpid_t *departed, int flags)
{
        struct proc *p = l->l_proc;
        struct lwp *l2, *l3;
        int nfound, error, wpri;
        static const char waitstr1[] = "lwpwait";
        static const char waitstr2[] = "lwpwait2";

        DPRINTF(("lwp_wait1: %d.%d waiting for %d.\n",
            p->p_pid, l->l_lid, lid));

        if (lid == l->l_lid)
                return (EDEADLK); /* Waiting for ourselves makes no sense. */

        wpri = PWAIT |
            ((flags & LWPWAIT_EXITCONTROL) ? PNOEXITERR : PCATCH);
 loop:
        nfound = 0;
        LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
                if ((l2 == l) || (l2->l_flag & L_DETACHED) ||
                    ((lid != 0) && (lid != l2->l_lid)))
                        continue;

                nfound++;
                if (l2->l_stat == LSZOMB) {
                        if (departed)
                                *departed = l2->l_lid;

                        simple_lock(&p->p_lock);
                        LIST_REMOVE(l2, l_sibling);
                        p->p_nlwps--;
                        p->p_nzlwps--;
                        simple_unlock(&p->p_lock);
                        /* XXX decrement limits */

                        pool_put(&lwp_pool, l2);

                        return (0);
                } else if (l2->l_stat == LSSLEEP ||
                    l2->l_stat == LSSUSPENDED) {
                        /*
                         * Deadlock checks.
                         * 1. If all other LWPs are waiting for exits
                         *    or suspended, we would deadlock.
                         */

                        LIST_FOREACH(l3, &p->p_lwps, l_sibling) {
                                if (l3 != l && (l3->l_stat != LSSUSPENDED) &&
                                    !(l3->l_stat == LSSLEEP &&
                                      l3->l_wchan == (caddr_t) &p->p_nlwps))
                                        break;
                        }
                        if (l3 == NULL) /* Everyone else is waiting. */
                                return (EDEADLK);

                        /*
                         * XXX We'd like to check for a cycle of waiting
                         * LWPs (specific LID waits, not any-LWP waits)
                         * and detect that sort of deadlock, but we don't
                         * have a good place to store the LWP that is
                         * being waited for.  wchan is already filled with
                         * &p->p_nlwps, and putting the LWP address in
                         * there for deadlock tracing would require
                         * exiting LWPs to call wakeup on both their
                         * own address and &p->p_nlwps, to get threads
                         * sleeping on any LWP exiting.
                         *
                         * Revisit later.  Maybe another auxiliary
                         * storage location associated with sleeping
                         * is in order.
                         */
                }
        }

        if (nfound == 0)
                return (ESRCH);

        if ((error = tsleep((caddr_t) &p->p_nlwps, wpri,
            (lid != 0) ? waitstr1 : waitstr2, 0)) != 0)
                return (error);

        goto loop;
}


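/*
 * Create a new LWP in process p2, inheriting from l1.  The new LWP is
 * left in state LSIDL; the caller is responsible for making it
 * runnable (or suspending it).
 */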
int
newlwp(struct lwp *l1, struct proc *p2, vaddr_t uaddr, boolean_t inmem,
    int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, struct lwp **rnewlwpp)
{
        struct lwp *l2;
        int s;

        l2 = pool_get(&lwp_pool, PR_WAITOK);

        l2->l_stat = LSIDL;
        l2->l_forw = l2->l_back = NULL;
        l2->l_proc = p2;

        memset(&l2->l_startzero, 0,
            (unsigned) ((caddr_t)&l2->l_endzero -
            (caddr_t)&l2->l_startzero));
        memcpy(&l2->l_startcopy, &l1->l_startcopy,
            (unsigned) ((caddr_t)&l2->l_endcopy -
            (caddr_t)&l2->l_startcopy));

#if !defined(MULTIPROCESSOR)
        /*
         * In the single-processor case, all processes will always run
         * on the same CPU.  So, initialize the child's CPU to the parent's
         * now.  In the multiprocessor case, the child's CPU will be
         * initialized in the low-level context switch code when the
         * process runs.
         */
        KASSERT(l1->l_cpu != NULL);
        l2->l_cpu = l1->l_cpu;
#else
        /*
         * Zero the child's CPU pointer so we don't get trash.
         */
        l2->l_cpu = NULL;
#endif /* ! MULTIPROCESSOR */

        l2->l_flag = inmem ? L_INMEM : 0;
        l2->l_flag |= (flags & LWP_DETACHED) ? L_DETACHED : 0;

        callout_init(&l2->l_tsleep_ch);

        if (rnewlwpp != NULL)
                *rnewlwpp = l2;

        l2->l_addr = (struct user *)uaddr;
        uvm_lwp_fork(l1, l2, stack, stacksize, func,
            (arg != NULL) ? arg : l2);

        simple_lock(&p2->p_lock);
        l2->l_lid = ++p2->p_nlwpid;
        LIST_INSERT_HEAD(&p2->p_lwps, l2, l_sibling);
        p2->p_nlwps++;
        simple_unlock(&p2->p_lock);

        /* XXX should be locked differently... */
        s = proclist_lock_write();
        LIST_INSERT_HEAD(&alllwp, l2, l_list);
        proclist_unlock_write(s);

        if (p2->p_emul->e_lwp_fork)
                (*p2->p_emul->e_lwp_fork)(l1, l2);

        return (0);
}


/*
 * Exit an LWP.  This will call cpu_exit, which will call cpu_switch,
 * so this can only be used meaningfully if you're willing to switch
 * away.  Calling with l != curlwp would be weird.
 */
void
lwp_exit(struct lwp *l)
{
        struct proc *p = l->l_proc;
        int s;

        DPRINTF(("lwp_exit: %d.%d exiting.\n", p->p_pid, l->l_lid));
        DPRINTF((" nlwps: %d nrlwps %d nzlwps: %d\n",
            p->p_nlwps, p->p_nrlwps, p->p_nzlwps));

        if (p->p_emul->e_lwp_exit)
                (*p->p_emul->e_lwp_exit)(l);

        /*
         * If we are the last live LWP in a process, we need to exit
         * the entire process (if that's not already going on).  We do
         * so with an exit status of zero, because it's a "controlled"
         * exit, and because that's what Solaris does.
         */
        if (((p->p_nlwps - p->p_nzlwps) == 1) && ((p->p_flag & P_WEXIT) == 0)) {
                DPRINTF(("lwp_exit: %d.%d calling exit1()\n",
                    p->p_pid, l->l_lid));
                exit1(l, 0);
                /* NOTREACHED */
        }

        s = proclist_lock_write();
        LIST_REMOVE(l, l_list);
        proclist_unlock_write(s);

        /* Free MD LWP resources */
#ifndef __NO_CPU_LWP_FREE
        cpu_lwp_free(l, 0);
#endif

        SCHED_LOCK(s);
        p->p_nrlwps--;
        l->l_stat = LSDEAD;
        SCHED_UNLOCK(s);

        /* This LWP no longer needs to hold the kernel lock. */
        KERNEL_PROC_UNLOCK(l);

        pmap_deactivate(l);

        /* cpu_exit() will not return */
        cpu_exit(l);
}

/*
 * We are called from cpu_exit() once it is safe to schedule the
 * dead LWP's resources to be freed (i.e., once we've switched to
 * the idle PCB for the current CPU).
 *
 * NOTE: One must be careful with locking in this routine.  It's
 * called from a critical section in machine-dependent code, so
 * we should refrain from changing any interrupt state.
 */
void
lwp_exit2(struct lwp *l)
{
        struct proc *p;

        KERNEL_LOCK(LK_EXCLUSIVE);
        /*
         * Free the VM resources we're still holding on to.
         */
        uvm_lwp_exit(l);

        if (l->l_flag & L_DETACHED) {
                /* Nobody waits for detached LWPs. */

                if ((l->l_flag & L_PROCEXIT) == 0) {
                        LIST_REMOVE(l, l_sibling);
                        p = l->l_proc;
                        p->p_nlwps--;
                }

                pool_put(&lwp_pool, l);
                KERNEL_UNLOCK();
        } else {
                l->l_stat = LSZOMB;
                p = l->l_proc;
                p->p_nzlwps++;
                KERNEL_UNLOCK();
                wakeup(&p->p_nlwps);
        }
}

/*
 * Pick an LWP to represent the process for those operations which
 * want information about a "process" that is actually associated
 * with an LWP.
 */
struct lwp *
proc_representative_lwp(struct proc *p)
{
        struct lwp *l, *onproc, *running, *sleeping, *stopped, *suspended;
        struct lwp *signalled;

        /* Trivial case: only one LWP */
        if (p->p_nlwps == 1)
                return (LIST_FIRST(&p->p_lwps));

        switch (p->p_stat) {
        case SSTOP:
        case SACTIVE:
                /* Pick the most live LWP */
                onproc = running = sleeping = stopped = suspended = NULL;
                signalled = NULL;
                LIST_FOREACH(l, &p->p_lwps, l_sibling) {
                        if (l->l_lid == p->p_sigctx.ps_lwp)
                                signalled = l;
                        switch (l->l_stat) {
                        case LSONPROC:
                                onproc = l;
                                break;
                        case LSRUN:
                                running = l;
                                break;
                        case LSSLEEP:
                                sleeping = l;
                                break;
                        case LSSTOP:
                                stopped = l;
                                break;
                        case LSSUSPENDED:
                                suspended = l;
                                break;
                        }
                }
                if (signalled)
                        return signalled;
                if (onproc)
                        return onproc;
                if (running)
                        return running;
                if (sleeping)
                        return sleeping;
                if (stopped)
                        return stopped;
                if (suspended)
                        return suspended;
                break;
        case SZOMB:
                /* Doesn't really matter... */
                return (LIST_FIRST(&p->p_lwps));
#ifdef DIAGNOSTIC
        case SIDL:
                /* We have more than one LWP and we're in SIDL?
                 * How'd that happen?
                 */
                panic("Too many LWPs (%d) in SIDL process %d (%s)",
                    p->p_nlwps, p->p_pid, p->p_comm);
        default:
                panic("Process %d (%s) in unknown state %d",
                    p->p_pid, p->p_comm, p->p_stat);
#endif
        }

        panic("proc_representative_lwp: couldn't find a lwp for process"
            " %d (%s)", p->p_pid, p->p_comm);
        /* NOTREACHED */
        return NULL;
}