FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_thr.c
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/6.1/sys/kern/kern_thr.c 158179 2006-04-30 16:44:43Z cvs2svn $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/umtx.h>

#include <machine/frame.h>

extern int max_threads_per_proc;
extern int max_groups_per_proc;

SYSCTL_DECL(_kern_threads);
static int thr_scope = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope, CTLFLAG_RW,
    &thr_scope, 0, "sys or proc scope scheduling");

static int thr_concurrency = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
    &thr_concurrency, 0, "a concurrency value if not default");
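
/*
 * Usage sketch (illustrative): both knobs above live under the
 * kern.threads sysctl node, so an administrator could, for example,
 * force system-scope threads and pin the concurrency hint with:
 *
 *      sysctl kern.threads.thr_scope=2
 *      sysctl kern.threads.thr_concurrency=4
 */
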
static int create_thread(struct thread *td, mcontext_t *ctx,
                void (*start_func)(void *), void *arg,
                char *stack_base, size_t stack_size,
                char *tls_base,
                long *child_tid, long *parent_tid,
                int flags);

/*
 * System call interface.
 */
int
thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
        ucontext_t ctx;
        int error;

        if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
                return (error);

        error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
                NULL, 0, NULL, uap->id, NULL, uap->flags);
        return (error);
}
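
/*
 * Usage sketch (illustrative): a userland caller could reach this older
 * interface with a context captured by getcontext(2) and an entry point
 * patched in with makecontext(3).  The stack and entry names below are
 * placeholders, assuming the caller already mapped a stack:
 *
 *      ucontext_t uc;
 *      long tid;
 *
 *      getcontext(&uc);
 *      uc.uc_stack.ss_sp = stack;
 *      uc.uc_stack.ss_size = stacksize;
 *      makecontext(&uc, entry, 0);
 *      if (thr_create(&uc, &tid, 0) != 0)
 *              err(1, "thr_create");
 */
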
int
thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
        struct thr_param param;
        int error;

        if (uap->param_size < sizeof(param))
                return (EINVAL);
        if ((error = copyin(uap->param, &param, sizeof(param))))
                return (error);
        error = create_thread(td, NULL, param.start_func, param.arg,
                param.stack_base, param.stack_size, param.tls_base,
                param.child_tid, param.parent_tid, param.flags);
        return (error);
}
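
/*
 * Usage sketch (illustrative): how a userland caller might fill in the
 * thr_param consumed above.  Error handling and TLS allocation are
 * elided, and all lowercase names are placeholders:
 *
 *      struct thr_param p;
 *      long child, parent;
 *
 *      bzero(&p, sizeof(p));
 *      p.start_func = entry;
 *      p.arg = entry_arg;
 *      p.stack_base = stack;
 *      p.stack_size = stacksize;
 *      p.tls_base = tls;
 *      p.child_tid = &child;
 *      p.parent_tid = &parent;
 *      if (thr_new(&p, sizeof(p)) != 0)
 *              err(1, "thr_new");
 */
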
static int
create_thread(struct thread *td, mcontext_t *ctx,
            void (*start_func)(void *), void *arg,
            char *stack_base, size_t stack_size,
            char *tls_base,
            long *child_tid, long *parent_tid,
            int flags)
{
        stack_t stack;
        struct thread *newtd;
        struct ksegrp *kg, *newkg;
        struct proc *p;
        long id;
        int error, scope_sys, linkkg;

        error = 0;
        p = td->td_proc;
        kg = td->td_ksegrp;

        /* This check races, but it is cheap. */
        if ((p->p_numksegrps >= max_groups_per_proc) ||
            (p->p_numthreads >= max_threads_per_proc)) {
                return (EPROCLIM);
        }

        /* Check PTHREAD_SCOPE_SYSTEM. */
        scope_sys = (flags & THR_SYSTEM_SCOPE) != 0;

        /*
         * The thr_scope sysctl overrides the user's flag: 1 forces
         * process scope, 2 forces system scope.
         */
        if (thr_scope == 1)
                scope_sys = 0;
        else if (thr_scope == 2)
                scope_sys = 1;

        /* Initialize our new td and ksegrp. */
        newtd = thread_alloc();

        /*
         * Try the copyout as soon as we allocate the td so we don't
         * have to tear things down in a failure case below.
         * The tid is copied out to two places, one for the child and
         * one for the parent, because pthread can create a detached
         * thread: if the parent wants to access the child's tid safely,
         * it must provide its own storage, since the child may exit
         * quickly and have its memory freed before the parent looks.
         */
        id = newtd->td_tid;
        if ((child_tid != NULL &&
            (error = copyout(&id, child_tid, sizeof(long)))) ||
            (parent_tid != NULL &&
            (error = copyout(&id, parent_tid, sizeof(long))))) {
                thread_free(newtd);
                return (error);
        }
        bzero(&newtd->td_startzero,
            __rangeof(struct thread, td_startzero, td_endzero));
        bcopy(&td->td_startcopy, &newtd->td_startcopy,
            __rangeof(struct thread, td_startcopy, td_endcopy));
        newtd->td_proc = td->td_proc;
        newtd->td_ucred = crhold(td->td_ucred);

        cpu_set_upcall(newtd, td);

        if (ctx != NULL) { /* old way to set user context */
                error = set_mcontext(newtd, ctx);
                if (error != 0) {
                        thread_free(newtd);
                        crfree(td->td_ucred);
                        return (error);
                }
        } else {
                /* Set up our machine context. */
                stack.ss_sp = stack_base;
                stack.ss_size = stack_size;
                /* Set upcall address to the user thread entry function. */
                cpu_set_upcall_kse(newtd, start_func, arg, &stack);
                /* Set up the user TLS address and TLS pointer register. */
                error = cpu_set_user_tls(newtd, tls_base);
                if (error != 0) {
                        thread_free(newtd);
                        crfree(td->td_ucred);
                        return (error);
                }
        }

        if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
                /* Treat the initial thread as if it has PTHREAD_SCOPE_PROCESS. */
                p->p_procscopegrp = kg;
                mtx_lock_spin(&sched_lock);
                sched_set_concurrency(kg,
                    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
                mtx_unlock_spin(&sched_lock);
        }

        linkkg = 0;
        if (scope_sys) {
                linkkg = 1;
                newkg = ksegrp_alloc();
                bzero(&newkg->kg_startzero,
                    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
                bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
                sched_init_concurrency(newkg);
                PROC_LOCK(td->td_proc);
        } else {
                /*
                 * Try to create a KSE group which will be shared
                 * by all PTHREAD_SCOPE_PROCESS threads.  The group is
                 * allocated without the proc lock held, so recheck
                 * under the lock and retry if another thread installed
                 * one first.
                 */
retry:
                PROC_LOCK(td->td_proc);
                if ((newkg = p->p_procscopegrp) == NULL) {
                        PROC_UNLOCK(p);
                        newkg = ksegrp_alloc();
                        bzero(&newkg->kg_startzero,
                            __rangeof(struct ksegrp, kg_startzero, kg_endzero));
                        bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
                            __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
                        PROC_LOCK(p);
                        if (p->p_procscopegrp == NULL) {
                                p->p_procscopegrp = newkg;
                                sched_init_concurrency(newkg);
                                sched_set_concurrency(newkg,
                                    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
                                linkkg = 1;
                        } else {
                                /* Lost the race; free ours and retry. */
                                PROC_UNLOCK(p);
                                ksegrp_free(newkg);
                                goto retry;
                        }
                }
        }

        td->td_proc->p_flag |= P_HADTHREADS;
        newtd->td_sigmask = td->td_sigmask;
        mtx_lock_spin(&sched_lock);
        if (linkkg)
                ksegrp_link(newkg, p);
        thread_link(newtd, newkg);
        PROC_UNLOCK(p);

        /* Let the scheduler know about these things. */
        if (linkkg)
                sched_fork_ksegrp(td, newkg);
        sched_fork_thread(td, newtd);
        TD_SET_CAN_RUN(newtd);
        /* if ((flags & THR_SUSPENDED) == 0) */
                setrunqueue(newtd, SRQ_BORING);
        mtx_unlock_spin(&sched_lock);

        return (error);
}

int
thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
        long id;
        int error;

        id = td->td_tid;
        if ((error = copyout(&id, uap->id, sizeof(long))))
                return (error);

        return (0);
}
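
/*
 * Usage sketch (illustrative): a userland caller retrieving its own tid,
 * assuming <sys/thr.h>:
 *
 *      long tid;
 *
 *      if (thr_self(&tid) == 0)
 *              printf("my tid is %ld\n", tid);
 */
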
int
thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{
        struct proc *p;

        p = td->td_proc;

        /* Signal userland that it can free the stack. */
        if ((void *)uap->state != NULL) {
                suword((void *)uap->state, 1);
                kern_umtx_wake(td, uap->state, INT_MAX);
        }

        PROC_LOCK(p);
        mtx_lock_spin(&sched_lock);

        /*
         * If this is not the last thread, just exit this thread.
         * Otherwise fall through: shutting down the last thread in the
         * proc will actually call exit() in the trampoline when the
         * syscall returns.
         */
        if (p->p_numthreads != 1) {
                thread_stopped(p);
                thread_exit();
                /* NOTREACHED */
        }
        mtx_unlock_spin(&sched_lock);
        PROC_UNLOCK(p);
        return (0);
}
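
/*
 * Usage sketch (illustrative): the state word doubles as a join
 * handshake.  The exiting thread stores 1 there and wakes any umtx
 * waiters sleeping on that address, so with a "long state" shared
 * between the threads the dying side is simply:
 *
 *      thr_exit(&state);
 *
 * and a joiner can block on &state until it reads 1.
 */
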
int
thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
        struct thread *ttd;
        struct proc *p;
        int error;

        p = td->td_proc;
        error = 0;
        PROC_LOCK(p);
        if (uap->id == -1) {
                if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
                        error = EINVAL;
                } else {
                        error = ESRCH;
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd != td) {
                                        error = 0;
                                        if (uap->sig == 0)
                                                break;
                                        tdsignal(ttd, uap->sig, SIGTARGET_TD);
                                }
                        }
                }
        } else {
                if (uap->id != td->td_tid) {
                        FOREACH_THREAD_IN_PROC(p, ttd) {
                                if (ttd->td_tid == uap->id)
                                        break;
                        }
                } else
                        ttd = td;
                if (ttd == NULL)
                        error = ESRCH;
                else if (uap->sig == 0)
                        ;
                else if (!_SIG_VALID(uap->sig))
                        error = EINVAL;
                else
                        tdsignal(ttd, uap->sig, SIGTARGET_TD);
        }
        PROC_UNLOCK(p);
        return (error);
}
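
/*
 * Usage sketch (illustrative): tid is assumed to come from thr_new() or
 * thr_self().  A sig of 0 only probes for existence, and an id of -1
 * addresses every thread in the process except the caller:
 *
 *      thr_kill(tid, SIGUSR1);
 *      thr_kill(tid, 0);
 *      thr_kill(-1, SIGUSR1);
 */
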
int
thr_suspend(struct thread *td, struct thr_suspend_args *uap)
    /* const struct timespec *timeout */
{
        struct timespec ts;
        struct timeval tv;
        int error;
        int hz;

        hz = 0;
        error = 0;
        if (uap->timeout != NULL) {
                error = copyin((const void *)uap->timeout, (void *)&ts,
                    sizeof(struct timespec));
                if (error != 0)
                        return (error);
                if (ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000)
                        return (EINVAL);
                if (ts.tv_sec == 0 && ts.tv_nsec == 0)
                        return (ETIMEDOUT);
                TIMESPEC_TO_TIMEVAL(&tv, &ts);
                hz = tvtohz(&tv);
        }
        PROC_LOCK(td->td_proc);
        if ((td->td_flags & TDF_THRWAKEUP) == 0)
                error = msleep((void *)td, &td->td_proc->p_mtx,
                    td->td_priority | PCATCH, "lthr", hz);
        if (td->td_flags & TDF_THRWAKEUP) {
                mtx_lock_spin(&sched_lock);
                td->td_flags &= ~TDF_THRWAKEUP;
                mtx_unlock_spin(&sched_lock);
                PROC_UNLOCK(td->td_proc);
                return (0);
        }
        PROC_UNLOCK(td->td_proc);
        if (error == EWOULDBLOCK)
                error = ETIMEDOUT;
        else if (error == ERESTART) {
                if (hz != 0)
                        error = EINTR;
        }
        return (error);
}
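
/*
 * Usage sketch (illustrative): a bounded wait.  Unless another thread
 * calls thr_wake() on this one first, the call returns ETIMEDOUT after
 * roughly 50 ms; a NULL timeout sleeps until woken or signalled:
 *
 *      struct timespec ts = { 0, 50000000 };
 *      int error;
 *
 *      error = thr_suspend(&ts);
 */
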
int
thr_wake(struct thread *td, struct thr_wake_args *uap)
    /* long id */
{
        struct thread *ttd;

        PROC_LOCK(td->td_proc);
        FOREACH_THREAD_IN_PROC(td->td_proc, ttd) {
                if (ttd->td_tid == uap->id)
                        break;
        }
        if (ttd == NULL) {
                PROC_UNLOCK(td->td_proc);
                return (ESRCH);
        }
        mtx_lock_spin(&sched_lock);
        ttd->td_flags |= TDF_THRWAKEUP;
        mtx_unlock_spin(&sched_lock);
        wakeup((void *)ttd);
        PROC_UNLOCK(td->td_proc);
        return (0);
}
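
/*
 * Usage sketch (illustrative): thr_suspend() and thr_wake() pair up as a
 * minimal park/unpark primitive between two threads of one process,
 * where "peer" is assumed to hold the sleeping thread's tid:
 *
 *      In the sleeping thread:
 *              thr_suspend(NULL);
 *
 *      In the waking thread:
 *              thr_wake(peer);
 */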