FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_thr.c
1 /*-
2 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include "opt_compat.h"
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/lock.h>
34 #include <sys/mutex.h>
35 #include <sys/proc.h>
36 #include <sys/resourcevar.h>
37 #include <sys/sched.h>
38 #include <sys/sysctl.h>
39 #include <sys/smp.h>
40 #include <sys/syscallsubr.h>
41 #include <sys/sysent.h>
42 #include <sys/systm.h>
43 #include <sys/sysproto.h>
44 #include <sys/signalvar.h>
45 #include <sys/ucontext.h>
46 #include <sys/thr.h>
47 #include <sys/umtx.h>
48
49 #include <machine/frame.h>
50
51 #ifdef COMPAT_IA32
52
53 extern struct sysentvec ia32_freebsd_sysvec;
54
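/*
 * Store a thread id into a userland slot whose width depends on the
 * caller's ABI: an ia32 process on a 64-bit kernel keeps tids in
 * 32-bit fields, so a full-width suword() would write too much.
 */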
55 static inline int
56 suword_lwpid(void *addr, lwpid_t lwpid)
57 {
58 int error;
59
60 if (curproc->p_sysent != &ia32_freebsd_sysvec)
61 error = suword(addr, lwpid);
62 else
63 error = suword32(addr, lwpid);
64 return (error);
65 }
66
67 #else
68 #define suword_lwpid suword
69 #endif
70
71 extern int max_threads_per_proc;
72 extern int max_groups_per_proc;
73
74 SYSCTL_DECL(_kern_threads);
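/* thr_scope: 0 honors the thread's own flag, 1 forces process scope, 2 forces system scope. */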
75 static int thr_scope = 0;
76 SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope, CTLFLAG_RW,
77 &thr_scope, 0, "sys or proc scope scheduling");
78
79 static int thr_concurrency = 0;
80 SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
81 &thr_concurrency, 0, "concurrency value to use instead of the default");
82
83 static int create_thread(struct thread *td, mcontext_t *ctx,
84 void (*start_func)(void *), void *arg,
85 char *stack_base, size_t stack_size,
86 char *tls_base,
87 long *child_tid, long *parent_tid,
88 int flags);
89
90 /*
91 * System call interface.
92 */
93 int
94 thr_create(struct thread *td, struct thr_create_args *uap)
95 /* ucontext_t *ctx, long *id, int flags */
96 {
97 ucontext_t ctx;
98 int error;
99
100 if ((error = copyin(uap->ctx, &ctx, sizeof(ctx))))
101 return (error);
102
103 error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
104 NULL, 0, NULL, uap->id, NULL, uap->flags);
105 return (error);
106 }
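thr_create() is the older interface: userland hands in a complete ucontext_t, of which only uc_mcontext is consumed. A minimal userland sketch, assuming the thr_create(2)/thr_self(2)/thr_exit(2) prototypes exported by sys/thr.h of this era and a makecontext()-built context; the sleep() is a crude stand-in for real synchronization:

    #include <sys/types.h>
    #include <sys/thr.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <ucontext.h>
    #include <unistd.h>

    static char stk[65536];

    static void
    entry(void)
    {
            long self;

            thr_self(&self);                /* see thr_self() below */
            printf("running as tid %ld\n", self);
            thr_exit(NULL);                 /* never returns */
    }

    int
    main(void)
    {
            ucontext_t ctx;
            long tid;

            /* Build a context that starts in entry() on a private stack. */
            getcontext(&ctx);
            ctx.uc_stack.ss_sp = stk;
            ctx.uc_stack.ss_size = sizeof(stk);
            makecontext(&ctx, entry, 0);

            if (thr_create(&ctx, &tid, 0) == -1)    /* kernel consumes uc_mcontext */
                    abort();
            sleep(1);
            return (0);
    }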
107
108 int
109 thr_new(struct thread *td, struct thr_new_args *uap)
110 /* struct thr_param * */
111 {
112 struct thr_param param;
113 int error;
114
115 if (uap->param_size < sizeof(param))
116 return (EINVAL);
117 if ((error = copyin(uap->param, &param, sizeof(param))))
118 return (error);
119 return (kern_thr_new(td, &param));
120 }
121
122 int
123 kern_thr_new(struct thread *td, struct thr_param *param)
124 {
125 int error;
126
127 error = create_thread(td, NULL, param->start_func, param->arg,
128 param->stack_base, param->stack_size, param->tls_base,
129 param->child_tid, param->parent_tid, param->flags);
130 return (error);
131 }
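thr_new() supersedes thr_create(): userland fills a struct thr_param and the kernel derives the machine context itself (cpu_set_upcall_kse() in create_thread() below). A sketch of the userland side, using only the fields kern_thr_new() consumes above; real libthr also supplies tls_base, omitted here for brevity:

    #include <sys/types.h>
    #include <sys/thr.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static char stk[65536];
    static long child_tid, parent_tid;

    static void
    start(void *arg)
    {
            printf("child %ld started with arg %s\n", child_tid, (char *)arg);
            thr_exit(NULL);
    }

    int
    main(void)
    {
            struct thr_param p;

            memset(&p, 0, sizeof(p));       /* leave unused fields zeroed */
            p.start_func = start;
            p.arg = "hello";
            p.stack_base = stk;
            p.stack_size = sizeof(stk);
            p.child_tid = &child_tid;
            p.parent_tid = &parent_tid;
            if (thr_new(&p, sizeof(p)) == -1)
                    abort();
            sleep(1);
            return (0);
    }

Because create_thread() copies the tid out to child_tid and parent_tid before the new thread is made runnable, both words are already valid by the time start() executes.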
132
133 static int
134 create_thread(struct thread *td, mcontext_t *ctx,
135 void (*start_func)(void *), void *arg,
136 char *stack_base, size_t stack_size,
137 char *tls_base,
138 long *child_tid, long *parent_tid,
139 int flags)
140 {
141 stack_t stack;
142 struct thread *newtd;
143 struct ksegrp *kg, *newkg;
144 struct proc *p;
145 int error, scope_sys, linkkg;
146
147 error = 0;
148 p = td->td_proc;
149 kg = td->td_ksegrp;
150
151 /* This check races with thread creation and exit, but it is cheap. */
152 if ((p->p_numksegrps >= max_groups_per_proc) ||
153 (p->p_numthreads >= max_threads_per_proc)) {
154 return (EPROCLIM);
155 }
156
157 /* Check PTHREAD_SCOPE_SYSTEM */
158 scope_sys = (flags & THR_SYSTEM_SCOPE) != 0;
159
160 /* sysctl overrides user's flag */
161 if (thr_scope == 1)
162 scope_sys = 0;
163 else if (thr_scope == 2)
164 scope_sys = 1;
165
166 /* Allocate the new thread; its ksegrp is set up below. */
167 newtd = thread_alloc();
168
169 /*
170 * Try the copyout as soon as we allocate the td so we don't
171 * have to tear things down in a failure case below.
172 * The tid is copied out twice, once for the child and once
173 * for the parent, because pthread can create detached threads:
174 * a parent that wants to access the child's tid safely must
175 * provide its own storage, since the child may exit and have
176 * its memory freed before the parent can read it.
177 */
178 if ((child_tid != NULL &&
179 suword_lwpid(child_tid, newtd->td_tid)) ||
180 (parent_tid != NULL &&
181 suword_lwpid(parent_tid, newtd->td_tid))) {
182 thread_free(newtd);
183 return (EFAULT);
184 }
185 bzero(&newtd->td_startzero,
186 __rangeof(struct thread, td_startzero, td_endzero));
187 bcopy(&td->td_startcopy, &newtd->td_startcopy,
188 __rangeof(struct thread, td_startcopy, td_endcopy));
189 newtd->td_proc = td->td_proc;
190 newtd->td_ucred = crhold(td->td_ucred);
191
192 cpu_set_upcall(newtd, td);
193
194 if (ctx != NULL) { /* old way to set user context */
195 error = set_mcontext(newtd, ctx);
196 if (error != 0) {
197 thread_free(newtd);
198 crfree(td->td_ucred);
199 return (error);
200 }
201 } else {
202 /* Set up our machine context. */
203 stack.ss_sp = stack_base;
204 stack.ss_size = stack_size;
205 /* Set upcall address to user thread entry function. */
206 cpu_set_upcall_kse(newtd, start_func, arg, &stack);
207 /* Setup user TLS address and TLS pointer register. */
208 error = cpu_set_user_tls(newtd, tls_base);
209 if (error != 0) {
210 thread_free(newtd);
211 crfree(td->td_ucred);
212 return (error);
213 }
214 }
215
216 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
217 /* Treat the initial thread as if it had PTHREAD_SCOPE_PROCESS. */
218 p->p_procscopegrp = kg;
219 mtx_lock_spin(&sched_lock);
220 sched_set_concurrency(kg,
221 thr_concurrency ? thr_concurrency : (2*mp_ncpus));
222 mtx_unlock_spin(&sched_lock);
223 }
224
225 linkkg = 0;
226 if (scope_sys) {
227 linkkg = 1;
228 newkg = ksegrp_alloc();
229 bzero(&newkg->kg_startzero,
230 __rangeof(struct ksegrp, kg_startzero, kg_endzero));
231 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
232 __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
233 sched_init_concurrency(newkg);
234 PROC_LOCK(td->td_proc);
235 } else {
236 /*
237 * Try to create a KSE group which will be shared
238 * by all PTHREAD_SCOPE_PROCESS threads.
239 */
240 retry:
241 PROC_LOCK(td->td_proc);
242 if ((newkg = p->p_procscopegrp) == NULL) {
243 PROC_UNLOCK(p);
244 newkg = ksegrp_alloc();
245 bzero(&newkg->kg_startzero,
246 __rangeof(struct ksegrp, kg_startzero, kg_endzero));
247 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
248 __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
249 PROC_LOCK(p);
250 if (p->p_procscopegrp == NULL) {
251 p->p_procscopegrp = newkg;
252 sched_init_concurrency(newkg);
253 sched_set_concurrency(newkg,
254 thr_concurrency ? thr_concurrency : (2*mp_ncpus));
255 linkkg = 1;
256 } else {
257 PROC_UNLOCK(p);
258 ksegrp_free(newkg);
259 goto retry;
260 }
261 }
262 }
263
264 td->td_proc->p_flag |= P_HADTHREADS;
265 newtd->td_sigmask = td->td_sigmask;
266 mtx_lock_spin(&sched_lock);
267 if (linkkg)
268 ksegrp_link(newkg, p);
269 thread_link(newtd, newkg);
270 PROC_UNLOCK(p);
271
272 /* Let the scheduler know about the new ksegrp and thread. */
273 if (linkkg)
274 sched_fork_ksegrp(td, newkg);
275 sched_fork_thread(td, newtd);
276 TD_SET_CAN_RUN(newtd);
277 /* if ((flags & THR_SUSPENDED) == 0) */
278 setrunqueue(newtd, SRQ_BORING);
279 mtx_unlock_spin(&sched_lock);
280
281 return (error);
282 }
283
284 int
285 thr_self(struct thread *td, struct thr_self_args *uap)
286 /* long *id */
287 {
288 int error;
289
290 error = suword_lwpid(uap->id, (unsigned)td->td_tid);
291 if (error == -1)
292 return (EFAULT);
293 return (0);
294 }
295
296 int
297 thr_exit(struct thread *td, struct thr_exit_args *uap)
298 /* long *state */
299 {
300 struct proc *p;
301
302 p = td->td_proc;
303
304 /* Signal userland that it can free the stack. */
305 if (uap->state != NULL) {
306 suword_lwpid(uap->state, 1);
307 kern_umtx_wake(td, uap->state, INT_MAX);
308 }
309
310 PROC_LOCK(p);
311 mtx_lock_spin(&sched_lock);
312
313 /*
314 * If this is not the last thread, exit this thread now; for the
315 * last thread, fall through and let the trampoline call exit().
316 */
317 if (p->p_numthreads != 1) {
318 thread_stopped(p);
319 thread_exit();
320 /* NOTREACHED */
321 }
322 mtx_unlock_spin(&sched_lock);
323 PROC_UNLOCK(p);
324 return (0);
325 }
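The uap->state store plus kern_umtx_wake() is the thread-exit handshake: the kernel writes 1 to a word the joiner watches and wakes any umtx waiters, which is what tells the joiner the dying thread is finished with its stack. A sketch of the userland half, assuming the _umtx_op(2) UMTX_OP_WAIT operation from the same era:

    #include <sys/types.h>
    #include <sys/thr.h>
    #include <sys/umtx.h>

    static long done;       /* the dying thread calls thr_exit(&done) */

    /*
     * Sleep until thr_exit() has stored 1 into `done' and issued the
     * umtx wake; only after that may the child's stack be reclaimed.
     * UMTX_OP_WAIT blocks while the word still equals the given value.
     */
    static void
    join_child(void)
    {
            while (done == 0)
                    (void)_umtx_op(&done, UMTX_OP_WAIT, 0, NULL, NULL);
    }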
326
327 int
328 thr_kill(struct thread *td, struct thr_kill_args *uap)
329 /* long id, int sig */
330 {
331 struct thread *ttd;
332 struct proc *p;
333 int error;
334
335 p = td->td_proc;
336 error = 0;
337 PROC_LOCK(p);
338 if (uap->id == -1) {
339 if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
340 error = EINVAL;
341 } else {
342 error = ESRCH;
343 FOREACH_THREAD_IN_PROC(p, ttd) {
344 if (ttd != td) {
345 error = 0;
346 if (uap->sig == 0)
347 break;
348 tdsignal(ttd, uap->sig, SIGTARGET_TD);
349 }
350 }
351 }
352 } else {
353 if (uap->id != td->td_tid) {
354 FOREACH_THREAD_IN_PROC(p, ttd) {
355 if (ttd->td_tid == uap->id)
356 break;
357 }
358 } else
359 ttd = td;
360 if (ttd == NULL)
361 error = ESRCH;
362 else if (uap->sig == 0)
363 ;
364 else if (!_SIG_VALID(uap->sig))
365 error = EINVAL;
366 else
367 tdsignal(ttd, uap->sig, SIGTARGET_TD);
368 }
369 PROC_UNLOCK(p);
370 return (error);
371 }
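Per the code above, sig == 0 validates a tid without delivering anything, and id == -1 broadcasts to every thread except the caller. Two small userland helpers (hypothetical names) built on the thr_kill(2) wrapper:

    #include <sys/types.h>
    #include <sys/thr.h>
    #include <signal.h>

    /* sig == 0: nothing is delivered, but a dead tid still yields ESRCH. */
    static int
    thread_alive(long tid)
    {
            return (thr_kill(tid, 0) == 0);
    }

    /* id == -1: deliver to every thread in the process except the caller. */
    static void
    poke_all_others(int sig)
    {
            (void)thr_kill(-1, sig);
    }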
372
373 int
374 thr_suspend(struct thread *td, struct thr_suspend_args *uap)
375 /* const struct timespec *timeout */
376 {
377 struct timespec ts, *tsp;
378 int error;
379
380 error = 0;
381 tsp = NULL;
382 if (uap->timeout != NULL) {
383 error = copyin((const void *)uap->timeout, (void *)&ts,
384 sizeof(struct timespec));
385 if (error != 0)
386 return (error);
387 tsp = &ts;
388 }
389
390 return (kern_thr_suspend(td, tsp));
391 }
392
393 int
394 kern_thr_suspend(struct thread *td, struct timespec *tsp)
395 {
396 struct timeval tv;
397 int error = 0, hz = 0;
398
399 if (tsp != NULL) {
400 if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
401 return (EINVAL);
402 if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
403 return (ETIMEDOUT);
404 TIMESPEC_TO_TIMEVAL(&tv, tsp);
405 hz = tvtohz(&tv);
406 }
407 PROC_LOCK(td->td_proc);
408 if ((td->td_flags & TDF_THRWAKEUP) == 0)
409 error = msleep((void *)td, &td->td_proc->p_mtx, PCATCH, "lthr",
410 hz);
411 if (td->td_flags & TDF_THRWAKEUP) {
412 mtx_lock_spin(&sched_lock);
413 td->td_flags &= ~TDF_THRWAKEUP;
414 mtx_unlock_spin(&sched_lock);
415 PROC_UNLOCK(td->td_proc);
416 return (0);
417 }
418 PROC_UNLOCK(td->td_proc);
419 if (error == EWOULDBLOCK)
420 error = ETIMEDOUT;
421 else if (error == ERESTART) {
422 if (hz != 0)
423 error = EINTR;
424 }
425 return (error);
426 }
427
428 int
429 thr_wake(struct thread *td, struct thr_wake_args *uap)
430 /* long id */
431 {
432 struct thread *ttd;
433
434 PROC_LOCK(td->td_proc);
435 FOREACH_THREAD_IN_PROC(td->td_proc, ttd) {
436 if (ttd->td_tid == uap->id)
437 break;
438 }
439 if (ttd == NULL) {
440 PROC_UNLOCK(td->td_proc);
441 return (ESRCH);
442 }
443 mtx_lock_spin(&sched_lock);
444 ttd->td_flags |= TDF_THRWAKEUP;
445 mtx_unlock_spin(&sched_lock);
446 wakeup((void *)ttd);
447 PROC_UNLOCK(td->td_proc);
448 return (0);
449 }
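thr_suspend() and thr_wake() form a simple parking primitive. Because kern_thr_suspend() tests TDF_THRWAKEUP before sleeping and thr_wake() sets it under sched_lock, a wakeup that arrives before the sleeper parks is not lost. A usage sketch with hypothetical helper names, assuming the userland wrappers return -1 with errno set:

    #include <sys/types.h>
    #include <sys/thr.h>
    #include <errno.h>
    #include <time.h>

    /* Park the caller until thr_wake() is called on its tid, for at
     * most half a second. */
    static int
    park_briefly(void)
    {
            struct timespec ts = { 0, 500000000 };

            if (thr_suspend(&ts) == -1)
                    return (errno);         /* ETIMEDOUT, EINTR, ... */
            return (0);                     /* explicitly woken */
    }

    /* The waker: safe to call even before the target parks, since the
     * TDF_THRWAKEUP flag is checked before kern_thr_suspend() sleeps. */
    static void
    unpark(long tid)
    {
            (void)thr_wake(tid);
    }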