1 /* $NetBSD: mach_thread.c,v 1.48 2008/10/15 06:51:19 wrstuden Exp $ */
2
3 /*-
4 * Copyright (c) 2002-2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Emmanuel Dreyfus
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: mach_thread.c,v 1.48 2008/10/15 06:51:19 wrstuden Exp $");
34
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/kernel.h>
38 #include <sys/systm.h>
39 #include <sys/signal.h>
40 #include <sys/rwlock.h>
41 #include <sys/queue.h>
42 #include <sys/proc.h>
43 #include <sys/resource.h>
44 #include <sys/resourcevar.h>
45 #include <sys/sa.h>
46 #include <sys/savar.h>
47
48 #include <compat/mach/mach_types.h>
49 #include <compat/mach/mach_message.h>
50 #include <compat/mach/mach_exec.h>
51 #include <compat/mach/mach_clock.h>
52 #include <compat/mach/mach_port.h>
53 #include <compat/mach/mach_task.h>
54 #include <compat/mach/mach_thread.h>
55 #include <compat/mach/mach_errno.h>
56 #include <compat/mach/mach_services.h>
57 #include <compat/mach/mach_syscallargs.h>
58
/*
 * Mach thread_switch() trap: voluntarily hand the CPU to another thread.
 * The target thread ("thread_name") cannot currently be resolved to a
 * struct proc/lwp, so the option only controls how the *caller* yields.
 * Always returns 0 to the emulated process.
 */
int
mach_sys_syscall_thread_switch(struct lwp *l, const struct mach_sys_syscall_thread_switch_args *uap, register_t *retval)
{
	/* {
		syscallarg(mach_port_name_t) thread_name;
		syscallarg(int) option;
		syscallarg(mach_msg_timeout_t) option_time;
	} */
	int timeout;
	struct mach_emuldata *med;

	med = (struct mach_emuldata *)l->l_proc->p_emuldata;
	/*
	 * option_time is in milliseconds; convert to clock ticks for
	 * tsleep().  NOTE(review): values below 1000/hz truncate to 0,
	 * which tsleep() treats as "no timeout" — confirm intended.
	 */
	timeout = SCARG(uap, option_time) * hz / 1000;

	/*
	 * The day we will be able to find out the struct proc from
	 * the port number, try to use preempt() to call the right thread.
	 * [- but preempt() is for _involuntary_ context switches.]
	 */
	switch(SCARG(uap, option)) {
	case MACH_SWITCH_OPTION_NONE:
		/* Plain voluntary yield of the CPU. */
		yield();
		break;

	case MACH_SWITCH_OPTION_WAIT:
		/*
		 * Block until some other path clears med_thpri and wakes
		 * us on &med->med_thpri.  The loop guards against spurious
		 * wakeups; PCATCH lets signals interrupt the sleep.
		 */
		med->med_thpri = 1;
		while (med->med_thpri != 0)
			(void)tsleep(&med->med_thpri, PZERO|PCATCH,
			    "thread_switch", timeout);
		break;

	case MACH_SWITCH_OPTION_DEPRESS:
	case MACH_SWITCH_OPTION_IDLE:
		/* Use a callout to restore the priority after depression? */
		/*
		 * Depress the caller's scheduling priority; the old
		 * priority is stashed in med_thpri but nothing restores
		 * it yet (see question above).
		 */
		med->med_thpri = l->l_priority;
		l->l_priority = MAXPRI;
		break;

	default:
		uprintf("mach_sys_syscall_thread_switch(): unknown option %d\n", SCARG(uap, option));
		break;
	}
	return 0;
}
103
/*
 * Mach swtch_pri() trap: yield the CPU, returning whether other
 * threads were runnable (the mi_switch(9) return value).
 * The "pri" argument is accepted but ignored.
 */
int
mach_sys_swtch_pri(struct lwp *l, const struct mach_sys_swtch_pri_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) pri;
	} */

	/*
	 * Copied from preempt(9).  We cannot just call preempt
	 * because we want to return mi_switch(9) return value.
	 * Lock order: drop all kernel locks, take the lwp lock, then
	 * mi_switch() releases the lwp lock as part of the switch.
	 */
	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	if (l->l_stat == LSONPROC)
		l->l_proc->p_stats->p_ru.ru_nivcsw++;	/* XXXSMP */
	*retval = mi_switch(l);
	/* Reacquire the kernel locks we released above. */
	KERNEL_LOCK(l->l_biglocks, l);

	return 0;
}
124
125 int
126 mach_sys_swtch(struct lwp *l, const void *v, register_t *retval)
127 {
128 struct mach_sys_swtch_pri_args cup;
129
130 SCARG(&cup, pri) = 0;
131
132 return mach_sys_swtch_pri(l, &cup, retval);
133 }
134
135
136 int
137 mach_thread_policy(struct mach_trap_args *args)
138 {
139 mach_thread_policy_request_t *req = args->smsg;
140 mach_thread_policy_reply_t *rep = args->rmsg;
141 size_t *msglen = args->rsize;
142 int end_offset;
143
144 /* Sanity check req_count */
145 end_offset = req->req_count +
146 (sizeof(req->req_setlimit) / sizeof(req->req_base[0]));
147 if (MACH_REQMSG_OVERFLOW(args, req->req_base[end_offset]))
148 return mach_msg_error(args, EINVAL);
149
150 uprintf("Unimplemented mach_thread_policy\n");
151
152 *msglen = sizeof(*rep);
153 mach_set_header(rep, req, *msglen);
154
155 rep->rep_retval = 0;
156
157 mach_set_trailer(rep, *msglen);
158
159 return 0;
160 }
161
/* XXX it might be possible to use this on another task */
/*
 * thread_create_running message: create a new LWP in the current
 * process, hand it the Mach register state from the request, make it
 * runnable, and reply with a send right to its kernel port.
 */
int
mach_thread_create_running(struct mach_trap_args *args)
{
	mach_thread_create_running_request_t *req = args->smsg;
	mach_thread_create_running_reply_t *rep = args->rmsg;
	size_t *msglen = args->rsize;
	struct lwp *l = args->l;
	struct proc *p = l->l_proc;
	struct mach_create_thread_child_args mctc;
	struct mach_right *child_mr;
	struct mach_lwp_emuldata *mle;
	vaddr_t uaddr;
	int flags;
	int error;
	int inmem;
	int end_offset;

	/* Sanity check req_count: state array must fit in the message. */
	end_offset = req->req_count;
	if (MACH_REQMSG_OVERFLOW(args, req->req_state[end_offset]))
		return mach_msg_error(args, EINVAL);

	/*
	 * Prepare the data we want to transmit to the child.
	 * mctc lives on our stack; see the handshake below that keeps
	 * it alive until the child is done with it.
	 */
	mctc.mctc_flavor = req->req_flavor;
	mctc.mctc_oldlwp = l;
	mctc.mctc_child_done = 0;
	mctc.mctc_state = req->req_state;

	/* Allocate the kernel stack / u-area for the new LWP. */
	inmem = uvm_uarea_alloc(&uaddr);
	if (__predict_false(uaddr == 0))
		return (ENOMEM);

	flags = 0;
	if ((error = lwp_create(l, p, uaddr, inmem, flags, NULL, 0,
	    mach_create_thread_child, (void *)&mctc, &mctc.mctc_lwp,
	    SCHED_OTHER)) != 0)
	{
		/* Creation failed: give the u-area back. */
		uvm_uarea_free(uaddr, curcpu());
		return mach_msg_error(args, error);
	}

	/*
	 * Make the child runnable.
	 * Lock order: proc lock, then the child's lwp lock.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(mctc.mctc_lwp);
	mctc.mctc_lwp->l_private = 0;
	mctc.mctc_lwp->l_stat = LSRUN;
	sched_enqueue(mctc.mctc_lwp, false);
	p->p_nrlwps++;
	lwp_unlock(mctc.mctc_lwp);
	mutex_exit(p->p_lock);

	/*
	 * Get the child's kernel port
	 */
	mle = mctc.mctc_lwp->l_emuldata;
	child_mr = mach_right_get(mle->mle_kernel, l, MACH_PORT_TYPE_SEND, 0);

	/*
	 * The child relies on some values in mctc, so we should not
	 * exit until it is finished with it. We catch signals so that
	 * the process can be killed with kill -9, but we loop to avoid
	 * spurious wakeups due to other signals.
	 */
	while(mctc.mctc_child_done == 0)
		(void)tsleep(&mctc.mctc_child_done,
		    PZERO|PCATCH, "mach_thread", 0);

	/* Reply carries a port descriptor for the child's kernel port. */
	*msglen = sizeof(*rep);
	mach_set_header(rep, req, *msglen);
	mach_add_port_desc(rep, child_mr->mr_name);
	mach_set_trailer(rep, *msglen);

	return 0;
}
241
242 int
243 mach_thread_info(struct mach_trap_args *args)
244 {
245 mach_thread_info_request_t *req = args->smsg;
246 mach_thread_info_reply_t *rep = args->rmsg;
247 size_t *msglen = args->rsize;
248 struct lwp *l = args->l;
249 struct lwp *tl = args->tl;
250 struct proc *tp = tl->l_proc;
251
252 /* Sanity check req->req_count */
253 if (req->req_count > 12)
254 return mach_msg_error(args, EINVAL);
255
256 rep->rep_count = req->req_count;
257
258 *msglen = sizeof(*rep) + ((req->req_count - 12) * sizeof(int));
259 mach_set_header(rep, req, *msglen);
260
261 switch (req->req_flavor) {
262 case MACH_THREAD_BASIC_INFO: {
263 struct mach_thread_basic_info *tbi;
264
265 if (req->req_count != (sizeof(*tbi) / sizeof(int))) /* 10 */
266 return mach_msg_error(args, EINVAL);
267
268 tbi = (struct mach_thread_basic_info *)rep->rep_out;
269 tbi->user_time.seconds = tp->p_uticks * hz / 1000000;
270 tbi->user_time.microseconds =
271 (tp->p_uticks) * hz - tbi->user_time.seconds;
272 tbi->system_time.seconds = tp->p_sticks * hz / 1000000;
273 tbi->system_time.microseconds =
274 (tp->p_sticks) * hz - tbi->system_time.seconds;
275 tbi->cpu_usage = tp->p_pctcpu;
276 tbi->policy = MACH_THREAD_STANDARD_POLICY;
277
278 /* XXX this is not very accurate */
279 tbi->run_state = MACH_TH_STATE_RUNNING;
280 tbi->flags = 0;
281 switch (l->l_stat) {
282 case LSRUN:
283 tbi->run_state = MACH_TH_STATE_RUNNING;
284 break;
285 case LSSTOP:
286 tbi->run_state = MACH_TH_STATE_STOPPED;
287 break;
288 case LSSLEEP:
289 tbi->run_state = MACH_TH_STATE_WAITING;
290 break;
291 case LSIDL:
292 tbi->run_state = MACH_TH_STATE_RUNNING;
293 tbi->flags = MACH_TH_FLAGS_IDLE;
294 break;
295 default:
296 break;
297 }
298
299 tbi->suspend_count = 0;
300 tbi->sleep_time = tl->l_slptime;
301 break;
302 }
303
304 case MACH_THREAD_SCHED_TIMESHARE_INFO: {
305 struct mach_policy_timeshare_info *pti;
306
307 if (req->req_count != (sizeof(*pti) / sizeof(int))) /* 5 */
308 return mach_msg_error(args, EINVAL);
309
310 pti = (struct mach_policy_timeshare_info *)rep->rep_out;
311
312 pti->max_priority = tl->l_priority;
313 pti->base_priority = tl->l_priority;
314 pti->cur_priority = tl->l_priority;
315 pti->depressed = 0;
316 pti->depress_priority = tl->l_priority;
317 break;
318 }
319
320 case MACH_THREAD_SCHED_RR_INFO:
321 case MACH_THREAD_SCHED_FIFO_INFO:
322 uprintf("Unimplemented thread_info flavor %d\n",
323 req->req_flavor);
324 default:
325 return mach_msg_error(args, EINVAL);
326 break;
327 }
328
329 mach_set_trailer(rep, *msglen);
330
331 return 0;
332 }
333
/*
 * thread_get_state message: fetch the target thread's register state
 * via the machine-dependent backend and return it in the reply.
 */
int
mach_thread_get_state(struct mach_trap_args *args)
{
	mach_thread_get_state_request_t *req = args->smsg;
	mach_thread_get_state_reply_t *rep = args->rmsg;
	size_t *msglen = args->rsize;
	struct lwp *tl = args->tl;
	int error;
	int size;

	/* Sanity check req->req_count: rep_state holds at most 144 ints. */
	if (req->req_count > 144)
		return mach_msg_error(args, EINVAL);

	/* size is set by the machdep code to the bytes actually written. */
	if ((error = mach_thread_get_state_machdep(tl,
	    req->req_flavor, &rep->rep_state, &size)) != 0)
		return mach_msg_error(args, error);

	rep->rep_count = size / sizeof(int);
	/*
	 * NOTE(review): the reply length is trimmed by req_count, the
	 * *requested* size, while rep_count above reflects the size the
	 * machdep code actually returned.  If these differ the reply
	 * length and rep_count disagree — confirm against the Mach
	 * message format before changing.
	 */
	*msglen = sizeof(*rep) + ((req->req_count - 144) * sizeof(int));
	mach_set_header(rep, req, *msglen);
	mach_set_trailer(rep, *msglen);

	return 0;
}
359
360 int
361 mach_thread_set_state(struct mach_trap_args *args)
362 {
363 mach_thread_set_state_request_t *req = args->smsg;
364 mach_thread_set_state_reply_t *rep = args->rmsg;
365 size_t *msglen = args->rsize;
366 struct lwp *tl = args->tl;
367 int error;
368 int end_offset;
369
370 /* Sanity check req_count */
371 end_offset = req->req_count;
372 if (MACH_REQMSG_OVERFLOW(args, req->req_state[end_offset]))
373 return mach_msg_error(args, EINVAL);
374
375 if ((error = mach_thread_set_state_machdep(tl,
376 req->req_flavor, &req->req_state)) != 0)
377 return mach_msg_error(args, error);
378
379 *msglen = sizeof(*rep);
380 mach_set_header(rep, req, *msglen);
381
382 rep->rep_retval = 0;
383
384 mach_set_trailer(rep, *msglen);
385
386 return 0;
387 }
388
/*
 * thread_suspend message: suspend the target thread and reply with the
 * Mach translation of the resulting errno.
 */
int
mach_thread_suspend(struct mach_trap_args *args)
{
	mach_thread_suspend_request_t *req = args->smsg;
	mach_thread_suspend_reply_t *rep = args->rmsg;
	size_t *msglen = args->rsize;
	struct lwp *l = args->l;
	struct lwp *tl = args->tl;
	struct proc *p = tl->l_proc;
	int error;

	/*
	 * Lock order: target's proc lock, then its lwp lock.
	 * lwp_suspend() is entered with both held; presumably it
	 * releases the lwp lock itself, since only p_lock is dropped
	 * here — TODO confirm against lwp_suspend(9).
	 */
	mutex_enter(p->p_lock);
	lwp_lock(tl);
	error = lwp_suspend(l, tl);
	mutex_exit(p->p_lock);

	*msglen = sizeof(*rep);
	mach_set_header(rep, req, *msglen);
	/* Translate the native errno into a Mach kern_return_t. */
	rep->rep_retval = native_to_mach_errno[error];
	mach_set_trailer(rep, *msglen);

	return 0;
}
412
/*
 * thread_resume message: resume (continue) the target thread.
 * Always replies with success.
 */
int
mach_thread_resume(struct mach_trap_args *args)
{
	mach_thread_resume_request_t *req = args->smsg;
	mach_thread_resume_reply_t *rep = args->rmsg;
	size_t *msglen = args->rsize;
	struct lwp *tl = args->tl;
	struct proc *p = tl->l_proc;

	/*
	 * Lock order: target's proc lock, then its lwp lock.
	 * lwp_continue() presumably releases the lwp lock, since only
	 * p_lock is dropped here — TODO confirm against lwp_continue(9).
	 */
	mutex_enter(p->p_lock);
	lwp_lock(tl);
	lwp_continue(tl);
	mutex_exit(p->p_lock);

	*msglen = sizeof(*rep);
	mach_set_header(rep, req, *msglen);
	rep->rep_retval = 0;
	mach_set_trailer(rep, *msglen);

	return 0;
}
434
/*
 * thread_abort message: terminate the target thread and reply with
 * success.
 */
int
mach_thread_abort(struct mach_trap_args *args)
{
	mach_thread_abort_request_t *req = args->smsg;
	mach_thread_abort_reply_t *rep = args->rmsg;
	size_t *msglen = args->rsize;
	struct lwp *tl = args->tl;

	/*
	 * NOTE(review): lwp_exit() is normally invoked by the exiting
	 * LWP itself; calling it on another thread (tl) and then
	 * building a reply afterwards looks suspect — verify this path
	 * against lwp_exit(9) semantics.
	 */
	lwp_exit(tl);

	*msglen = sizeof(*rep);
	mach_set_header(rep, req, *msglen);
	rep->rep_retval = 0;
	mach_set_trailer(rep, *msglen);

	return 0;
}
452
453 int
454 mach_thread_set_policy(struct mach_trap_args *args)
455 {
456 mach_thread_set_policy_request_t *req = args->smsg;
457 mach_thread_set_policy_reply_t *rep = args->rmsg;
458 size_t *msglen = args->rsize;
459 struct lwp *tl = args->tl;
460 mach_port_t mn;
461 struct mach_right *mr;
462 int limit_count_offset, limit_offset;
463 int limit_count;
464 int *limit;
465
466 limit_count_offset = req->req_base_count;
467 if (MACH_REQMSG_OVERFLOW(args, req->req_base[limit_count_offset]))
468 return mach_msg_error(args, EINVAL);
469
470 limit_count = req->req_base[limit_count_offset];
471 limit_offset = limit_count_offset +
472 (sizeof(req->req_limit_count) / sizeof(req->req_base[0]));
473 limit = &req->req_base[limit_offset];
474 if (MACH_REQMSG_OVERFLOW(args, limit[limit_count]))
475 return mach_msg_error(args, EINVAL);
476
477 mn = req->req_pset.name;
478 if ((mr = mach_right_check(mn, tl, MACH_PORT_TYPE_ALL_RIGHTS)) == NULL)
479 return mach_msg_error(args, EINVAL);
480
481 *msglen = sizeof(*rep);
482 mach_set_header(rep, req, *msglen);
483 rep->rep_retval = 0;
484 mach_set_trailer(rep, *msglen);
485
486 return 0;
487 }
488
Cache object: 34e64101c251f73e3fb35f64e8ee19c4
|