FreeBSD/Linux Kernel Cross Reference
sys/kern/sys_lwp.c
1 /* $NetBSD: sys_lwp.c,v 1.43.4.1 2010/11/21 17:36:45 riz Exp $ */
2
3 /*-
4 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Nathan J. Williams, and Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Lightweight process (LWP) system calls. See kern_lwp.c for a description
34 * of LWPs.
35 */
36
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: sys_lwp.c,v 1.43.4.1 2010/11/21 17:36:45 riz Exp $");
39
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/pool.h>
43 #include <sys/proc.h>
44 #include <sys/types.h>
45 #include <sys/syscallargs.h>
46 #include <sys/kauth.h>
47 #include <sys/kmem.h>
48 #include <sys/sleepq.h>
49 #include <sys/lwpctl.h>
50
51 #include <uvm/uvm_extern.h>
52
53 #include "opt_sa.h"
54
55 #define LWP_UNPARK_MAX 1024
56
57 syncobj_t lwp_park_sobj = {
58 SOBJ_SLEEPQ_LIFO,
59 sleepq_unsleep,
60 sleepq_changepri,
61 sleepq_lendpri,
62 syncobj_noowner,
63 };
64
65 sleeptab_t lwp_park_tab;
66
67 void
68 lwp_sys_init(void)
69 {
70 sleeptab_init(&lwp_park_tab);
71 }
72
73 /* ARGSUSED */
74 int
75 sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap, register_t *retval)
76 {
77 /* {
78 syscallarg(const ucontext_t *) ucp;
79 syscallarg(u_long) flags;
80 syscallarg(lwpid_t *) new_lwp;
81 } */
82 struct proc *p = l->l_proc;
83 struct lwp *l2;
84 struct schedstate_percpu *spc;
85 vaddr_t uaddr;
86 bool inmem;
87 ucontext_t *newuc;
88 int error, lid;
89
90 #ifdef KERN_SA
91 mutex_enter(p->p_lock);
92 if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
93 mutex_exit(p->p_lock);
94 return EINVAL;
95 }
96 mutex_exit(p->p_lock);
97 #endif
98
99 newuc = pool_get(&lwp_uc_pool, PR_WAITOK);
100
101 error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
102 if (error) {
103 pool_put(&lwp_uc_pool, newuc);
104 return error;
105 }
106
107 /* XXX check against resource limits */
108
109 inmem = uvm_uarea_alloc(&uaddr);
110 if (__predict_false(uaddr == 0)) {
111 pool_put(&lwp_uc_pool, newuc);
112 return ENOMEM;
113 }
114
115 error = lwp_create(l, p, uaddr, inmem, SCARG(uap, flags) & LWP_DETACHED,
116 NULL, 0, p->p_emul->e_startlwp, newuc, &l2, l->l_class);
117 if (error) {
118 uvm_uarea_free(uaddr, curcpu());
119 pool_put(&lwp_uc_pool, newuc);
120 return error;
121 }
122
123 lid = l2->l_lid;
124 error = copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
125 if (error) {
126 lwp_exit(l2);
127 pool_put(&lwp_uc_pool, newuc);
128 return error;
129 }
130
131 /*
132 * Set the new LWP running, unless the caller has requested that
133 * it be created in suspended state. If the process is stopping,
134 * then the LWP is created stopped.
135 */
136 mutex_enter(p->p_lock);
137 lwp_lock(l2);
138 spc = &l2->l_cpu->ci_schedstate;
139 if ((SCARG(uap, flags) & LWP_SUSPENDED) == 0 &&
140 (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
141 if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
142 KASSERT(l2->l_wchan == NULL);
143 l2->l_stat = LSSTOP;
144 lwp_unlock_to(l2, spc->spc_lwplock);
145 } else {
146 KASSERT(lwp_locked(l2, spc->spc_mutex));
147 p->p_nrlwps++;
148 l2->l_stat = LSRUN;
149 sched_enqueue(l2, false);
150 lwp_unlock(l2);
151 }
152 } else {
153 l2->l_stat = LSSUSPENDED;
154 lwp_unlock_to(l2, spc->spc_lwplock);
155 }
156 mutex_exit(p->p_lock);
157
158 return 0;
159 }
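The create path above is normally driven through the libc wrappers documented in _lwp_create(2), _lwp_makecontext(3) and _lwp_wait(2). The fragment below is a hedged userland illustration of that flow, not part of sys_lwp.c itself; names and error handling are only indicative.

/* Hypothetical userland example: create one LWP, then reap it. */
#include <lwp.h>
#include <ucontext.h>
#include <err.h>
#include <stdio.h>

static char stack[65536];
static char msg[] = "hello";

static void
worker(void *arg)
{
	printf("lwp %d got %s\n", (int)_lwp_self(), (const char *)arg);
	_lwp_exit();
}

int
main(void)
{
	ucontext_t uc;
	lwpid_t lid, dead;

	/* Describe the new LWP's entry point and stack in a ucontext. */
	getcontext(&uc);
	_lwp_makecontext(&uc, worker, msg, NULL, stack, sizeof(stack));

	/* sys__lwp_create() copies the ucontext in and starts the LWP. */
	if (_lwp_create(&uc, 0, &lid) != 0)
		err(1, "_lwp_create");

	/* sys__lwp_wait() (further down in this file) reaps it. */
	if (_lwp_wait(lid, &dead) != 0)
		err(1, "_lwp_wait");
	return 0;
}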
160
161 int
162 sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
163 {
164
165 lwp_exit(l);
166 return 0;
167 }
168
169 int
170 sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
171 {
172
173 *retval = l->l_lid;
174 return 0;
175 }
176
177 int
178 sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
179 {
180
181 *retval = (uintptr_t)l->l_private;
182 return 0;
183 }
184
185 int
186 sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap, register_t *retval)
187 {
188 /* {
189 syscallarg(void *) ptr;
190 } */
191
192 l->l_private = SCARG(uap, ptr);
193 return 0;
194 }
195
196 int
197 sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap, register_t *retval)
198 {
199 /* {
200 syscallarg(lwpid_t) target;
201 } */
202 struct proc *p = l->l_proc;
203 struct lwp *t;
204 int error;
205
206 mutex_enter(p->p_lock);
207
208 #ifdef KERN_SA
209 if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
210 mutex_exit(p->p_lock);
211 return EINVAL;
212 }
213 #endif
214
215 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
216 mutex_exit(p->p_lock);
217 return ESRCH;
218 }
219
220 /*
221 * Check for deadlock, which is only possible when we're suspending
222 * ourself. XXX There is a short race here, as p_nrlwps is only
223 * incremented when an LWP suspends itself on the kernel/user
224 * boundary. It's still possible to kill -9 the process so we
225 * don't bother checking further.
226 */
227 lwp_lock(t);
228 if ((t == l && p->p_nrlwps == 1) ||
229 (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
230 lwp_unlock(t);
231 mutex_exit(p->p_lock);
232 return EDEADLK;
233 }
234
235 /*
236 * Suspend the LWP. XXX If it's on a different CPU, we should wait
237 * for it to be preempted, where it will put itself to sleep.
238 *
239 * Suspension of the current LWP will happen on return to userspace.
240 */
241 error = lwp_suspend(l, t);
242 if (error) {
243 mutex_exit(p->p_lock);
244 return error;
245 }
246
247 /*
248 * Wait for:
249 * o process exiting
250 * o target LWP suspended
251 * o target LWP not suspended and L_WSUSPEND clear
252 * o target LWP exited
253 */
254 for (;;) {
255 error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
256 if (error) {
257 error = ERESTART;
258 break;
259 }
260 if (lwp_find(p, SCARG(uap, target)) == NULL) {
261 error = ESRCH;
262 break;
263 }
264 if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
265 error = ERESTART;
266 break;
267 }
268 if (t->l_stat == LSSUSPENDED ||
269 (t->l_flag & LW_WSUSPEND) == 0)
270 break;
271 }
272 mutex_exit(p->p_lock);
273
274 return error;
275 }
276
277 int
278 sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap, register_t *retval)
279 {
280 /* {
281 syscallarg(lwpid_t) target;
282 } */
283 int error;
284 struct proc *p = l->l_proc;
285 struct lwp *t;
286
287 error = 0;
288
289 mutex_enter(p->p_lock);
290 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
291 mutex_exit(p->p_lock);
292 return ESRCH;
293 }
294
295 lwp_lock(t);
296 lwp_continue(t);
297 mutex_exit(p->p_lock);
298
299 return error;
300 }
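Taken together, the two entry points above give userland a stop/resume pair. A minimal hedged sketch of their use via the _lwp_suspend(2)/_lwp_continue(2) wrappers follows; the helper name is hypothetical.

/* Hypothetical example: quiesce another LWP in this process, then resume it. */
#include <lwp.h>
#include <err.h>

void
with_target_stopped(lwpid_t target)
{
	/*
	 * _lwp_suspend() does not return success until the target is no
	 * longer running (see the wait loop in sys__lwp_suspend() above).
	 */
	if (_lwp_suspend(target) != 0)
		err(1, "_lwp_suspend");

	/* ... examine state owned by the target while it is stopped ... */

	if (_lwp_continue(target) != 0)
		err(1, "_lwp_continue");
}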
301
302 int
303 sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap, register_t *retval)
304 {
305 /* {
306 syscallarg(lwpid_t) target;
307 } */
308 struct lwp *t;
309 struct proc *p;
310 int error;
311
312 p = l->l_proc;
313 mutex_enter(p->p_lock);
314
315 if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
316 mutex_exit(p->p_lock);
317 return ESRCH;
318 }
319
320 lwp_lock(t);
321 t->l_flag |= (LW_CANCELLED | LW_UNPARKED);
322
323 if (t->l_stat != LSSLEEP) {
324 lwp_unlock(t);
325 error = ENODEV;
326 } else if ((t->l_flag & LW_SINTR) == 0) {
327 lwp_unlock(t);
328 error = EBUSY;
329 } else {
330 /* Wake it up. lwp_unsleep() will release the LWP lock. */
331 (void)lwp_unsleep(t, true);
332 error = 0;
333 }
334
335 mutex_exit(p->p_lock);
336
337 return error;
338 }
339
340 int
341 sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap, register_t *retval)
342 {
343 /* {
344 syscallarg(lwpid_t) wait_for;
345 syscallarg(lwpid_t *) departed;
346 } */
347 struct proc *p = l->l_proc;
348 int error;
349 lwpid_t dep;
350
351 mutex_enter(p->p_lock);
352 error = lwp_wait1(l, SCARG(uap, wait_for), &dep, 0);
353 mutex_exit(p->p_lock);
354
355 if (error)
356 return error;
357
358 if (SCARG(uap, departed)) {
359 error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
360 if (error)
361 return error;
362 }
363
364 return 0;
365 }
366
367 /* ARGSUSED */
368 int
369 sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap, register_t *retval)
370 {
371 /* {
372 syscallarg(lwpid_t) target;
373 syscallarg(int) signo;
374 } */
375 struct proc *p = l->l_proc;
376 struct lwp *t;
377 ksiginfo_t ksi;
378 int signo = SCARG(uap, signo);
379 int error = 0;
380
381 if ((u_int)signo >= NSIG)
382 return EINVAL;
383
384 KSI_INIT(&ksi);
385 ksi.ksi_signo = signo;
386 ksi.ksi_code = SI_LWP;
387 ksi.ksi_pid = p->p_pid;
388 ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
389 ksi.ksi_lid = SCARG(uap, target);
390
391 mutex_enter(proc_lock);
392 mutex_enter(p->p_lock);
393 if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
394 error = ESRCH;
395 else if (signo != 0)
396 kpsignal2(p, &ksi);
397 mutex_exit(p->p_lock);
398 mutex_exit(proc_lock);
399
400 return error;
401 }
402
403 int
404 sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap, register_t *retval)
405 {
406 /* {
407 syscallarg(lwpid_t) target;
408 } */
409 struct proc *p;
410 struct lwp *t;
411 lwpid_t target;
412 int error;
413
414 target = SCARG(uap, target);
415 p = l->l_proc;
416
417 mutex_enter(p->p_lock);
418
419 if (l->l_lid == target)
420 t = l;
421 else {
422 /*
423 * We can't use lwp_find() here because the target might
424 * be a zombie.
425 */
426 LIST_FOREACH(t, &p->p_lwps, l_sibling)
427 if (t->l_lid == target)
428 break;
429 }
430
431 /*
432 * If the LWP is already detached, there's nothing to do.
433 * If it's a zombie, we need to clean up after it. LSZOMB
434 * is visible with the proc mutex held.
435 *
436 * After we have detached or released the LWP, kick any
437 * other LWPs that may be sitting in _lwp_wait(), waiting
438 * for the target LWP to exit.
439 */
440 if (t != NULL && t->l_stat != LSIDL) {
441 if ((t->l_prflag & LPR_DETACHED) == 0) {
442 p->p_ndlwps++;
443 t->l_prflag |= LPR_DETACHED;
444 if (t->l_stat == LSZOMB) {
445 /* Releases proc mutex. */
446 lwp_free(t, false, false);
447 return 0;
448 }
449 error = 0;
450
451 /*
452 * Have any LWPs sleeping in lwp_wait() recheck
453 * for deadlock.
454 */
455 cv_broadcast(&p->p_lwpcv);
456 } else
457 error = EINVAL;
458 } else
459 error = ESRCH;
460
461 mutex_exit(p->p_lock);
462
463 return error;
464 }
465
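/*
 * Park and unpark requests rendezvous through a shared sleep table,
 * lwp_park_tab.  The wait channel is derived from the owning process
 * and the user-supplied hint, so that a parking LWP and the LWP that
 * later unparks it (naming the same hint) hash to the same sleep queue.
 */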
466 static inline wchan_t
467 lwp_park_wchan(struct proc *p, const void *hint)
468 {
469
470 return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
471 }
472
473 int
474 lwp_unpark(lwpid_t target, const void *hint)
475 {
476 sleepq_t *sq;
477 wchan_t wchan;
478 int swapin;
479 kmutex_t *mp;
480 proc_t *p;
481 lwp_t *t;
482
483 /*
484 * Easy case: search for the LWP on the sleep queue. If
485 * it's parked, remove it from the queue and set running.
486 */
487 p = curproc;
488 wchan = lwp_park_wchan(p, hint);
489 sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);
490
491 TAILQ_FOREACH(t, sq, l_sleepchain)
492 if (t->l_proc == p && t->l_lid == target)
493 break;
494
495 if (__predict_true(t != NULL)) {
496 swapin = sleepq_remove(sq, t);
497 mutex_spin_exit(mp);
498 if (swapin)
499 uvm_kick_scheduler();
500 return 0;
501 }
502
503 /*
504 * The LWP hasn't parked yet. Take the hit and mark the
505 * operation as pending.
506 */
507 mutex_spin_exit(mp);
508
509 mutex_enter(p->p_lock);
510 if ((t = lwp_find(p, target)) == NULL) {
511 mutex_exit(p->p_lock);
512 return ESRCH;
513 }
514
515 /*
516 * It may not have parked yet, we may have raced, or it
517 * is parked on a different user sync object.
518 */
519 lwp_lock(t);
520 if (t->l_syncobj == &lwp_park_sobj) {
521 /* Releases the LWP lock. */
522 (void)lwp_unsleep(t, true);
523 } else {
524 /*
525 * Set the operation pending. The next call to _lwp_park
526 * will return early.
527 */
528 t->l_flag |= LW_UNPARKED;
529 lwp_unlock(t);
530 }
531
532 mutex_exit(p->p_lock);
533 return 0;
534 }
535
536 int
537 lwp_park(struct timespec *ts, const void *hint)
538 {
539 struct timespec tsx;
540 sleepq_t *sq;
541 kmutex_t *mp;
542 wchan_t wchan;
543 int timo, error;
544 lwp_t *l;
545
546 /* Fix up the given timeout value. */
547 if (ts != NULL) {
548 getnanotime(&tsx);
549 timespecsub(ts, &tsx, &tsx);
550 if (tsx.tv_sec < 0 || (tsx.tv_sec == 0 && tsx.tv_nsec <= 0))
551 return ETIMEDOUT;
552 if ((error = itimespecfix(&tsx)) != 0)
553 return error;
554 timo = tstohz(&tsx);
555 KASSERT(timo != 0);
556 } else
557 timo = 0;
558
559 /* Find and lock the sleep queue. */
560 l = curlwp;
561 wchan = lwp_park_wchan(l->l_proc, hint);
562 sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);
563
564 /*
565 * Before going the full route and blocking, check to see if an
566 * unpark op is pending.
567 */
568 lwp_lock(l);
569 if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
570 l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
571 lwp_unlock(l);
572 mutex_spin_exit(mp);
573 return EALREADY;
574 }
575 lwp_unlock_to(l, mp);
576 l->l_biglocks = 0;
577 sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
578 error = sleepq_block(timo, true);
579 switch (error) {
580 case EWOULDBLOCK:
581 error = ETIMEDOUT;
582 break;
583 case ERESTART:
584 error = EINTR;
585 break;
586 default:
587 /* nothing */
588 break;
589 }
590 return error;
591 }
592
593 /*
594 * 'park' an LWP waiting on a user-level synchronisation object. The LWP
595 * will remain parked until another LWP in the same process calls in and
596 * requests that it be unparked.
597 */
598 int
599 sys__lwp_park(struct lwp *l, const struct sys__lwp_park_args *uap, register_t *retval)
600 {
601 /* {
602 syscallarg(const struct timespec *) ts;
603 syscallarg(lwpid_t) unpark;
604 syscallarg(const void *) hint;
605 syscallarg(const void *) unparkhint;
606 } */
607 struct timespec ts, *tsp;
608 int error;
609
610 if (SCARG(uap, ts) == NULL)
611 tsp = NULL;
612 else {
613 error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
614 if (error != 0)
615 return error;
616 tsp = &ts;
617 }
618
619 if (SCARG(uap, unpark) != 0) {
620 error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
621 if (error != 0)
622 return error;
623 }
624
625 return lwp_park(tsp, SCARG(uap, hint));
626 }
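sys__lwp_park() and the unpark calls below are the kernel half of userspace synchronisation: a thread library parks a thread that must block on a mutex or condition variable and has another thread unpark it. The sketch below is a hedged illustration built on the raw _lwp_park(2)/_lwp_unpark(2) wrappers in their four-argument form matching this revision; the event structure and its memory ordering are deliberately simplified.

/* Hypothetical userland sketch: a one-shot event built on park/unpark. */
#include <sys/types.h>
#include <lwp.h>
#include <stdbool.h>

struct event {
	volatile bool	 posted;	/* set once by the waker */
	volatile lwpid_t waiter;	/* LWP id of the single waiter */
};

void
event_wait(struct event *ev)
{
	ev->waiter = _lwp_self();
	while (!ev->posted) {
		/*
		 * Block until unparked.  The event address is passed as the
		 * "hint", so parker and unparker meet on the same sleep
		 * queue; EALREADY or EINTR simply mean "recheck".
		 */
		(void)_lwp_park(NULL, 0, ev, NULL);
	}
}

void
event_post(struct event *ev)
{
	ev->posted = true;
	/* If the waiter has not parked yet, the wakeup is left pending. */
	(void)_lwp_unpark(ev->waiter, ev);
}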
627
628 int
629 sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap, register_t *retval)
630 {
631 /* {
632 syscallarg(lwpid_t) target;
633 syscallarg(const void *) hint;
634 } */
635
636 return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
637 }
638
639 int
640 sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap, register_t *retval)
641 {
642 /* {
643 syscallarg(const lwpid_t *) targets;
644 syscallarg(size_t) ntargets;
645 syscallarg(const void *) hint;
646 } */
647 struct proc *p;
648 struct lwp *t;
649 sleepq_t *sq;
650 wchan_t wchan;
651 lwpid_t targets[32], *tp, *tpp, *tmax, target;
652 int swapin, error;
653 kmutex_t *mp;
654 u_int ntargets;
655 size_t sz;
656
657 p = l->l_proc;
658 ntargets = SCARG(uap, ntargets);
659
660 if (SCARG(uap, targets) == NULL) {
661 /*
662 * Let the caller know how much we are willing to do, and
663 * let it unpark the LWPs in blocks.
664 */
665 *retval = LWP_UNPARK_MAX;
666 return 0;
667 }
668 if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
669 return EINVAL;
670
671 /*
672 * Copy in the target array. If it's a small number of LWPs, then
673 * place the numbers on the stack.
674 */
675 sz = sizeof(target) * ntargets;
676 if (sz <= sizeof(targets))
677 tp = targets;
678 else {
679 tp = kmem_alloc(sz, KM_SLEEP);
680 if (tp == NULL)
681 return ENOMEM;
682 }
683 error = copyin(SCARG(uap, targets), tp, sz);
684 if (error != 0) {
685 if (tp != targets) {
686 kmem_free(tp, sz);
687 }
688 return error;
689 }
690
691 swapin = 0;
692 wchan = lwp_park_wchan(p, SCARG(uap, hint));
693 sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);
694
695 for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
696 target = *tpp;
697
698 /*
699 * Easy case: search for the LWP on the sleep queue. If
700 * it's parked, remove it from the queue and set running.
701 */
702 TAILQ_FOREACH(t, sq, l_sleepchain)
703 if (t->l_proc == p && t->l_lid == target)
704 break;
705
706 if (t != NULL) {
707 swapin |= sleepq_remove(sq, t);
708 continue;
709 }
710
711 /*
712 * The LWP hasn't parked yet. Take the hit and
713 * mark the operation as pending.
714 */
715 mutex_spin_exit(mp);
716 mutex_enter(p->p_lock);
717 if ((t = lwp_find(p, target)) == NULL) {
718 mutex_exit(p->p_lock);
719 mutex_spin_enter(mp);
720 continue;
721 }
722 lwp_lock(t);
723
724 /*
725 * It may not have parked yet, we may have raced, or
726 * it is parked on a different user sync object.
727 */
728 if (t->l_syncobj == &lwp_park_sobj) {
729 /* Releases the LWP lock. */
730 (void)lwp_unsleep(t, true);
731 } else {
732 /*
733 * Set the operation pending. The next call to
734 * _lwp_park will return early.
735 */
736 t->l_flag |= LW_UNPARKED;
737 lwp_unlock(t);
738 }
739
740 mutex_exit(p->p_lock);
741 mutex_spin_enter(mp);
742 }
743
744 mutex_spin_exit(mp);
745 if (tp != targets)
746 kmem_free(tp, sz);
747 if (swapin)
748 uvm_kick_scheduler();
749
750 return 0;
751 }
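Because a single call handles at most LWP_UNPARK_MAX targets, a caller with more waiters first queries the limit (NULL targets, as described above) and then issues the wakeups in blocks. A hedged sketch of that protocol; the helper name is hypothetical.

/* Hypothetical example: broadcast a wakeup to an arbitrary number of LWPs. */
#include <lwp.h>
#include <stddef.h>

void
wake_all(const lwpid_t *waiters, size_t nwaiters, const void *hint)
{
	size_t batch;
	ssize_t max;

	/* With NULL targets the call just reports the per-call maximum. */
	max = _lwp_unpark_all(NULL, 0, NULL);
	if (max <= 0)
		return;

	while (nwaiters > 0) {
		batch = nwaiters < (size_t)max ? nwaiters : (size_t)max;
		(void)_lwp_unpark_all(waiters, batch, hint);
		waiters += batch;
		nwaiters -= batch;
	}
}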
752
753 int
754 sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap, register_t *retval)
755 {
756 /* {
757 syscallarg(lwpid_t) target;
758 syscallarg(const char *) name;
759 } */
760 char *name, *oname;
761 lwpid_t target;
762 proc_t *p;
763 lwp_t *t;
764 int error;
765
766 if ((target = SCARG(uap, target)) == 0)
767 target = l->l_lid;
768
769 name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
770 if (name == NULL)
771 return ENOMEM;
772 error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
773 switch (error) {
774 case ENAMETOOLONG:
775 case 0:
776 name[MAXCOMLEN - 1] = '\0';
777 break;
778 default:
779 kmem_free(name, MAXCOMLEN);
780 return error;
781 }
782
783 p = curproc;
784 mutex_enter(p->p_lock);
785 if ((t = lwp_find(p, target)) == NULL) {
786 mutex_exit(p->p_lock);
787 kmem_free(name, MAXCOMLEN);
788 return ESRCH;
789 }
790 lwp_lock(t);
791 oname = t->l_name;
792 t->l_name = name;
793 lwp_unlock(t);
794 mutex_exit(p->p_lock);
795
796 if (oname != NULL)
797 kmem_free(oname, MAXCOMLEN);
798
799 return 0;
800 }
801
802 int
803 sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap, register_t *retval)
804 {
805 /* {
806 syscallarg(lwpid_t) target;
807 syscallarg(char *) name;
808 syscallarg(size_t) len;
809 } */
810 char name[MAXCOMLEN];
811 lwpid_t target;
812 proc_t *p;
813 lwp_t *t;
814
815 if ((target = SCARG(uap, target)) == 0)
816 target = l->l_lid;
817
818 p = curproc;
819 mutex_enter(p->p_lock);
820 if ((t = lwp_find(p, target)) == NULL) {
821 mutex_exit(p->p_lock);
822 return ESRCH;
823 }
824 lwp_lock(t);
825 if (t->l_name == NULL)
826 name[0] = '\0';
827 else
828 strcpy(name, t->l_name);
829 lwp_unlock(t);
830 mutex_exit(p->p_lock);
831
832 return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
833 }
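The two name calls above are reached through the _lwp_setname(2)/_lwp_getname(2) wrappers, with a target of 0 selecting the calling LWP; a brief hedged example:

/* Hypothetical example: label the calling LWP and read the label back. */
#include <sys/param.h>		/* MAXCOMLEN */
#include <lwp.h>

void
label_self(void)
{
	char buf[MAXCOMLEN];

	(void)_lwp_setname(0, "worker");	/* 0 selects the current LWP */
	(void)_lwp_getname(0, buf, sizeof(buf));
}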
834
835 int
836 sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap, register_t *retval)
837 {
838 /* {
839 syscallarg(int) features;
840 syscallarg(struct lwpctl **) address;
841 } */
842 int error, features;
843 vaddr_t vaddr;
844
845 features = SCARG(uap, features);
846 features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
847 if (features != 0)
848 return ENODEV;
849 if ((error = lwp_ctl_alloc(&vaddr)) != 0)
850 return error;
851 return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
852 }
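Finally, _lwp_ctl(2) maps a per-LWP communication block so that the fields named by the requested features (see <sys/lwpctl.h>) can be read without further system calls. A hedged sketch, assuming the documented struct lwpctl layout:

/* Hypothetical example: request the CURCPU feature and read the block. */
#include <sys/lwpctl.h>
#include <lwp.h>
#include <stdio.h>

void
show_cpu(void)
{
	struct lwpctl *lc;

	if (_lwp_ctl(LWPCTL_FEATURE_CURCPU, &lc) != 0)
		return;

	/* The kernel updates lc_curcpu as this LWP migrates between CPUs. */
	printf("running on cpu %d\n", (int)lc->lc_curcpu);
}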