1 /*-
2 * Copyright (c) 1994, Sean Eric Fagan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: src/sys/kern/sys_process.c,v 1.127.2.5 2006/02/28 15:02:51 emaste Exp $");
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/lock.h>
38 #include <sys/mutex.h>
39 #include <sys/syscallsubr.h>
40 #include <sys/sysproto.h>
41 #include <sys/proc.h>
42 #include <sys/vnode.h>
43 #include <sys/ptrace.h>
44 #include <sys/sx.h>
45 #include <sys/malloc.h>
46 #include <sys/signalvar.h>
47
48 #include <machine/reg.h>
49
50 #include <vm/vm.h>
51 #include <vm/pmap.h>
52 #include <vm/vm_extern.h>
53 #include <vm/vm_map.h>
54 #include <vm/vm_kern.h>
55 #include <vm/vm_object.h>
56 #include <vm/vm_page.h>
57
58 /*
59 * Functions implemented using PROC_ACTION():
60 *
61 * proc_read_regs(proc, regs)
62 * Get the current user-visible register set from the process
63 * and copy it into the regs structure (<machine/reg.h>).
64 * The process is stopped at the time read_regs is called.
65 *
66 * proc_write_regs(proc, regs)
67 * Update the current register set from the passed in regs
68 * structure. Take care to avoid clobbering special CPU
69 * registers or privileged bits in the PSL.
70 * Depending on the architecture this may have fix-up work to do,
71 * especially if the IAR or PCW are modified.
72 * The process is stopped at the time write_regs is called.
73 *
74 * proc_read_fpregs, proc_write_fpregs
75 * deal with the floating point register set, otherwise as above.
76 *
77 * proc_read_dbregs, proc_write_dbregs
78 * deal with the processor debug register set, otherwise as above.
79 *
80 * proc_sstep(proc)
81 * Arrange for the process to trap after executing a single instruction.
82 */
83
/*
 * Common body for the proc_*_regs()/proc_sstep() helpers below: run
 * "action" against the thread, but only when the target process image
 * is resident in memory (PS_INMEM); otherwise fail with EIO, since the
 * registers of a swapped-out process cannot be touched.  The caller
 * must already hold the proc lock.  Note the macro returns from the
 * enclosing function.
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
94
int
proc_read_regs(struct thread *td, struct reg *regs)
{

	/* Copy td's user-visible register set into *regs (EIO if swapped out). */
	PROC_ACTION(fill_regs(td, regs));
}
101
int
proc_write_regs(struct thread *td, struct reg *regs)
{

	/*
	 * Install *regs as td's register set; set_regs() is responsible
	 * for filtering privileged PSL bits (see the comment block above).
	 */
	PROC_ACTION(set_regs(td, regs));
}
108
int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* Copy td's debug-register set into *dbregs (EIO if swapped out). */
	PROC_ACTION(fill_dbregs(td, dbregs));
}
115
int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	/* Install *dbregs as td's debug-register set (EIO if swapped out). */
	PROC_ACTION(set_dbregs(td, dbregs));
}
122
/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	/* Copy td's floating-point register set into *fpregs. */
	PROC_ACTION(fill_fpregs(td, fpregs));
}
133
int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	/* Install *fpregs as td's floating-point register set. */
	PROC_ACTION(set_fpregs(td, fpregs));
}
140
int
proc_sstep(struct thread *td)
{

	/* Arrange for td to trap after executing a single instruction. */
	PROC_ACTION(ptrace_single_step(td));
}
147
/*
 * Transfer data between the address space of process p and the buffers
 * described by uio, in the direction given by uio->uio_rw.  Used to
 * implement PT_READ_*/PT_WRITE_* and PT_IO.  The target vmspace is
 * referenced for the duration of the copy so it cannot be deallocated
 * underneath us.  Returns 0 on success or EFAULT if the target address
 * cannot be faulted in / looked up.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, refcnt, writing;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	/*
	 * Take a reference on the vmspace with a CAS loop so we never
	 * resurrect one whose refcount already dropped to zero.
	 */
	do {
		if ((refcnt = vm->vm_refcnt) < 1)
			return (EFAULT);
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));

	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/* VM_PROT_OVERRIDE_WRITE lets the debugger write read-only text. */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			/* vm_fault() returns a KERN_* code; map it to errno. */
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		/*
		 * Walk down the shadow chain looking for the page, using
		 * hand-over-hand object locking.
		 */
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	vmspace_free(vm);
	return (error);
}
280
281 /*
282 * Process debugging system call.
283 */
#ifndef _SYS_SYSPROTO_H_
/*
 * ptrace(2) syscall argument layout; normally generated into
 * <sys/sysproto.h>, declared here only as a fallback.
 */
struct ptrace_args {
	int	req;		/* request code (PT_*) */
	pid_t	pid;		/* target process id (or thread id, see kern_ptrace()) */
	caddr_t	addr;		/* request-specific address argument */
	int	data;		/* request-specific data argument */
};
#endif
292
293 /*
294 * MPSAFE
295 */
296 int
297 ptrace(struct thread *td, struct ptrace_args *uap)
298 {
299 /*
300 * XXX this obfuscation is to reduce stack usage, but the register
301 * structs may be too large to put on the stack anyway.
302 */
303 union {
304 struct ptrace_io_desc piod;
305 struct ptrace_lwpinfo pl;
306 struct dbreg dbreg;
307 struct fpreg fpreg;
308 struct reg reg;
309 } r;
310 void *addr;
311 int error = 0;
312
313 addr = &r;
314 switch (uap->req) {
315 case PT_GETREGS:
316 case PT_GETFPREGS:
317 case PT_GETDBREGS:
318 case PT_LWPINFO:
319 break;
320 case PT_SETREGS:
321 error = copyin(uap->addr, &r.reg, sizeof r.reg);
322 break;
323 case PT_SETFPREGS:
324 error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
325 break;
326 case PT_SETDBREGS:
327 error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
328 break;
329 case PT_IO:
330 error = copyin(uap->addr, &r.piod, sizeof r.piod);
331 break;
332 default:
333 addr = uap->addr;
334 break;
335 }
336 if (error)
337 return (error);
338
339 error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
340 if (error)
341 return (error);
342
343 switch (uap->req) {
344 case PT_IO:
345 (void)copyout(&r.piod, uap->addr, sizeof r.piod);
346 break;
347 case PT_GETREGS:
348 error = copyout(&r.reg, uap->addr, sizeof r.reg);
349 break;
350 case PT_GETFPREGS:
351 error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
352 break;
353 case PT_GETDBREGS:
354 error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
355 break;
356 case PT_LWPINFO:
357 error = copyout(&r.pl, uap->addr, uap->data);
358 break;
359 }
360
361 return (error);
362 }
363
/*
 * Back end for ptrace(2): performs the actual debugging request "req"
 * on process/thread "pid".  For the register and PT_IO/PT_LWPINFO
 * requests, "addr" points at a kernel buffer prepared by the caller;
 * for PT_CONTINUE-family requests it carries the resume PC (or
 * (void *)1 for "continue where stopped").  A pid greater than PID_MAX
 * is interpreted as a thread (lwp) id.
 *
 * Lock order: proctree_lock (for requests that reparent) -> proc lock
 * -> sched_lock.  Returns 0 or an errno.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			/* pfind() returns the process locked. */
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				if (td2 != NULL)
					break; /* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			/* Remember the thread id; pid becomes the owning process. */
			tid = pid;
			pid = p->p_pid;
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	/*
	 * No explicit thread id: operate on the thread that reported the
	 * trace stop, or the first thread if the process is not stopped.
	 */
	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* Diagnostic: stopped but not by a trace event; warn once. */
		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);	/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		/* _PHOLD keeps the process from being swapped out. */
		_PHOLD(p);
		error = ptrace_clear_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SETSTEP:
		_PHOLD(p);
		error = ptrace_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SUSPEND:
		/* Keep the target thread from running on the next continue. */
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags |= TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_RESUME:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		switch (req) {
		case PT_STEP:
			/* Drop the proc lock: the MD step code may sleep. */
			PROC_UNLOCK(p);
			error = ptrace_single_step(td2);
			if (error) {
				/* PRELE, not _PRELE: lock no longer held. */
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		/* addr == (void *)1 means "resume where the process stopped". */
		if (addr != (void *)1) {
			PROC_UNLOCK(p);
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/* Shared resume path for PT_ATTACH, PT_KILL and the cases above. */
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_XSIG;
		mtx_unlock_spin(&sched_lock);
		td2->td_xsig = data;
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			mtx_lock_spin(&sched_lock);
			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_flags &= ~TDF_DBSUSPEND;
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			mtx_unlock_spin(&sched_lock);
			thread_continued(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
		}

		if (data)
			psignal(p, data);

		PROC_UNLOCK(p);
		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		/* Transfer a single int to/from the target at address addr. */
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		/* Bulk transfer described by the ptrace_io_desc at addr. */
		PROC_UNLOCK(p);
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		error = proc_rwmem(p, &uio);
		/* Report back how much was actually transferred. */
		piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = proc_write_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = proc_read_regs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = proc_write_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = proc_read_fpregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = proc_write_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = proc_read_dbregs(td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_LWPINFO:
		/* data is the caller-supplied buffer size; bound it. */
		if (data == 0 || data > sizeof(*pl))
			return (EINVAL);
		pl = addr;
		_PHOLD(p);
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_flags & TDF_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		if (td2->td_pflags & TDP_SA) {
			pl->pl_flags = PL_FLAG_SA;
			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
				pl->pl_flags |= PL_FLAG_BOUND;
		} else {
			pl->pl_flags = 0;
		}
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		PROC_UNLOCK(p);
		return (0);

	case PT_GETLWPLIST:
		if (data <= 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		/*
		 * NOTE(review): the lock is dropped around malloc(), so the
		 * thread count may change before the list is snapshotted;
		 * the loop below bounds the copy by num regardless.
		 */
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = num;
		return (error);

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		/* Hand machine-dependent requests to the MD layer. */
		if (req >= PT_FIRSTMACH) {
			_PHOLD(p);
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PRELE(p);
			return (error);
		}
#endif
		break;
	}

	/* Unknown request. */
	error = EINVAL;

fail:
	PROC_UNLOCK(p);
fail_noproc:
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
881
882 /*
883 * Stop a process because of a debugging event;
884 * stay stopped until p->p_step is cleared
885 * (cleared by PIOCCONT in procfs).
886 */
887 void
888 stopevent(struct proc *p, unsigned int event, unsigned int val)
889 {
890
891 PROC_LOCK_ASSERT(p, MA_OWNED);
892 p->p_step = 1;
893 do {
894 p->p_xstat = val;
895 p->p_xthread = NULL;
896 p->p_stype = event; /* Which event caused the stop? */
897 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */
898 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
899 } while (p->p_step);
900 }
Cache object: fba0e608beaf3ceb65519ea583fd5646
|