1 /*-
2 * Copyright (c) 1994, Sean Eric Fagan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/6.1/sys/kern/sys_process.c 158179 2006-04-30 16:44:43Z cvs2svn $");
34
35 #include "opt_compat.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/syscallsubr.h>
42 #include <sys/sysproto.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/ptrace.h>
46 #include <sys/sx.h>
47 #include <sys/malloc.h>
48 #include <sys/signalvar.h>
49
50 #include <machine/reg.h>
51
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_page.h>
59
60 #ifdef COMPAT_IA32
61 #include <sys/procfs.h>
62 #include <machine/fpu.h>
63 #include <compat/ia32/ia32_reg.h>
64
65 extern struct sysentvec ia32_freebsd_sysvec;
66
/*
 * 32-bit layout of struct ptrace_io_desc, used to service PT_IO
 * requests issued by ia32 binaries on a 64-bit kernel (COMPAT_IA32).
 */
struct ptrace_io_desc32 {
	int		piod_op;	/* I/O operation (PIOD_*) */
	u_int32_t	piod_offs;	/* child offset (32-bit pointer) */
	u_int32_t	piod_addr;	/* parent buffer (32-bit pointer) */
	u_int32_t	piod_len;	/* requested byte count */
};
73 #endif
74
75 /*
76 * Functions implemented using PROC_ACTION():
77 *
78 * proc_read_regs(proc, regs)
79 * Get the current user-visible register set from the process
80 * and copy it into the regs structure (<machine/reg.h>).
81 * The process is stopped at the time read_regs is called.
82 *
83 * proc_write_regs(proc, regs)
84 * Update the current register set from the passed in regs
85 * structure. Take care to avoid clobbering special CPU
86 * registers or privileged bits in the PSL.
87 * Depending on the architecture this may have fix-up work to do,
88 * especially if the IAR or PCW are modified.
89 * The process is stopped at the time write_regs is called.
90 *
91 * proc_read_fpregs, proc_write_fpregs
92 * deal with the floating point register set, otherwise as above.
93 *
94 * proc_read_dbregs, proc_write_dbregs
95 * deal with the processor debug register set, otherwise as above.
96 *
97 * proc_sstep(proc)
98 * Arrange for the process to trap after executing a single instruction.
99 */
100
/*
 * Common body for the proc_*_regs() family below: assert that the
 * target process is locked, fail with EIO if its image is swapped out
 * (PS_INMEM clear), otherwise evaluate 'action' and return its result.
 * Note this macro expands to a 'return' statement, so it must be the
 * entire body of the calling function.
 */
#define PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
111
/*
 * Copy the stopped thread's user-visible register set into *regs.
 * Returns EIO if the process image is not in memory (see PROC_ACTION()).
 */
int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}
118
/*
 * Update the stopped thread's register set from *regs; set_regs() is
 * responsible for protecting special CPU registers / privileged PSL bits.
 */
int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}
125
/*
 * Copy the stopped thread's processor debug registers into *dbregs.
 */
int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}
132
/*
 * Update the stopped thread's processor debug registers from *dbregs.
 */
int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}
139
140 /*
141 * Ptrace doesn't support fpregs at all, and there are no security holes
142 * or translations for fpregs, so we can just copy them.
143 */
/*
 * Copy the stopped thread's floating point register set into *fpregs.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}
150
/*
 * Update the stopped thread's floating point register set from *fpregs.
 */
int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}
157
158 #ifdef COMPAT_IA32
159 /* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
/*
 * As proc_read_regs(), but filling the ia32 (struct reg32) layout.
 */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}
166
/*
 * As proc_write_regs(), but consuming the ia32 (struct reg32) layout.
 */
int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}
173
/*
 * As proc_read_dbregs(), but filling the ia32 (struct dbreg32) layout.
 */
int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}
180
/*
 * As proc_write_dbregs(), but consuming the ia32 (struct dbreg32) layout.
 */
int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}
187
/*
 * As proc_read_fpregs(), but filling the ia32 (struct fpreg32) layout.
 */
int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}
194
/*
 * As proc_write_fpregs(), but consuming the ia32 (struct fpreg32) layout.
 */
int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
201 #endif
202
/*
 * Arrange for the thread to trap after executing a single instruction.
 */
int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}
209
/*
 * Transfer data described by 'uio' into or out of the address space of
 * process 'p', one page per iteration.  The caller must hold the target
 * via PHOLD (p_lock >= 1) so it cannot exit while the copy is in
 * progress.  Returns 0 on success or an errno value; failures to fault
 * or look up a page are reported as EFAULT.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	/* VM_PROT_OVERRIDE_WRITE lets a debugger write read-only text. */
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy: the rest of this page, capped
		 * by what the caller asked for.
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 * (Writes must hit the top-level object, which the
			 * fault above will have populated.)
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
335
336 /*
337 * Process debugging system call.
338 */
/*
 * Argument layout of the ptrace(2) system call; normally provided by
 * the generated <sys/sysproto.h>.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;		/* request (PT_*) */
	pid_t	pid;		/* target process id (or thread id) */
	caddr_t	addr;		/* request-specific address argument */
	int	data;		/* request-specific data argument */
};
#endif
347
#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *	COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *	copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *	copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.  ('wrap32' must be in scope at
 * every use site; token-pasting selects the 32-bit union member.)
 */
#define COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
/* No ia32 compat: the wrappers collapse to plain copyin/copyout. */
#define COPYIN(u, k, s)		copyin(u, k, s)
#define COPYOUT(k, u, s)	copyout(k, u, s)
#endif
/*
 * MPSAFE
 *
 * ptrace(2) system call glue: copy the request-specific argument
 * structure in from user space (into the union below), hand off to
 * kern_ptrace(), then copy any results back out.  Requests not listed
 * in the switches pass uap->addr straight through as a raw address.
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	/* A 32 bit caller supplies (and expects) the 32 bit layouts. */
	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec)
		wrap32 = 1;
#endif
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		/* Output-only requests: nothing to copy in. */
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		/* Raw-address requests: pass the user pointer through. */
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	/* Copy results back out for requests that produce them. */
	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		/* 'data' is the caller's buffer size, validated below. */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
450 #undef COPYIN
451 #undef COPYOUT
452
#ifdef COMPAT_IA32
/*
 * PROC_READ(regs, td2, addr);
 * becomes either:
 * proc_read_regs(td2, addr);
 * or
 * proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets ('safe' is only set
 * when both debugger and target are ia32).
 */
#define PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
/* No ia32 compat: dispatch straight to the native accessors. */
#define PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
474
/*
 * Kernel half of ptrace(2), shared with any compat entry points.
 * 'addr' is either a kernel copy of the request's argument structure
 * or a raw user-space address, depending on 'req' (see ptrace() above).
 * Looks up and locks the target, performs permission checks, executes
 * the request, and returns 0 or an errno value.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* These requests may reparent; they need proctree_lock. */
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			/* 'pid' names a process; pfind() returns it locked. */
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/*
			 * 'pid' is really a thread (lwp) id; scan every
			 * process for a matching thread.
			 * this is slow, should be optimized
			 */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				if (td2 != NULL)
					break; /* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}

	/* A dying target can no longer be traced. */
	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/*
		 * No explicit thread was named: use the thread that
		 * reported the debug event, or else the first thread.
		 */
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec) {
		if (td2->td_proc->p_sysent == &ia32_freebsd_sysvec)
			safe = 1;	/* 32 on 32: writes allowed */
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			/* Log (once) the unusual signal-only stop. */
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		mtx_lock_spin(&sched_lock);
		td2->td_flags |= TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		break;

	case PT_RESUME:
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		/* An addr of (void *)1 means "continue from current pc". */
		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		/* deliver or queue signal */
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_XSIG;
		mtx_unlock_spin(&sched_lock);
		td2->td_xsig = data;
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			mtx_lock_spin(&sched_lock);
			if (req == PT_DETACH) {
				/* A detached process keeps no suspensions. */
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_flags &= ~TDF_DBSUSPEND;
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			mtx_unlock_spin(&sched_lock);
			thread_continued(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
		}

		if (data)
			psignal(p, data);

		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		/* Word-at-a-time peek/poke via proc_rwmem(). */
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
		/* Bulk transfer described by a (32- or 64-bit) io_desc. */
#ifdef COMPAT_IA32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
		/* Report back how many bytes actually moved. */
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		/* 'data' is the caller-supplied buffer size. */
		if (data == 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_flags & TDF_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		if (td2->td_pflags & TDP_SA) {
			pl->pl_flags = PL_FLAG_SA;
			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
				pl->pl_flags |= PL_FLAG_BOUND;
		} else {
			pl->pl_flags = 0;
		}
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		/*
		 * Drop the proc lock around the M_WAITOK allocation; the
		 * copy loop below re-checks 'num' in case threads appeared.
		 */
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = num;
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
		/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
991 #undef PROC_READ
992 #undef PROC_WRITE
993
994 /*
995 * Stop a process because of a debugging event;
996 * stay stopped until p->p_step is cleared
997 * (cleared by PIOCCONT in procfs).
998 */
999 void
1000 stopevent(struct proc *p, unsigned int event, unsigned int val)
1001 {
1002
1003 PROC_LOCK_ASSERT(p, MA_OWNED);
1004 p->p_step = 1;
1005 do {
1006 p->p_xstat = val;
1007 p->p_xthread = NULL;
1008 p->p_stype = event; /* Which event caused the stop? */
1009 wakeup(&p->p_stype); /* Wake up any PIOCWAIT'ing procs */
1010 msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
1011 } while (p->p_step);
1012 }
/* Cache object: 60b569a8deabb943e42afe4b12b637d5 */