1 /*-
2 * Copyright (c) 1994, Sean Eric Fagan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/6.0/sys/kern/sys_process.c 150614 2005-09-27 12:17:38Z davidxu $");
34
35 #include "opt_compat.h"
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/syscallsubr.h>
42 #include <sys/sysproto.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/ptrace.h>
46 #include <sys/sx.h>
47 #include <sys/malloc.h>
48 #include <sys/signalvar.h>
49
50 #include <machine/reg.h>
51
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_kern.h>
57 #include <vm/vm_object.h>
58 #include <vm/vm_page.h>
59
#ifdef COMPAT_IA32
#include <sys/procfs.h>
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>

extern struct sysentvec ia32_freebsd_sysvec;

/*
 * 32-bit layout of struct ptrace_io_desc, used when a 32-bit (ia32)
 * client issues PT_IO: the pointer and length fields shrink to 32 bits.
 */
struct ptrace_io_desc32 {
	int		piod_op;	/* I/O operation (PIOD_*) */
	u_int32_t	piod_offs;	/* child offset (32-bit pointer) */
	u_int32_t	piod_addr;	/* parent buffer (32-bit pointer) */
	u_int32_t	piod_len;	/* requested transfer length */
};
#endif
74
75 /*
76 * Functions implemented using PROC_ACTION():
77 *
78 * proc_read_regs(proc, regs)
79 * Get the current user-visible register set from the process
80 * and copy it into the regs structure (<machine/reg.h>).
81 * The process is stopped at the time read_regs is called.
82 *
83 * proc_write_regs(proc, regs)
84 * Update the current register set from the passed in regs
85 * structure. Take care to avoid clobbering special CPU
86 * registers or privileged bits in the PSL.
87 * Depending on the architecture this may have fix-up work to do,
88 * especially if the IAR or PCW are modified.
89 * The process is stopped at the time write_regs is called.
90 *
91 * proc_read_fpregs, proc_write_fpregs
92 * deal with the floating point register set, otherwise as above.
93 *
94 * proc_read_dbregs, proc_write_dbregs
95 * deal with the processor debug register set, otherwise as above.
96 *
97 * proc_sstep(proc)
98 * Arrange for the process to trap after executing a single instruction.
99 */
100
/*
 * Common guard for the proc_*_regs()/proc_sstep() wrappers below: the
 * target process lock must be held, and the process image must be
 * resident (PS_INMEM) or the operation fails with EIO.  Note that this
 * macro expands to the entire body of the calling function, including
 * its return statement.
 */
#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_sflag & PS_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
111
112 int
113 proc_read_regs(struct thread *td, struct reg *regs)
114 {
115
116 PROC_ACTION(fill_regs(td, regs));
117 }
118
119 int
120 proc_write_regs(struct thread *td, struct reg *regs)
121 {
122
123 PROC_ACTION(set_regs(td, regs));
124 }
125
/* Copy the thread's debug registers out into *dbregs; EIO if swapped out. */
int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

/* Load the thread's debug registers from *dbregs; EIO if swapped out. */
int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}
157
#ifdef COMPAT_IA32
/*
 * For 32 bit binaries, we need to expose the 32 bit regs layouts.
 * These mirror the native wrappers above but use the reg32/fpreg32/
 * dbreg32 structures and their fill/set counterparts; all share the
 * same PROC_ACTION locking and PS_INMEM checks.
 */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif
202
203 int
204 proc_sstep(struct thread *td)
205 {
206
207 PROC_ACTION(ptrace_single_step(td));
208 }
209
/*
 * Transfer data between the address space of process p and the uio
 * request (used by PT_READ_*, PT_WRITE_* and PT_IO).  Takes its own
 * reference on the target vmspace and works one page at a time,
 * faulting each page in and wiring it (vm_page_hold) for the duration
 * of the copy.  Returns 0 or EFAULT; a partial transfer is reported
 * via the residual count left in uio->uio_resid.
 */
int
proc_rwmem(struct proc *p, struct uio *uio)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, refcnt, writing;

	/*
	 * if the vmspace is in the midst of being deallocated or the
	 * process is exiting, don't try to grab anything.  The page table
	 * usage in that process can be messed up.
	 */
	vm = p->p_vmspace;
	if ((p->p_flag & P_WEXIT))
		return (EFAULT);
	/*
	 * Take our own reference on the vmspace; bail if it is already
	 * being torn down (refcnt dropped below 1).
	 */
	do {
		if ((refcnt = vm->vm_refcnt) < 1)
			return (EFAULT);
	} while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));

	/*
	 * The map we want...
	 */
	map = &vm->vm_map;

	/*
	 * For writes, VM_PROT_OVERRIDE_WRITE lets the debugger modify
	 * pages the target maps read-only (e.g. to plant breakpoints).
	 */
	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;	/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, VM_FAULT_NORMAL);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	/* Drop the vmspace reference taken above. */
	vmspace_free(vm);
	return (error);
}
342
343 /*
344 * Process debugging system call.
345 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;	/* PT_* request code */
	pid_t	pid;	/* target process id, or thread id if > PID_MAX */
	caddr_t	addr;	/* request-specific address argument */
	int	data;	/* request-specific data argument */
};
#endif
354
#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *    COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *    copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *    copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.  Relies on `wrap32' being in
 * scope at the expansion site, and on token-pasting "32" onto both
 * the union member name and the sizeof expression.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
/* Native-only kernels: plain copyin/copyout, no layout translation. */
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
376 /*
377 * MPSAFE
378 */
/*
 * The ptrace() system call entry point.  Copies request-specific
 * argument structures in from userland, dispatches to kern_ptrace(),
 * and copies results back out.  For COMPAT_IA32, 32-bit callers get
 * the 32-bit structure layouts via the COPYIN/COPYOUT macros above.
 */
int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	/* 32-bit caller: select the 32-bit layouts in COPYIN/COPYOUT. */
	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec)
		wrap32 = 1;
#endif
	addr = &r;
	/* Copy in arguments for requests that pass a structure. */
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		/* All other requests pass the user address through as-is. */
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	/* Copy out results for requests that return a structure. */
	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		/* data is the caller's buffer size, validated by kern_ptrace(). */
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
457 #undef COPYIN
458 #undef COPYOUT
459
#ifdef COMPAT_IA32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.  Relies on the
 * `wrap32' and `safe' locals being in scope at the expansion site.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
/* Native-only kernels: dispatch straight to the native accessors. */
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
481
/*
 * Kernel-internal ptrace dispatcher.  `pid' names either a process
 * (<= PID_MAX) or a specific thread (lwp id, > PID_MAX).  `addr' is
 * either a kernel copy of the request structure (prepared by ptrace()
 * above) or a raw user address, depending on the request.  Lock
 * ordering is delicate here: proctree_lock (for requests that change
 * the process tree or run state) is taken before the target's proc
 * lock, and sched_lock protects per-thread flags.
 */
int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
	/* Remember whether the caller named a process or a thread. */
	pid_t saved_pid = pid;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				mtx_lock_spin(&sched_lock);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				mtx_unlock_spin(&sched_lock);
				/* td2 is non-NULL only if the tid matched. */
				if (td2 != NULL)
					break;	/* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		/* No explicit thread named: operate on the first thread. */
		td2 = FIRST_THREAD_IN_PROC(p);
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec) {
		if (td2->td_proc->p_sysent == &ia32_freebsd_sysvec)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}


		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if (!P_SHOULDSTOP(p) || p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}

		/* OK */
		break;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);			/* XXXKSE */
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		PROC_UNLOCK(p);
		sx_xunlock(&proctree_lock);
		return (0);

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		_PHOLD(p);
		error = ptrace_clear_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SETSTEP:
		_PHOLD(p);
		error = ptrace_single_step(td2);
		_PRELE(p);
		if (error)
			goto fail;
		PROC_UNLOCK(p);
		return (0);

	case PT_SUSPEND:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags |= TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_RESUME:
		_PHOLD(p);
		mtx_lock_spin(&sched_lock);
		td2->td_flags &= ~TDF_DBSUSPEND;
		mtx_unlock_spin(&sched_lock);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			goto fail;
		}

		_PHOLD(p);

		switch (req) {
		case PT_STEP:
			PROC_UNLOCK(p);
			error = ptrace_single_step(td2);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		/* An addr of (void *)1 means "leave the pc unchanged". */
		if (addr != (void *)1) {
			PROC_UNLOCK(p);
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error) {
				PRELE(p);
				goto fail_noproc;
			}
			PROC_LOCK(p);
		}
		_PRELE(p);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				/* NOTE(review): shadows the outer `pp'. */
				struct proc *pp;

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		if (proctree_locked)
			sx_xunlock(&proctree_lock);
		/* deliver or queue signal */
		if (P_SHOULDSTOP(p)) {
			p->p_xstat = data;
			mtx_lock_spin(&sched_lock);
			/*
			 * Process-addressed requests continue the thread
			 * that reported the stop (p_xthread); thread-
			 * addressed requests continue the named thread.
			 */
			if (saved_pid <= PID_MAX) {
				p->p_xthread->td_flags &= ~TDF_XSIG;
				p->p_xthread->td_xsig = data;
			} else {
				td2->td_flags &= ~TDF_XSIG;
				td2->td_xsig = data;
			}
			p->p_xthread = NULL;
			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3)
					td3->td_flags &= ~TDF_DBSUSPEND;
			}
			/*
			 * unsuspend all threads, to not let a thread run,
			 * you should use PT_SUSPEND to suspend it before
			 * continuing process.
			 */
			mtx_unlock_spin(&sched_lock);
			thread_continued(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG);
			mtx_lock_spin(&sched_lock);
			thread_unsuspend(p);
			mtx_unlock_spin(&sched_lock);
		} else if (data) {
			psignal(p, data);
		}
		PROC_UNLOCK(p);

		return (0);

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		return (error);

	case PT_IO:
		PROC_UNLOCK(p);
#ifdef COMPAT_IA32
		if (wrap32) {
			/* 32-bit caller: decode the 32-bit descriptor. */
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			return (EINVAL);
		}
		/* Report back the number of bytes actually transferred. */
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		_PHOLD(p);
		error = PROC_WRITE(regs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETREGS:
		_PHOLD(p);
		error = PROC_READ(regs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETFPREGS:
		_PHOLD(p);
		error = PROC_WRITE(fpregs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETFPREGS:
		_PHOLD(p);
		error = PROC_READ(fpregs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_SETDBREGS:
		_PHOLD(p);
		error = PROC_WRITE(dbregs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_GETDBREGS:
		_PHOLD(p);
		error = PROC_READ(dbregs, td2, addr);
		_PRELE(p);
		PROC_UNLOCK(p);
		return (error);

	case PT_LWPINFO:
		/* `data' is the size of the caller's buffer. */
		if (data == 0 || data > sizeof(*pl))
			return (EINVAL);
		pl = addr;
		_PHOLD(p);
		if (saved_pid <= PID_MAX) {
			/* Process-addressed: report the stopping thread. */
			pl->pl_lwpid = p->p_xthread->td_tid;
			pl->pl_event = PL_EVENT_SIGNAL;
		} else {
			pl->pl_lwpid = td2->td_tid;
			if (td2->td_flags & TDF_XSIG)
				pl->pl_event = PL_EVENT_SIGNAL;
			else
				pl->pl_event = 0;
		}
		if (td2->td_pflags & TDP_SA) {
			pl->pl_flags = PL_FLAG_SA;
			if (td2->td_upcall && !TD_CAN_UNBIND(td2))
				pl->pl_flags |= PL_FLAG_BOUND;
		} else {
			pl->pl_flags = 0;
		}
		_PRELE(p);
		PROC_UNLOCK(p);
		return (0);

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		PROC_UNLOCK(p);
		return (0);

	case PT_GETLWPLIST:
		if (data <= 0) {
			PROC_UNLOCK(p);
			return (EINVAL);
		}
		/*
		 * Drop the proc lock to malloc, then retake it to walk
		 * the thread list; p_numthreads may have changed in the
		 * meantime, so the copy is bounded by both num and the
		 * actual count seen (tmp).
		 */
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = num;
		return (error);

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			/* Hand machine-dependent requests to the MD layer. */
			_PHOLD(p);
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PRELE(p);
			return (error);
		}
#endif
		break;
	}

	/* Unknown request. */
	error = EINVAL;

fail:
	PROC_UNLOCK(p);
fail_noproc:
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
1031 #undef PROC_READ
1032 #undef PROC_WRITE
1033
1034 /*
1035 * Stop a process because of a debugging event;
1036 * stay stopped until p->p_step is cleared
1037 * (cleared by PIOCCONT in procfs).
1038 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		/* Sleep (proc lock dropped by msleep) until p_step clears. */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
Cache object: 1f3d3194bea0da1782dcedb6038a17ba
|