1 /*
2 * Copyright (c) 1994, Sean Eric Fagan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by Sean Eric Fagan.
16 * 4. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
32 * $DragonFly: src/sys/kern/sys_process.c,v 1.30 2007/02/19 01:14:23 corecode Exp $
33 */
34
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sysproto.h>
38 #include <sys/proc.h>
39 #include <sys/priv.h>
40 #include <sys/vnode.h>
41 #include <sys/ptrace.h>
42 #include <sys/reg.h>
43 #include <sys/lock.h>
44
45 #include <vm/vm.h>
46 #include <vm/pmap.h>
47 #include <vm/vm_map.h>
48 #include <vm/vm_page.h>
49
50 #include <sys/user.h>
51 #include <vfs/procfs/procfs.h>
52
53 #include <sys/thread2.h>
54 #include <sys/spinlock2.h>
55
56 /* use the equivalent procfs code */
57 #if 0
/*
 * Read one word (an unsigned int) from the traced process's address
 * space by looking up the backing VM object and temporarily mapping
 * the containing page into kernel_map.
 *
 * NOTE(review): this function is compiled out (#if 0) in favor of the
 * equivalent procfs code (procfs_domem); the stray "XXX" token on the
 * vm_object_reference call means it would not compile if re-enabled
 * as-is.
 */
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval) {
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	boolean_t wired;
	vm_pindex_t pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/* use tmap because vm_map_lookup() can change the map argument */
	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &object, &pindex, &out_prot, &wired);

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
			  &kva,
			  PAGE_SIZE, PAGE_SIZE,
			  0, VM_MAPTYPE_NORMAL,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);

	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the page so the bcopy below cannot fault */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		/* tear down the temporary kernel mapping */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}
109
/*
 * Write one word (an unsigned int) into the traced process's address
 * space: force the page writable if necessary, fault it in, map it
 * into kernel_map, copy the datum, then restore the original (assumed
 * read/execute) protection.
 *
 * NOTE(review): compiled out (#if 0) in favor of the equivalent procfs
 * code (procfs_domem); the stray "XXX" token on the vm_object_reference
 * call means it would not compile if re-enabled as-is.
 */
static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum) {
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_offset_t kva = 0;
	int page_offset;		/* offset into page */
	vm_offset_t pageno;		/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	boolean_t wired;
	vm_pindex_t pindex;
	boolean_t fix_prot = 0;		/* protection was widened; restore it */

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
					  VM_PROT_ALL, 0)) != KERN_SUCCESS)
			return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wired, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &object, &pindex, &out_prot, &wired);
	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, IDX_TO_OFF(pindex),
			  &kva,
			  PAGE_SIZE, PAGE_SIZE,
			  0, VM_MAPTYPE_NORMAL,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (!rv) {
		vm_object_reference XXX (object);

		/* wire the page so the bcopy below cannot fault */
		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		/* tear down the temporary kernel mapping */
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	/* put the original protection back, even if the write failed */
	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
				VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
196 #endif
197
198 /*
199 * Process debugging system call.
200 *
201 * MPALMOSTSAFE
202 */
203 int
204 sys_ptrace(struct ptrace_args *uap)
205 {
206 struct proc *p = curproc;
207
208 /*
209 * XXX this obfuscation is to reduce stack usage, but the register
210 * structs may be too large to put on the stack anyway.
211 */
212 union {
213 struct ptrace_io_desc piod;
214 struct dbreg dbreg;
215 struct fpreg fpreg;
216 struct reg reg;
217 } r;
218 void *addr;
219 int error = 0;
220
221 addr = &r;
222 switch (uap->req) {
223 case PT_GETREGS:
224 case PT_GETFPREGS:
225 #ifdef PT_GETDBREGS
226 case PT_GETDBREGS:
227 #endif
228 break;
229 case PT_SETREGS:
230 error = copyin(uap->addr, &r.reg, sizeof r.reg);
231 break;
232 case PT_SETFPREGS:
233 error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
234 break;
235 #ifdef PT_SETDBREGS
236 case PT_SETDBREGS:
237 error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
238 break;
239 #endif
240 case PT_IO:
241 error = copyin(uap->addr, &r.piod, sizeof r.piod);
242 break;
243 default:
244 addr = uap->addr;
245 }
246 if (error)
247 return (error);
248
249 error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
250 &uap->sysmsg_result);
251 if (error)
252 return (error);
253
254 switch (uap->req) {
255 case PT_IO:
256 (void)copyout(&r.piod, uap->addr, sizeof r.piod);
257 break;
258 case PT_GETREGS:
259 error = copyout(&r.reg, uap->addr, sizeof r.reg);
260 break;
261 case PT_GETFPREGS:
262 error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
263 break;
264 #ifdef PT_GETDBREGS
265 case PT_GETDBREGS:
266 error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
267 break;
268 #endif
269 }
270
271 return (error);
272 }
273
274 int
275 kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
276 int data, int *res)
277 {
278 struct proc *p, *pp;
279 struct lwp *lp;
280 struct iovec iov;
281 struct uio uio;
282 struct ptrace_io_desc *piod;
283 int error = 0;
284 int write, tmp;
285 int t;
286
287 write = 0;
288 if (req == PT_TRACE_ME) {
289 p = curp;
290 PHOLD(p);
291 } else {
292 if ((p = pfind(pid)) == NULL)
293 return ESRCH;
294 }
295 if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
296 PRELE(p);
297 return (ESRCH);
298 }
299
300 lwkt_gettoken(&p->p_token);
301 /* Can't trace a process that's currently exec'ing. */
302 if ((p->p_flags & P_INEXEC) != 0) {
303 lwkt_reltoken(&p->p_token);
304 PRELE(p);
305 return EAGAIN;
306 }
307
308 /*
309 * Permissions check
310 */
311 switch (req) {
312 case PT_TRACE_ME:
313 /* Always legal. */
314 break;
315
316 case PT_ATTACH:
317 /* Self */
318 if (p->p_pid == curp->p_pid) {
319 lwkt_reltoken(&p->p_token);
320 PRELE(p);
321 return EINVAL;
322 }
323
324 /* Already traced */
325 if (p->p_flags & P_TRACED) {
326 lwkt_reltoken(&p->p_token);
327 PRELE(p);
328 return EBUSY;
329 }
330
331 if (curp->p_flags & P_TRACED)
332 for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
333 if (pp == p) {
334 lwkt_reltoken(&p->p_token);
335 PRELE(p);
336 return (EINVAL);
337 }
338
339 /* not owned by you, has done setuid (unless you're root) */
340 if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
341 (p->p_flags & P_SUGID)) {
342 if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
343 lwkt_reltoken(&p->p_token);
344 PRELE(p);
345 return error;
346 }
347 }
348
349 /* can't trace init when securelevel > 0 */
350 if (securelevel > 0 && p->p_pid == 1) {
351 lwkt_reltoken(&p->p_token);
352 PRELE(p);
353 return EPERM;
354 }
355
356 /* OK */
357 break;
358
359 case PT_READ_I:
360 case PT_READ_D:
361 case PT_WRITE_I:
362 case PT_WRITE_D:
363 case PT_IO:
364 case PT_CONTINUE:
365 case PT_KILL:
366 case PT_STEP:
367 case PT_DETACH:
368 #ifdef PT_GETREGS
369 case PT_GETREGS:
370 #endif
371 #ifdef PT_SETREGS
372 case PT_SETREGS:
373 #endif
374 #ifdef PT_GETFPREGS
375 case PT_GETFPREGS:
376 #endif
377 #ifdef PT_SETFPREGS
378 case PT_SETFPREGS:
379 #endif
380 #ifdef PT_GETDBREGS
381 case PT_GETDBREGS:
382 #endif
383 #ifdef PT_SETDBREGS
384 case PT_SETDBREGS:
385 #endif
386 /* not being traced... */
387 if ((p->p_flags & P_TRACED) == 0) {
388 lwkt_reltoken(&p->p_token);
389 PRELE(p);
390 return EPERM;
391 }
392
393 /* not being traced by YOU */
394 if (p->p_pptr != curp) {
395 lwkt_reltoken(&p->p_token);
396 PRELE(p);
397 return EBUSY;
398 }
399
400 /* not currently stopped */
401 if (p->p_stat != SSTOP ||
402 (p->p_flags & P_WAITED) == 0) {
403 lwkt_reltoken(&p->p_token);
404 PRELE(p);
405 return EBUSY;
406 }
407
408 /* OK */
409 break;
410
411 default:
412 lwkt_reltoken(&p->p_token);
413 PRELE(p);
414 return EINVAL;
415 }
416
417 /* XXX lwp */
418 lp = FIRST_LWP_IN_PROC(p);
419 #ifdef FIX_SSTEP
420 /*
421 * Single step fixup ala procfs
422 */
423 FIX_SSTEP(lp);
424 #endif
425
426 /*
427 * Actually do the requests
428 */
429
430 *res = 0;
431
432 switch (req) {
433 case PT_TRACE_ME:
434 /* set my trace flag and "owner" so it can read/write me */
435 p->p_flags |= P_TRACED;
436 p->p_oppid = p->p_pptr->p_pid;
437 lwkt_reltoken(&p->p_token);
438 PRELE(p);
439 return 0;
440
441 case PT_ATTACH:
442 /* security check done above */
443 p->p_flags |= P_TRACED;
444 p->p_oppid = p->p_pptr->p_pid;
445 proc_reparent(p, curp);
446 data = SIGSTOP;
447 goto sendsig; /* in PT_CONTINUE below */
448
449 case PT_STEP:
450 case PT_CONTINUE:
451 case PT_DETACH:
452 /* Zero means do not send any signal */
453 if (data < 0 || data > _SIG_MAXSIG) {
454 lwkt_reltoken(&p->p_token);
455 PRELE(p);
456 return EINVAL;
457 }
458
459 LWPHOLD(lp);
460
461 if (req == PT_STEP) {
462 if ((error = ptrace_single_step (lp))) {
463 LWPRELE(lp);
464 lwkt_reltoken(&p->p_token);
465 PRELE(p);
466 return error;
467 }
468 }
469
470 if (addr != (void *)1) {
471 if ((error = ptrace_set_pc (lp,
472 (u_long)(uintfptr_t)addr))) {
473 LWPRELE(lp);
474 lwkt_reltoken(&p->p_token);
475 PRELE(p);
476 return error;
477 }
478 }
479 LWPRELE(lp);
480
481 if (req == PT_DETACH) {
482 /* reset process parent */
483 if (p->p_oppid != p->p_pptr->p_pid) {
484 struct proc *pp;
485
486 pp = pfind(p->p_oppid);
487 if (pp) {
488 proc_reparent(p, pp);
489 PRELE(pp);
490 }
491 }
492
493 p->p_flags &= ~(P_TRACED | P_WAITED);
494 p->p_oppid = 0;
495
496 /* should we send SIGCHLD? */
497 }
498
499 sendsig:
500 /*
501 * Deliver or queue signal. If the process is stopped
502 * force it to be SACTIVE again.
503 */
504 crit_enter();
505 if (p->p_stat == SSTOP) {
506 p->p_xstat = data;
507 proc_unstop(p);
508 } else if (data) {
509 ksignal(p, data);
510 }
511 crit_exit();
512 lwkt_reltoken(&p->p_token);
513 PRELE(p);
514 return 0;
515
516 case PT_WRITE_I:
517 case PT_WRITE_D:
518 write = 1;
519 /* fallthrough */
520 case PT_READ_I:
521 case PT_READ_D:
522 /*
523 * NOTE! uio_offset represents the offset in the target
524 * process. The iov is in the current process (the guy
525 * making the ptrace call) so uio_td must be the current
526 * process (though for a SYSSPACE transfer it doesn't
527 * really matter).
528 */
529 tmp = 0;
530 /* write = 0 set above */
531 iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
532 iov.iov_len = sizeof(int);
533 uio.uio_iov = &iov;
534 uio.uio_iovcnt = 1;
535 uio.uio_offset = (off_t)(uintptr_t)addr;
536 uio.uio_resid = sizeof(int);
537 uio.uio_segflg = UIO_SYSSPACE;
538 uio.uio_rw = write ? UIO_WRITE : UIO_READ;
539 uio.uio_td = curthread;
540 error = procfs_domem(curp, lp, NULL, &uio);
541 if (uio.uio_resid != 0) {
542 /*
543 * XXX procfs_domem() doesn't currently return ENOSPC,
544 * so I think write() can bogusly return 0.
545 * XXX what happens for short writes? We don't want
546 * to write partial data.
547 * XXX procfs_domem() returns EPERM for other invalid
548 * addresses. Convert this to EINVAL. Does this
549 * clobber returns of EPERM for other reasons?
550 */
551 if (error == 0 || error == ENOSPC || error == EPERM)
552 error = EINVAL; /* EOF */
553 }
554 if (!write)
555 *res = tmp;
556 lwkt_reltoken(&p->p_token);
557 PRELE(p);
558 return (error);
559
560 case PT_IO:
561 /*
562 * NOTE! uio_offset represents the offset in the target
563 * process. The iov is in the current process (the guy
564 * making the ptrace call) so uio_td must be the current
565 * process.
566 */
567 piod = addr;
568 iov.iov_base = piod->piod_addr;
569 iov.iov_len = piod->piod_len;
570 uio.uio_iov = &iov;
571 uio.uio_iovcnt = 1;
572 uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
573 uio.uio_resid = piod->piod_len;
574 uio.uio_segflg = UIO_USERSPACE;
575 uio.uio_td = curthread;
576 switch (piod->piod_op) {
577 case PIOD_READ_D:
578 case PIOD_READ_I:
579 uio.uio_rw = UIO_READ;
580 break;
581 case PIOD_WRITE_D:
582 case PIOD_WRITE_I:
583 uio.uio_rw = UIO_WRITE;
584 break;
585 default:
586 lwkt_reltoken(&p->p_token);
587 PRELE(p);
588 return (EINVAL);
589 }
590 error = procfs_domem(curp, lp, NULL, &uio);
591 piod->piod_len -= uio.uio_resid;
592 lwkt_reltoken(&p->p_token);
593 PRELE(p);
594 return (error);
595
596 case PT_KILL:
597 data = SIGKILL;
598 goto sendsig; /* in PT_CONTINUE above */
599
600 #ifdef PT_SETREGS
601 case PT_SETREGS:
602 write = 1;
603 /* fallthrough */
604 #endif /* PT_SETREGS */
605 #ifdef PT_GETREGS
606 case PT_GETREGS:
607 /* write = 0 above */
608 #endif /* PT_SETREGS */
609 #if defined(PT_SETREGS) || defined(PT_GETREGS)
610 if (!procfs_validregs(lp)) { /* no P_SYSTEM procs please */
611 lwkt_reltoken(&p->p_token);
612 PRELE(p);
613 return EINVAL;
614 } else {
615 iov.iov_base = addr;
616 iov.iov_len = sizeof(struct reg);
617 uio.uio_iov = &iov;
618 uio.uio_iovcnt = 1;
619 uio.uio_offset = 0;
620 uio.uio_resid = sizeof(struct reg);
621 uio.uio_segflg = UIO_SYSSPACE;
622 uio.uio_rw = write ? UIO_WRITE : UIO_READ;
623 uio.uio_td = curthread;
624 t = procfs_doregs(curp, lp, NULL, &uio);
625 lwkt_reltoken(&p->p_token);
626 PRELE(p);
627 return t;
628 }
629 #endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */
630
631 #ifdef PT_SETFPREGS
632 case PT_SETFPREGS:
633 write = 1;
634 /* fallthrough */
635 #endif /* PT_SETFPREGS */
636 #ifdef PT_GETFPREGS
637 case PT_GETFPREGS:
638 /* write = 0 above */
639 #endif /* PT_SETFPREGS */
640 #if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
641 if (!procfs_validfpregs(lp)) { /* no P_SYSTEM procs please */
642 lwkt_reltoken(&p->p_token);
643 PRELE(p);
644 return EINVAL;
645 } else {
646 iov.iov_base = addr;
647 iov.iov_len = sizeof(struct fpreg);
648 uio.uio_iov = &iov;
649 uio.uio_iovcnt = 1;
650 uio.uio_offset = 0;
651 uio.uio_resid = sizeof(struct fpreg);
652 uio.uio_segflg = UIO_SYSSPACE;
653 uio.uio_rw = write ? UIO_WRITE : UIO_READ;
654 uio.uio_td = curthread;
655 t = procfs_dofpregs(curp, lp, NULL, &uio);
656 lwkt_reltoken(&p->p_token);
657 PRELE(p);
658 return t;
659 }
660 #endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */
661
662 #ifdef PT_SETDBREGS
663 case PT_SETDBREGS:
664 write = 1;
665 /* fallthrough */
666 #endif /* PT_SETDBREGS */
667 #ifdef PT_GETDBREGS
668 case PT_GETDBREGS:
669 /* write = 0 above */
670 #endif /* PT_SETDBREGS */
671 #if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
672 if (!procfs_validdbregs(lp)) { /* no P_SYSTEM procs please */
673 lwkt_reltoken(&p->p_token);
674 PRELE(p);
675 return EINVAL;
676 } else {
677 iov.iov_base = addr;
678 iov.iov_len = sizeof(struct dbreg);
679 uio.uio_iov = &iov;
680 uio.uio_iovcnt = 1;
681 uio.uio_offset = 0;
682 uio.uio_resid = sizeof(struct dbreg);
683 uio.uio_segflg = UIO_SYSSPACE;
684 uio.uio_rw = write ? UIO_WRITE : UIO_READ;
685 uio.uio_td = curthread;
686 t = procfs_dodbregs(curp, lp, NULL, &uio);
687 lwkt_reltoken(&p->p_token);
688 PRELE(p);
689 return t;
690 }
691 #endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */
692
693 default:
694 break;
695 }
696
697 lwkt_reltoken(&p->p_token);
698 PRELE(p);
699
700 return 0;
701 }
702
/*
 * trace_req() - policy hook asking whether a trace request may proceed
 * for the given process.  Unconditionally grants permission; the
 * process argument is not examined.
 */
int
trace_req(struct proc *p)
{
	return (1);
}
708
709 /*
710 * stopevent()
711 *
712 * Stop a process because of a procfs event. Stay stopped until p->p_step
713 * is cleared (cleared by PIOCCONT in procfs).
714 *
715 * MPSAFE
716 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		/* nobody is interested in this event; don't stop */
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	/*
	 * Register for the wakeup on p_step BEFORE dropping the spinlock
	 * so a wakeup issued between the unlock and the tsleep below is
	 * not lost (tsleep_interlock/PINTERLOCKED pattern).
	 */
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			/* PIOCCONT cleared p_step; we may continue */
			spin_unlock(&p->p_spin);
			break;
		}
		/* re-arm the interlock while still holding the spinlock */
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
751
Cache object: bf964dbfdf99fc8eb1a5bb7aee3fe7e0
|