sys/kern/kern_synch.c
/*	$OpenBSD: kern_synch.c,v 1.190 2022/08/14 01:58:27 jsg Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/refcnt.h>
#include <sys/atomic.h>
#include <sys/tracepoint.h>

#include <ddb/db_output.h>

#include <machine/spinlock.h>

#ifdef DIAGNOSTIC
#include <sys/syslog.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

int	sleep_signal_check(void);
int	thrsleep(struct proc *, struct sys___thrsleep_args *);
int	thrsleep_unlock(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
TAILQ_HEAD(slpque,proc) slpque[TABLESIZE];
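
/*
 * Illustrative sketch (editor's addition; the address below is made up):
 * with TABLESIZE 128, LOOKUP() keeps bits 8..14 of the channel address,
 * so sleepers and wakers hash to the same bucket.
 *
 *	const volatile void *chan = (void *)0xffff80000123a456UL;
 *
 *	LOOKUP(chan) == ((0xffff80000123a456 >> 8) & 127) == 0x24 == 36
 *
 * A proc sleeping on `chan' is queued on slpque[36], and wakeup(chan)
 * walks only that list, matching each proc's p_wchan against `chan'.
 */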
77
78 void
79 sleep_queue_init(void)
80 {
81 int i;
82
83 for (i = 0; i < TABLESIZE; i++)
84 TAILQ_INIT(&slpque[i]);
85 }
86
87 /*
88 * Global sleep channel for threads that do not want to
89 * receive wakeup(9) broadcasts.
90 */
91 int nowake;
92
93 /*
94 * During autoconfiguration or after a panic, a sleep will simply
95 * lower the priority briefly to allow interrupts, then return.
96 * The priority to be used (safepri) is machine-dependent, thus this
97 * value is initialized and maintained in the machine-dependent layers.
98 * This priority will typically be 0, or the lowest priority
99 * that is safe for use on the interrupt stack; it can be made
100 * higher to block network software interrupts after panics.
101 */
102 extern int safepri;
103

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(const volatile void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);

#ifdef MULTIPROCESSOR
	KASSERT(timo || _kernel_lock_held());
#endif

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);
	return sleep_finish(&sls, 1);
}
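
/*
 * Usage sketch (editor's addition; `sc' and "mydev" are hypothetical
 * driver names): the canonical wait loop.  The condition is re-tested
 * after every wakeup because a hashed channel may wake more procs than
 * intended, and PCATCH may end the sleep early.
 *
 *	while (sc->sc_busy) {
 *		error = tsleep(&sc->sc_busy, PWAIT | PCATCH, "mydev", 0);
 *		if (error)
 *			return (error);		// EINTR or ERESTART
 *	}
 *	sc->sc_busy = 1;
 */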

int
tsleep_nsec(const volatile void *ident, int priority, const char *wmesg,
    uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return tsleep(ident, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	/*
	 * We want to sleep at least nsecs nanoseconds worth of ticks.
	 *
	 *  - Clamp nsecs to prevent arithmetic overflow.
	 *
	 *  - Round nsecs up to account for any nanoseconds that do not
	 *    divide evenly into tick_nsec, otherwise we'll lose them to
	 *    integer division in the next step.  We add (tick_nsec - 1)
	 *    to keep from introducing a spurious tick if there are no
	 *    such nanoseconds, i.e. nsecs % tick_nsec == 0.
	 *
	 *  - Divide the rounded value to a count of ticks.  We divide
	 *    by (tick_nsec + 1) to discard the extra tick introduced if,
	 *    before rounding, nsecs % tick_nsec == 1.
	 *
	 *  - Finally, add a tick to the result.  We need to wait out
	 *    the current tick before we can begin counting our interval,
	 *    as we do not know how much time has elapsed since the
	 *    current tick began.
	 */
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return tsleep(ident, priority, wmesg, (int)to_ticks);
}
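
/*
 * Worked example (editor's arithmetic, assuming a 100Hz clock where
 * tick_nsec == 10000000): a request to sleep 25ms, nsecs == 25000000:
 *
 *	(25000000 + 9999999) / 10000001 + 1 == 3 + 1 == 4 ticks
 *
 * ceil(25ms / 10ms) == 3 ticks cover the interval, plus one tick to
 * wait out the in-progress tick.  The (tick_nsec + 1) divisor matters
 * at the boundary nsecs == 10000001: rounding up yields 20000000,
 * which divided by 10000001 is still 1 tick at this step, not a
 * spurious 2.
 */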

/*
 * Same as tsleep, but if we have a mutex provided, then once we've
 * entered the sleep queue we drop the mutex.  After sleeping we re-lock.
 */
int
msleep(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, spl;
#ifdef MULTIPROCESSOR
	int hold_count;
#endif

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	KASSERT(mtx != NULL);

#ifdef DDB
	if (cold == 2)
		db_stack_dump();
#endif
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		spl = MUTEX_OLDIPL(mtx);
		MUTEX_OLDIPL(mtx) = safepri;
		mtx_leave(mtx);
#ifdef MULTIPROCESSOR
		if (_kernel_lock_held()) {
			hold_count = __mp_release_all(&kernel_lock);
			__mp_acquire_count(&kernel_lock, hold_count);
		}
#endif
		if ((priority & PNORELOCK) == 0) {
			mtx_enter(mtx);
			MUTEX_OLDIPL(mtx) = spl;
		} else
			splx(spl);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg, timo);

	/* XXX - We need to make sure that the mutex doesn't
	 * unblock splsched.  This can be made a bit more
	 * correct when the sched_lock is a mutex.
	 */
	spl = MUTEX_OLDIPL(mtx);
	MUTEX_OLDIPL(mtx) = splsched();
	mtx_leave(mtx);
	/* signal may stop the process, release mutex before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0) {
		mtx_enter(mtx);
		MUTEX_OLDIPL(mtx) = spl; /* put the ipl back */
	} else
		splx(spl);

	return error;
}
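
/*
 * Usage sketch (editor's addition; `sc' is hypothetical): the mutex
 * protects the condition, so no wakeup can slip in between the test
 * and the sleep; msleep() drops it only after the proc is on the
 * sleep queue.
 *
 *	mtx_enter(&sc->sc_mtx);
 *	while (sc->sc_count == 0)
 *		msleep_nsec(&sc->sc_count, &sc->sc_mtx, PWAIT,
 *		    "empty", INFSLP);
 *	sc->sc_count--;
 *	mtx_leave(&sc->sc_mtx);
 */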

int
msleep_nsec(const volatile void *ident, struct mutex *mtx, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return msleep(ident, mtx, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return msleep(ident, mtx, priority, wmesg, (int)to_ticks);
}
/*
 * Same as tsleep, but if we have a rwlock provided, then once we've
 * entered the sleep queue we drop it.  After sleeping we re-lock.
 */
int
rwsleep(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, status;

	KASSERT((priority & ~(PRIMASK | PCATCH | PNORELOCK)) == 0);
	KASSERT(ident != &nowake || ISSET(priority, PCATCH) || timo != 0);
	rw_assert_anylock(rwl);
	status = rw_status(rwl);

	sleep_setup(&sls, ident, priority, wmesg, timo);

	rw_exit(rwl);
	/* signal may stop the process, release rwlock before that */
	error = sleep_finish(&sls, 1);

	if ((priority & PNORELOCK) == 0)
		rw_enter(rwl, status);

	return error;
}

int
rwsleep_nsec(const volatile void *ident, struct rwlock *rwl, int priority,
    const char *wmesg, uint64_t nsecs)
{
	uint64_t to_ticks;

	if (nsecs == INFSLP)
		return rwsleep(ident, rwl, priority, wmesg, 0);
#ifdef DIAGNOSTIC
	if (nsecs == 0) {
		log(LOG_WARNING,
		    "%s: %s[%d]: %s: trying to sleep zero nanoseconds\n",
		    __func__, curproc->p_p->ps_comm, curproc->p_p->ps_pid,
		    wmesg);
	}
#endif
	nsecs = MIN(nsecs, UINT64_MAX - tick_nsec);
	to_ticks = (nsecs + tick_nsec - 1) / (tick_nsec + 1) + 1;
	if (to_ticks > INT_MAX)
		to_ticks = INT_MAX;
	return rwsleep(ident, rwl, priority, wmesg, (int)to_ticks);
}

void
sleep_setup(struct sleep_state *sls, const volatile void *ident, int prio,
    const char *wmesg, int timo)
{
	struct proc *p = curproc;

#ifdef DIAGNOSTIC
	if (p->p_flag & P_CANTSLEEP)
		panic("sleep: %s failed insomnia", p->p_p->ps_comm);
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
#endif

	sls->sls_catch = prio & PCATCH;
	sls->sls_timeout = 0;

	SCHED_LOCK(sls->sls_s);

	TRACEPOINT(sched, sleep, NULL);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_slppri = prio & PRIMASK;
	TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_runq);

	if (timo) {
		KASSERT((p->p_flag & P_TIMEOUT) == 0);
		sls->sls_timeout = 1;
		timeout_add(&p->p_sleep_to, timo);
	}
}

int
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;
	int error = 0, error1 = 0;

	if (sls->sls_catch != 0) {
		/*
		 * We put ourselves on the sleep queue and start our
		 * timeout before calling sleep_signal_check(), as we could
		 * stop there, and a wakeup or a SIGCONT (or both) could
		 * occur while we were stopped.  A SIGCONT would cause
		 * us to be marked as SSLEEP without resuming us, thus
		 * we must be ready for sleep when sleep_signal_check() is
		 * called.
		 * If the wakeup happens while we're stopped, p->p_wchan
		 * will be NULL upon return from sleep_signal_check().  In
		 * that case we need to unwind immediately.
		 */
		atomic_setbits_int(&p->p_flag, P_SINTR);
		if ((error = sleep_signal_check()) != 0) {
			p->p_stat = SONPROC;
			sls->sls_catch = 0;
			do_sleep = 0;
		} else if (p->p_wchan == NULL) {
			sls->sls_catch = 0;
			do_sleep = 0;
		}
	}

	if (do_sleep) {
		p->p_stat = SSLEEP;
		p->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else {
		unsleep(p);
	}

#ifdef DIAGNOSTIC
	if (p->p_stat != SONPROC)
		panic("sleep_finish !SONPROC");
#endif

	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

	if (sls->sls_timeout) {
		if (p->p_flag & P_TIMEOUT) {
			error1 = EWOULDBLOCK;
		} else {
			/* This can sleep.  It must not use timeouts. */
			timeout_del_barrier(&p->p_sleep_to);
		}
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
	}

	/* Check if the thread was woken up because of an unwind or a signal. */
	if (sls->sls_catch != 0)
		error = sleep_signal_check();

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return error;
}

/*
 * Check and handle signals and suspensions around a sleep cycle.
 */
int
sleep_signal_check(void)
{
	struct proc *p = curproc;
	struct sigctx ctx;
	int err, sig;

	if ((err = single_thread_check(p, 1)) != 0)
		return err;
	if ((sig = cursig(p, &ctx)) != 0) {
		if (ctx.sig_intr)
			return EINTR;
		else
			return ERESTART;
	}
	return 0;
}

int
wakeup_proc(struct proc *p, const volatile void *chan)
{
	int s, awakened = 0;

	SCHED_LOCK(s);
	if (p->p_wchan != NULL &&
	    ((chan == NULL) || (p->p_wchan == chan))) {
		awakened = 1;
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
	}
	SCHED_UNLOCK(s);

	return awakened;
}


/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (wakeup_proc(p, NULL))
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct proc *p)
{
	SCHED_ASSERT_LOCKED();

	if (p->p_wchan != NULL) {
		TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_runq);
		p->p_wchan = NULL;
		TRACEPOINT(sched, wakeup, p->p_tid + THREAD_PID_OFFSET,
		    p->p_p->ps_pid);
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable.
 */
void
wakeup_n(const volatile void *ident, int n)
{
	struct slpque *qp;
	struct proc *p;
	struct proc *pnext;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
	for (p = TAILQ_FIRST(qp); p != NULL && n != 0; p = pnext) {
		pnext = TAILQ_NEXT(p, p_runq);
		/*
		 * Skip ourselves: this happens if a thread calls wakeup(9)
		 * after enqueuing itself on a sleep queue and both `ident'
		 * values collide in the same bucket.
		 */
		if (p == curproc)
			continue;
#ifdef DIAGNOSTIC
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (wakeup_proc(p, ident))
			--n;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(const volatile void *chan)
{
	wakeup_n(chan, -1);
}
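
/*
 * Pairing sketch (editor's addition, continuing the hypothetical `sc'
 * example above): the waker changes state first, then calls wakeup(9)
 * on the same channel the sleeper used.
 *
 *	sc->sc_busy = 0;
 *	wakeup(&sc->sc_busy);		// make every waiter runnable
 *
 * or, to wake at most one waiter:
 *
 *	wakeup_n(&sc->sc_busy, 1);
 */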

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	struct proc *q;
	uint8_t newprio;
	int s;

	SCHED_LOCK(s);
	/*
	 * If one of the threads of a multi-threaded process called
	 * sched_yield(2), drop its priority to ensure its siblings
	 * can make some progress.
	 */
	newprio = p->p_usrpri;
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link)
		newprio = max(newprio, q->p_runpri);
	setrunqueue(p->p_cpu, p, newprio);
	p->p_ru.ru_nvcsw++;
	mi_switch();
	SCHED_UNLOCK(s);

	return (0);
}

int
thrsleep_unlock(void *lock)
{
	static _atomic_lock_t unlocked = _ATOMIC_LOCK_UNLOCKED;
	_atomic_lock_t *atomiclock = lock;

	if (!lock)
		return 0;

	return copyout(&unlocked, atomiclock, sizeof(unlocked));
}

struct tslpentry {
	TAILQ_ENTRY(tslpentry)	tslp_link;
	long			tslp_ident;
};

/* thrsleep queue shared between processes */
static struct tslpqueue thrsleep_queue = TAILQ_HEAD_INITIALIZER(thrsleep_queue);
static struct rwlock thrsleep_lock = RWLOCK_INITIALIZER("thrsleeplk");

int
thrsleep(struct proc *p, struct sys___thrsleep_args *v)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(const struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	long ident = (long)SCARG(uap, ident);
	struct tslpentry entry;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	struct timespec *tsp = (struct timespec *)SCARG(uap, tp);
	void *lock = SCARG(uap, lock);
	uint64_t nsecs = INFSLP;
	int abort = 0, error;
	clockid_t clock_id = SCARG(uap, clock_id);

	if (ident == 0)
		return (EINVAL);
	if (tsp != NULL) {
		struct timespec now;

		if ((error = clock_gettime(p, clock_id, &now)))
			return (error);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_STRUCT))
			ktrabstimespec(p, tsp);
#endif

		if (timespeccmp(tsp, &now, <=)) {
			/* already passed: still do the unlock */
			if ((error = thrsleep_unlock(lock)))
				return (error);
			return (EWOULDBLOCK);
		}

		timespecsub(tsp, &now, tsp);
		nsecs = MIN(TIMESPEC_TO_NSEC(tsp), MAXTSLP);
	}

	if (ident == -1) {
		queue = &thrsleep_queue;
		qlock = &thrsleep_lock;
	} else {
		queue = &p->p_p->ps_tslpqueue;
		qlock = &p->p_p->ps_lock;
	}

	/* Interlock with wakeup. */
	entry.tslp_ident = ident;
	rw_enter_write(qlock);
	TAILQ_INSERT_TAIL(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	error = thrsleep_unlock(lock);

	if (error == 0 && SCARG(uap, abort) != NULL)
		error = copyin(SCARG(uap, abort), &abort, sizeof(abort));

	rw_enter_write(qlock);
	if (error != 0)
		goto out;
	if (abort != 0) {
		error = EINTR;
		goto out;
	}
	if (entry.tslp_ident != 0) {
		error = rwsleep_nsec(&entry, qlock, PWAIT|PCATCH, "thrsleep",
		    nsecs);
	}

out:
	if (entry.tslp_ident != 0)
		TAILQ_REMOVE(queue, &entry, tslp_link);
	rw_exit_write(qlock);

	if (error == ERESTART)
		error = ECANCELED;

	return (error);
}

int
sys___thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrsleep_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(clockid_t) clock_id;
		syscallarg(struct timespec *) tp;
		syscallarg(void *) lock;
		syscallarg(const int *) abort;
	} */ *uap = v;
	struct timespec ts;
	int error;

	if (SCARG(uap, tp) != NULL) {
		if ((error = copyin(SCARG(uap, tp), &ts, sizeof(ts)))) {
			*retval = error;
			return 0;
		}
		if (!timespecisvalid(&ts)) {
			*retval = EINVAL;
			return 0;
		}
		SCARG(uap, tp) = &ts;
	}

	*retval = thrsleep(p, uap);
	return 0;
}

int
sys___thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys___thrwakeup_args /* {
		syscallarg(const volatile void *) ident;
		syscallarg(int) n;
	} */ *uap = v;
	struct tslpentry *entry, *tmp;
	struct tslpqueue *queue;
	struct rwlock *qlock;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	int found = 0;

	if (ident == 0)
		*retval = EINVAL;
	else {
		if (ident == -1) {
			queue = &thrsleep_queue;
			qlock = &thrsleep_lock;
			/*
			 * Wake up all waiters with ident -1.  This is needed
			 * because ident -1 can be shared by multiple userspace
			 * lock state machines concurrently.  The implementation
			 * has no way to direct the wakeup to a particular
			 * state machine.
			 */
			n = 0;
		} else {
			queue = &p->p_p->ps_tslpqueue;
			qlock = &p->p_p->ps_lock;
		}

		rw_enter_write(qlock);
		TAILQ_FOREACH_SAFE(entry, queue, tslp_link, tmp) {
			if (entry->tslp_ident == ident) {
				TAILQ_REMOVE(queue, entry, tslp_link);
				entry->tslp_ident = 0;
				wakeup_one(entry);
				if (++found == n)
					break;
			}
		}
		rw_exit_write(qlock);

		if (ident == -1)
			*retval = 0;
		else
			*retval = found ? 0 : ESRCH;
	}

	return (0);
}
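
/*
 * Userland view (editor's sketch; librthread is the real consumer of
 * these syscalls, and `waitchan' and `spin' are hypothetical): a
 * futex-like block/unblock on a process-private ident.  Publishing the
 * sleeper before thrsleep_unlock() copies out the unlocked spinlock is
 * what keeps a concurrent __thrwakeup() from being lost.
 *
 *	static volatile int waitchan;
 *	static _atomic_lock_t spin;
 *
 *	__thrsleep(&waitchan, CLOCK_MONOTONIC, NULL, &spin, NULL);
 *	...
 *	__thrwakeup(&waitchan, 1);	// from another thread
 */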

void
refcnt_init(struct refcnt *r)
{
	refcnt_init_trace(r, 0);
}

void
refcnt_init_trace(struct refcnt *r, int idx)
{
	r->r_traceidx = idx;
	atomic_store_int(&r->r_refs, 1);
	TRACEINDEX(refcnt, r->r_traceidx, r, 0, +1);
}

void
refcnt_take(struct refcnt *r)
{
	u_int refs;

	refs = atomic_inc_int_nv(&r->r_refs);
	KASSERT(refs != 0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs - 1, +1);
	(void)refs;
}

int
refcnt_rele(struct refcnt *r)
{
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	if (refs == 0) {
		membar_enter_after_atomic();
		return (1);
	}
	return (0);
}

void
refcnt_rele_wake(struct refcnt *r)
{
	if (refcnt_rele(r))
		wakeup_one(r);
}

void
refcnt_finalize(struct refcnt *r, const char *wmesg)
{
	struct sleep_state sls;
	u_int refs;

	membar_exit_before_atomic();
	refs = atomic_dec_int_nv(&r->r_refs);
	KASSERT(refs != ~0);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs + 1, -1);
	while (refs) {
		sleep_setup(&sls, r, PWAIT, wmesg, 0);
		refs = atomic_load_int(&r->r_refs);
		sleep_finish(&sls, refs);
	}
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	/* Order subsequent loads and stores after refs == 0 load. */
	membar_sync();
}
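
/*
 * Lifecycle sketch (editor's addition; `foo' and "foofin" are
 * hypothetical): the creator holds the initial reference and
 * refcnt_finalize() consumes it, sleeping until every
 * refcnt_rele_wake() from other threads has arrived.
 *
 *	refcnt_init(&f->f_refs);		// refs = 1 (creator)
 *
 *	refcnt_take(&f->f_refs);		// each additional user
 *	...
 *	refcnt_rele_wake(&f->f_refs);		// user is done
 *
 *	refcnt_finalize(&f->f_refs, "foofin");	// creator waits for users
 *	free(f, M_DEVBUF, sizeof(*f));		// now safe to free
 */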

int
refcnt_shared(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs > 1);
}

unsigned int
refcnt_read(struct refcnt *r)
{
	u_int refs;

	refs = atomic_load_int(&r->r_refs);
	TRACEINDEX(refcnt, r->r_traceidx, r, refs, 0);
	return (refs);
}

void
cond_init(struct cond *c)
{
	atomic_store_int(&c->c_wait, 1);
}

void
cond_signal(struct cond *c)
{
	atomic_store_int(&c->c_wait, 0);

	wakeup_one(c);
}

void
cond_wait(struct cond *c, const char *wmesg)
{
	struct sleep_state sls;
	unsigned int wait;

	wait = atomic_load_int(&c->c_wait);
	while (wait) {
		sleep_setup(&sls, c, PWAIT, wmesg, 0);
		wait = atomic_load_int(&c->c_wait);
		sleep_finish(&sls, wait);
	}
}
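
/*
 * Usage sketch (editor's addition; start_fn is hypothetical): cond is
 * a one-shot handshake, e.g. waiting for a kernel thread to announce
 * that it is running.
 *
 *	struct cond c;
 *
 *	cond_init(&c);			// c_wait = 1
 *	kthread_create(start_fn, &c, NULL, "example");
 *	cond_wait(&c, "condex");	// sleeps until signalled
 *
 * and in start_fn():
 *
 *	cond_signal(&c);		// c_wait = 0, wake the waiter
 */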