FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_synch.c
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/kern/kern_synch.c 253203 2013-07-11 12:42:46Z avg $");

#include "opt_kdtrace.h"
#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static int pause_wchan;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
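
/*
 * A quick sanity check on the constants above: with a 5 second sampling
 * interval, a W-second averaging window decays by exp(-5/W) per sample,
 * hence:
 *
 *	 1 min:	exp(-5/60)  = exp(-1/12)
 *	 5 min:	exp(-5/300) = exp(-1/60)
 *	15 min:	exp(-5/900) = exp(-1/180)
 *
 * which are exactly the three cexp[] entries (scaled by FSCALE).
 */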

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);

SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt, preempt);

/*
 * These probes reference Solaris features that are not implemented in FreeBSD.
 * Create the probes anyway for compatibility with existing D scripts; they'll
 * just never fire.
 */
SDT_PROBE_DEFINE(sched, , , cpucaps_sleep, cpucaps-sleep);
SDT_PROBE_DEFINE(sched, , , cpucaps_wakeup, cpucaps-wakeup);
SDT_PROBE_DEFINE(sched, , , schedctl_nopreempt, schedctl-nopreempt);
SDT_PROBE_DEFINE(sched, , , schedctl_preempt, schedctl-preempt);
SDT_PROBE_DEFINE(sched, , , schedctl_yield, schedctl-yield);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened and EWOULDBLOCK if the timeout expires.  If
 * PCATCH is set and a signal needs to be delivered, ERESTART is returned
 * if the current system call should be restarted if possible, and EINTR
 * is returned if the system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	int catch, flags, lock_state, pri, rval;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0, wmesg);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if (ident == &pause_wchan)
		flags = SLEEPQ_PAUSE;
	else
		flags = SLEEPQ_SLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;
	if (priority & PBDRY)
		flags |= SLEEPQ_STOP_ON_BDRY;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, flags, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (timo)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
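
/*
 * A minimal usage sketch (hypothetical driver fields, not part of this
 * file): callers normally reach _sleep() through the msleep()/tsleep()
 * wrappers, sleeping on the address of the condition being waited for,
 * with the state protected by the mutex handed to msleep():
 *
 *	mtx_lock(&sc->sc_mtx);
 *	while (sc->sc_ready == 0) {
 *		error = msleep(&sc->sc_ready, &sc->sc_mtx, PCATCH,
 *		    "scrdy", hz);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;		(EINTR/ERESTART: signal caught)
 *	}
 *	mtx_unlock(&sc->sc_mtx);
 *
 * The producer side sets sc_ready under sc_mtx and then calls
 * wakeup(&sc->sc_ready) on the same channel address.
 */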

int
msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold || SCHEDULER_STOPPED()) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * We can't call ktrace with any spin locks held, since ktrace
	 * may need to acquire the ktrace_mtx lock, and WITNESS_WARN
	 * considers it an error to hold any spin lock.  Thus, we have
	 * to drop the sleepq spin lock while we handle those requests.
	 * This is safe since we have already placed our thread on the
	 * sleep queue.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0, wmesg);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0, wmesg);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
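
/*
 * Usage mirrors the msleep() sketch above, except that the channel is
 * protected by a spin mutex and no priority or PCATCH bits can be
 * passed.  A hypothetical sketch (field names assumed):
 *
 *	mtx_lock_spin(&sc->sc_intr_mtx);
 *	while (sc->sc_intr_pending == 0)
 *		msleep_spin(&sc->sc_intr_pending, &sc->sc_intr_mtx,
 *		    "scintr", hz);
 *	mtx_unlock_spin(&sc->sc_intr_mtx);
 */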

/*
 * pause() delays the calling thread by the given number of system ticks.
 * During cold bootup, pause() uses the DELAY() function instead of
 * the tsleep() function to do the waiting.  The "timo" argument must be
 * greater than or equal to zero.  A "timo" value of zero is equivalent
 * to a "timo" value of one.
 */
int
pause(const char *wmesg, int timo)
{
	KASSERT(timo >= 0, ("pause: timo must be >= 0"));

	/* silently convert invalid timeouts */
	if (timo < 1)
		timo = 1;

	if (cold) {
		/*
		 * We delay one second (hz ticks) at a time to avoid
		 * overflowing the system specific DELAY() function(s):
		 */
		while (timo >= hz) {
			DELAY(1000000);
			timo -= hz;
		}
		if (timo > 0)
			DELAY(timo * tick);
		return (0);
	}
	return (tsleep(&pause_wchan, 0, wmesg, timo));
}
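
/*
 * Example (hypothetical caller): delay roughly 100 ms without needing a
 * wakeup channel, e.g. while waiting for hardware to come out of reset;
 * this is safe even before the scheduler is running:
 *
 *	pause("hwrst", hz / 10);
 */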

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (SCHEDULER_STOPPED())
		return;
	if (flags & SW_VOL) {
		td->td_ru.ru_nvcsw++;
		td->td_swvoltick = ticks;
	} else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif
	SDT_PROBE0(sched, , , preempt);
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
	sched_switch(td, newtd, flags);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
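
/*
 * Written out, the update above is the standard exponential moving
 * average in fixed point:
 *
 *	load' = load * exp(-t/T) + n * (1 - exp(-t/T))
 *
 * with t about 5 seconds, T the 60/300/900 second window and n the
 * current run queue length.  Both cexp[] and ldavg[] carry a factor of
 * FSCALE, so the product carries two; the final >> FSHIFT removes the
 * redundant one.
 */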

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return ((unsigned int)(ticks - curthread->td_swvoltick) >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}
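
/*
 * A hypothetical long-running kernel loop (names assumed for
 * illustration) would call maybe_yield() once per iteration so that it
 * voluntarily reschedules after roughly hogticks of CPU time instead of
 * hogging the processor:
 *
 *	for (i = 0; i < nitems; i++) {
 *		process_item(&items[i]);
 *		maybe_yield();
 *	}
 */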

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
sys_yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}