sys/kern/kern_synch.c
/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/kern/kern_synch.c 227572 2011-11-16 17:48:05Z jhb $");

#include "opt_ktrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

#ifdef XEN
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

#define	KTDSTATE(td)							\
	(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" :		\
	((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" :	\
	((td)->td_inhibitors & TDI_SWAPPED) != 0 ? "swapped" :		\
	((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" :		\
	((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")

static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int	hogticks;
static int pause_wchan;

static struct callout loadav_callout;

struct loadavg averunnable =
	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
/*
 * Constants for averages over 1, 5, and 15 minutes
 * when sampling at 5 second intervals.
 */
static fixpt_t cexp[3] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};

/* kernel uses `FSCALE', userland (SHOULD) use kern.fscale */
static int	fscale __unused = FSCALE;
SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");

static void	loadav(void *arg);

void
sleepinit(void)
{

	hogticks = (hz / 10) * 2;	/* Default only. */
	init_sleepqueues();
}

/*
 * General sleep call.  Suspends the current thread until a wakeup is
 * performed on the specified identifier.  The thread will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The lock argument is unlocked before the caller is suspended, and
 * re-locked before _sleep() returns.  If priority includes the PDROP
 * flag the lock is not re-locked before returning.
 */
int
_sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	struct lock_class *class;
	int catch, flags, lock_state, pri, rval;
	WITNESS_SAVE_DECL(lock_witness);

	td = curthread;
	p = td->td_proc;
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(1, 0);
#endif
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
	    "Sleeping on \"%s\"", wmesg);
	KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
	    ("sleeping without a lock"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
	if (priority & PDROP)
		KASSERT(lock != NULL && lock != &Giant.lock_object,
		    ("PDROP requires a non-Giant lock"));
	if (lock != NULL)
		class = LOCK_CLASS(lock);
	else
		class = NULL;

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		if (lock != NULL && priority & PDROP)
			class->lc_unlock(lock);
		return (0);
	}
	catch = priority & PCATCH;
	pri = priority & PRIMASK;

	/*
	 * If we are already on a sleep queue, then remove us from that
	 * sleep queue first.  We have to do this to handle recursive
	 * sleeps.
	 */
	if (TD_ON_SLEEPQ(td))
		sleepq_remove(td, td->td_wchan);

	if (ident == &pause_wchan)
		flags = SLEEPQ_PAUSE;
	else
		flags = SLEEPQ_SLEEP;
	if (catch)
		flags |= SLEEPQ_INTERRUPTIBLE;
	if (priority & PBDRY)
		flags |= SLEEPQ_STOP_ON_BDRY;

	sleepq_lock(ident);
	CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	if (lock == &Giant.lock_object)
		mtx_assert(&Giant, MA_OWNED);
	DROP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object &&
	    !(class->lc_flags & LC_SLEEPABLE)) {
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
	} else
		/* GCC needs to follow the Yellow Brick Road */
		lock_state = -1;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling thread_suspend_check, as we could stop there,
	 * and a wakeup or a SIGCONT (or both) could occur while we were
	 * stopped without resuming us.  Thus, we must be ready for sleep
	 * when cursig() is called.  If the wakeup happens while we're
	 * stopped, then td will no longer be on a sleep queue upon
	 * return from cursig().
	 */
	sleepq_add(ident, lock, wmesg, flags, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);
	if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
		sleepq_release(ident);
		WITNESS_SAVE(lock, lock_witness);
		lock_state = class->lc_unlock(lock);
		sleepq_lock(ident);
	}
	if (timo && catch)
		rval = sleepq_timedwait_sig(ident, pri);
	else if (timo)
		rval = sleepq_timedwait(ident, pri);
	else if (catch)
		rval = sleepq_wait_sig(ident, pri);
	else {
		sleepq_wait(ident, pri);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
		class->lc_lock(lock, lock_state);
		WITNESS_RESTORE(lock, lock_witness);
	}
	return (rval);
}
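
/*
 * Editor's sketch (not part of the original file): a typical consumer
 * of _sleep() goes through the msleep() wrapper from <sys/systm.h>,
 * which passes the mutex's lock_object down to _sleep().  The names
 * example_mtx, example_flag, and the wmesg "exwait" are hypothetical.
 * Guarded out so the file still compiles as-is.
 */
#if 0
static struct mtx example_mtx;
static int example_flag;

static int
example_wait(void)
{
	int error;

	mtx_lock(&example_mtx);
	while (example_flag == 0) {
		/*
		 * Sleep interruptibly (PCATCH) for at most one second;
		 * on timeout msleep() returns EWOULDBLOCK and we simply
		 * re-check the predicate.
		 */
		error = msleep(&example_flag, &example_mtx, PCATCH,
		    "exwait", hz);
		if (error != 0 && error != EWOULDBLOCK) {
			/* A signal arrived: EINTR or ERESTART. */
			mtx_unlock(&example_mtx);
			return (error);
		}
	}
	mtx_unlock(&example_mtx);
	return (0);
}
#endif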

int
msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
{
	struct thread *td;
	struct proc *p;
	int rval;
	WITNESS_SAVE_DECL(mtx);

	td = curthread;
	p = td->td_proc;
	KASSERT(mtx != NULL, ("sleeping without a mutex"));
	KASSERT(p != NULL, ("msleep1"));
	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));

	if (cold) {
		/*
		 * During autoconfiguration, just return;
		 * don't run any other threads or panic below,
		 * in case this is the idle thread and already asleep.
		 * XXX: this used to do "s = splhigh(); splx(safepri);
		 * splx(s);" to give interrupts a chance, but there is
		 * no way to give interrupts a chance now.
		 */
		return (0);
	}

	sleepq_lock(ident);
	CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
	    td->td_tid, p->p_pid, td->td_name, wmesg, ident);

	DROP_GIANT();
	mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED);
	WITNESS_SAVE(&mtx->lock_object, mtx);
	mtx_unlock_spin(mtx);

	/*
	 * We put ourselves on the sleep queue and start our timeout.
	 */
	sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
	if (timo)
		sleepq_set_timeout(ident, timo);

	/*
	 * We can't call ktrace with any spin locks held, since ktrace
	 * may need to acquire the ktrace_mtx lock, and WITNESS_WARN
	 * considers it an error to be called with any spin lock held.
	 * Thus, we have to drop the sleepq spin lock while we handle
	 * those requests.  This is safe since we have already placed
	 * our thread on the sleep queue.
	 */
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW)) {
		sleepq_release(ident);
		ktrcsw(1, 0);
		sleepq_lock(ident);
	}
#endif
#ifdef WITNESS
	sleepq_release(ident);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "Sleeping on \"%s\"",
	    wmesg);
	sleepq_lock(ident);
#endif
	if (timo)
		rval = sleepq_timedwait(ident, 0);
	else {
		sleepq_wait(ident, 0);
		rval = 0;
	}
#ifdef KTRACE
	if (KTRPOINT(td, KTR_CSW))
		ktrcsw(0, 0);
#endif
	PICKUP_GIANT();
	mtx_lock_spin(mtx);
	WITNESS_RESTORE(&mtx->lock_object, mtx);
	return (rval);
}
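
/*
 * Editor's sketch (not part of the original file): msleep_spin() is
 * the spin-mutex analogue of msleep(); note that it takes no priority
 * argument and cannot be interrupted by signals.  The spin mutex and
 * flag below are hypothetical.  Guarded out so the file compiles.
 */
#if 0
static struct mtx example_spin_mtx;	/* initialized with MTX_SPIN */
static int example_done;

static void
example_spin_wait(void)
{

	mtx_lock_spin(&example_spin_mtx);
	while (example_done == 0)
		(void)msleep_spin(&example_done, &example_spin_mtx,
		    "exspin", 0);
	mtx_unlock_spin(&example_spin_mtx);
}
#endif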

/*
 * pause() is like tsleep() except that the intention is to not be
 * explicitly woken up by another thread.  Instead, the current thread
 * simply wishes to sleep until the timeout expires.  It is
 * implemented using a dummy wait channel.
 */
int
pause(const char *wmesg, int timo)
{

	KASSERT(timo != 0, ("pause: timeout required"));
	return (tsleep(&pause_wchan, 0, wmesg, timo));
}
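
/*
 * Editor's sketch (not part of the original file): a caller that just
 * needs a delay, with nothing ever calling wakeup() on its behalf,
 * uses pause().  The wmesg "btoff" is hypothetical.
 */
#if 0
static void
example_backoff(void)
{

	/* Sleep roughly 100 ms; MAX() guards against a zero timeout. */
	pause("btoff", MAX(1, hz / 10));
}
#endif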

/*
 * Make all threads sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper) {
		KASSERT(ident != &proc0,
		    ("wakeup and wakeup_swapper and proc0"));
		kick_proc0();
	}
}

/*
 * Make a thread sleeping on the specified identifier runnable.
 * May wake more than one thread if a target thread is currently
 * swapped out.
 */
void
wakeup_one(void *ident)
{
	int wakeup_swapper;

	sleepq_lock(ident);
	wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(ident);
	if (wakeup_swapper)
		kick_proc0();
}
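
/*
 * Editor's sketch (not part of the original file): the producer half
 * of the hypothetical msleep() example earlier in this file.  The
 * wait channel passed to wakeup() must be the same address the
 * sleeper passed to msleep().
 */
#if 0
static void
example_post(void)
{

	mtx_lock(&example_mtx);
	example_flag = 1;
	/* Wake all sleepers; wakeup_one() would wake only the first. */
	wakeup(&example_flag);
	mtx_unlock(&example_mtx);
}
#endif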

static void
kdb_switch(void)
{
	thread_unlock(curthread);
	kdb_backtrace();
	kdb_reenter();
	panic("%s: did not reenter debugger", __func__);
}

/*
 * The machine independent parts of context switching.
 */
void
mi_switch(int flags, struct thread *newtd)
{
	uint64_t runtime, new_switchtime;
	struct thread *td;
	struct proc *p;

	td = curthread;			/* XXX */
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
	p = td->td_proc;		/* XXX */
	KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
	if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
		mtx_assert(&Giant, MA_NOTOWNED);
#endif
	KASSERT(td->td_critnest == 1 || panicstr,
	    ("mi_switch: switch in a critical section"));
	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
	    ("mi_switch: switch must be voluntary or involuntary"));
	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));

	/*
	 * Don't perform context switches from the debugger.
	 */
	if (kdb_active)
		kdb_switch();
	if (flags & SW_VOL)
		td->td_ru.ru_nvcsw++;
	else
		td->td_ru.ru_nivcsw++;
#ifdef SCHED_STATS
	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
	/*
	 * Compute the amount of time during which the current
	 * thread was running, and add that to its total so far.
	 */
	new_switchtime = cpu_ticks();
	runtime = new_switchtime - PCPU_GET(switchtime);
	td->td_runtime += runtime;
	td->td_incruntime += runtime;
	PCPU_SET(switchtime, new_switchtime);
	td->td_generation++;	/* bump preempt-detect counter */
	PCPU_INC(cnt.v_swtch);
	PCPU_SET(switchticks, ticks);
	CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
	if (TD_IS_IDLETHREAD(td))
		KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
		    "prio:%d", td->td_priority);
	else
		KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
		    "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
		    "lockname:\"%s\"", td->td_lockname);
#endif
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
	sched_switch(td, newtd, flags);
	KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
	    "prio:%d", td->td_priority);

	CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
	    td->td_tid, td->td_sched, p->p_pid, td->td_name);

	/*
	 * If the last thread was exiting, finish cleaning it up.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
}

/*
 * Change thread state to be runnable, placing it on the run queue if
 * it is in memory.  If it is swapped out, return true so our caller
 * will know to awaken the swapper.
 */
int
setrunnable(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(td->td_proc->p_state != PRS_ZOMBIE,
	    ("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
	switch (td->td_state) {
	case TDS_RUNNING:
	case TDS_RUNQ:
		return (0);
	case TDS_INHIBITED:
		/*
		 * If we are only inhibited because we are swapped out
		 * then arrange to swap in this process.  Otherwise just
		 * return.
		 */
		if (td->td_inhibitors != TDI_SWAPPED)
			return (0);
		/* FALLTHROUGH */
	case TDS_CAN_RUN:
		break;
	default:
		printf("state is 0x%x", td->td_state);
		panic("setrunnable(2)");
	}
	if ((td->td_flags & TDF_INMEM) == 0) {
		if ((td->td_flags & TDF_SWAPINREQ) == 0) {
			td->td_flags |= TDF_SWAPINREQ;
			return (1);
		}
	} else
		sched_wakeup(td);
	return (0);
}

/*
 * Compute a tenex style load average of a quantity on
 * 1, 5 and 15 minute intervals.
 */
static void
loadav(void *arg)
{
	int i, nrun;
	struct loadavg *avg;

	nrun = sched_load();
	avg = &averunnable;

	for (i = 0; i < 3; i++)
		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;

	/*
	 * Schedule the next update to occur after 5 seconds, but add a
	 * random variation to avoid synchronisation with processes that
	 * run at regular intervals.
	 */
	callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
	    loadav, NULL);
}
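
/*
 * Editor's note (not part of the original file): the update above is
 * the fixed-point form of the exponentially decaying average
 *
 *	avg = avg * exp(-t/T) + nrun * (1 - exp(-t/T))
 *
 * with t = 5 seconds between samples and T = 60, 300, or 900 seconds,
 * so cexp[0] = exp(-5/60) = exp(-1/12), and so on; everything is
 * scaled by FSCALE (1 << FSHIFT).  Worked example: starting from
 * avg = 0 with nrun = 1, a single 1-minute step yields
 * avg / FSCALE = 1 - exp(-1/12), roughly 0.080.
 */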

/* ARGSUSED */
static void
synch_setup(void *dummy)
{
	callout_init(&loadav_callout, CALLOUT_MPSAFE);

	/* Kick off timeout driven events by calling first time. */
	loadav(NULL);
}

int
should_yield(void)
{

	return (ticks - PCPU_GET(switchticks) >= hogticks);
}

void
maybe_yield(void)
{

	if (should_yield())
		kern_yield(PRI_USER);
}
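
/*
 * Editor's sketch (not part of the original file): a long-running
 * kernel loop calls maybe_yield() each iteration so that it gives up
 * the CPU once it has been running for at least hogticks.  The work
 * function is hypothetical.
 */
#if 0
static void
example_long_loop(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		example_process_item(i);	/* hypothetical work */
		maybe_yield();
	}
}
#endif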

void
kern_yield(int prio)
{
	struct thread *td;

	td = curthread;
	DROP_GIANT();
	thread_lock(td);
	if (prio == PRI_USER)
		prio = td->td_user_pri;
	if (prio >= 0)
		sched_prio(td, prio);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	PICKUP_GIANT();
}

/*
 * General purpose yield system call.
 */
int
yield(struct thread *td, struct yield_args *uap)
{

	thread_lock(td);
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
	thread_unlock(td);
	td->td_retval[0] = 0;
	return (0);
}