FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_synch.c
1 /*-
2 * Copyright (c) 1982, 1986, 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
39 * $FreeBSD: src/sys/kern/kern_synch.c,v 1.26.2.6 1999/11/30 09:05:00 peter Exp $
40 */
41
42 #include "opt_ktrace.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/proc.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/signalvar.h>
52 #include <sys/vmmeter.h>
53 #include <sys/sysctl.h>
54 #include <vm/vm.h>
55 #include <vm/vm_param.h>
56 #include <vm/vm_extern.h>
57 #ifdef KTRACE
58 #include <sys/ktrace.h>
59 #endif
60
61 #include <machine/cpu.h>
62
63 static void rqinit __P((void *));
64 SYSINIT(runqueue, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, rqinit, NULL)
65
66 u_char curpriority; /* usrpri of curproc */
67 int lbolt; /* once a second sleep address */
68
69 extern void endtsleep __P((void *));
70 extern void updatepri __P((struct proc *p));
71
72 #define MAXIMUM_SCHEDULE_QUANTUM (1000000) /* arbitrary limit */
73 #ifndef DEFAULT_SCHEDULE_QUANTUM
74 #define DEFAULT_SCHEDULE_QUANTUM 10
75 #endif
76 static int quantum = DEFAULT_SCHEDULE_QUANTUM; /* default value */
77
78 static int
79 sysctl_kern_quantum SYSCTL_HANDLER_ARGS
80 {
81 int error;
82 int new_val;
83
84 new_val = quantum;
85 error = sysctl_handle_int(oidp, &new_val, 0, req);
86 if (error == 0) {
87 if ((new_val > 0) && (new_val < MAXIMUM_SCHEDULE_QUANTUM)) {
88 quantum = new_val;
89 } else {
90 error = EINVAL;
91 }
92 }
93 return (error);
94 }
95
96 SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
97 0, sizeof quantum, sysctl_kern_quantum, "I", "");
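/*
 * Illustrative sketch, not part of the original file: kern.quantum can be
 * read and tuned from userland with sysctlbyname(3).  As used by
 * roundrobin() below, the value is the number of forced reschedules per
 * second (the default of 10 gives the 100ms round-robin interval).  The
 * value 20 chosen here is only an example.
 */
#if 0	/* example only */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int cur, newval = 20;
	size_t len = sizeof(cur);

	if (sysctlbyname("kern.quantum", &cur, &len, NULL, 0) == -1)
		return (1);
	printf("kern.quantum is %d\n", cur);
	/*
	 * Setting it requires root; the handler above rejects values
	 * outside (0, MAXIMUM_SCHEDULE_QUANTUM) with EINVAL.
	 */
	if (sysctlbyname("kern.quantum", NULL, NULL, &newval, sizeof(newval)) == -1)
		return (1);
	return (0);
}
#endif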
98
99 /*
100 * Force switch among equal priority processes every 100ms.
101 */
102 /* ARGSUSED */
103 void
104 roundrobin(arg)
105 void *arg;
106 {
107
108 need_resched();
109 timeout(roundrobin, NULL, hz / quantum);
110 }
111
112 /*
113 * Constants for digital decay and forget:
114 * 90% of (p_estcpu) usage in 5 * loadav time
115 * 95% of (p_pctcpu) usage in 60 seconds (load insensitive)
116 * Note that, as ps(1) mentions, this can let percentages
117 * total over 100% (I've seen 137.9% for 3 processes).
118 *
119 * Note that statclock updates p_estcpu and p_cpticks independently.
120 *
121 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
122 * That is, the system wants to compute a value of decay such
123 * that the following for loop:
124 * for (i = 0; i < (5 * loadavg); i++)
125 * p_estcpu *= decay;
126 * will compute
127 * p_estcpu *= 0.1;
128 * for all values of loadavg:
129 *
130 * Mathematically this loop can be expressed by saying:
131 * decay ** (5 * loadavg) ~= .1
132 *
133 * The system computes decay as:
134 * decay = (2 * loadavg) / (2 * loadavg + 1)
135 *
136 * We wish to prove that the system's computation of decay
137 * will always fulfill the equation:
138 * decay ** (5 * loadavg) ~= .1
139 *
140 * If we compute b as:
141 * b = 2 * loadavg
142 * then
143 * decay = b / (b + 1)
144 *
145 * We now need to prove two things:
146 * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
147 * 2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
148 *
149 * Facts:
150 * For x close to zero, exp(x) =~ 1 + x, since
151 * exp(x) = 1 + x**1/1! + x**2/2! + ... .
152 * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
153 * For x close to zero, ln(1+x) =~ x, since
154 * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
155 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
156 * ln(.1) =~ -2.30
157 *
158 * Proof of (1):
159 * Solve (factor)**(power) =~ .1 given power (5*loadav):
160 * solving for factor,
161 * ln(factor) =~ (-2.30/5*loadav), or
162 * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
163 * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
164 *
165 * Proof of (2):
166 * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
167 * solving for power,
168 * power*ln(b/(b+1)) =~ -2.30, or
169 * power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav. QED
170 *
171 * Actual power values for the implemented algorithm are as follows:
172 * loadav: 1 2 3 4
173 * power: 5.68 10.32 14.94 19.55
174 */
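/*
 * Illustrative check, not part of the original file: a small userland
 * program (compile with -lm) that recomputes the "power" table above,
 * i.e. how many multiplications by decay = 2*loadav/(2*loadav + 1) it
 * takes to reach 10% of the original value.
 */
#if 0	/* example only */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav, decay;

	for (loadav = 1.0; loadav <= 4.0; loadav += 1.0) {
		decay = (2.0 * loadav) / (2.0 * loadav + 1.0);
		/* solve decay**power == 0.1 for power */
		printf("loadav %.0f: power %.2f (5*loadav = %.0f)\n",
		    loadav, log(0.1) / log(decay), 5.0 * loadav);
	}
	return (0);
}
#endif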
175
176 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
177 #define loadfactor(loadav) (2 * (loadav))
178 #define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))
179
180 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
181 fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
182
183 /*
184 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
185 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
186 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
187 *
188 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
189 * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
190 *
191 * If you don't want to bother with the faster/more-accurate formula, you
192 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
193 * (more general) method of calculating the %age of CPU used by a process.
194 */
195 #define CCPU_SHIFT 11
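/*
 * Illustrative arithmetic, not part of the original file: why CCPU_SHIFT
 * of 11 matches the hz == 100 fast path in schedcpu() below.  Per tick of
 * CPU time, p_pctcpu should gain (1 - exp(-1/20)) / hz =~ 0.0488 / 100 =~
 * 0.000488, and 0.000488 * 2**11 =~ 1, i.e. one unit of an 11-bit binary
 * fraction.  Hence p_cpticks can simply be shifted left by
 * (FSHIFT - CCPU_SHIFT) instead of being scaled by (FSCALE - ccpu) / hz.
 */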
196
197 /*
198 * Recompute process priorities, every hz ticks.
199 */
200 /* ARGSUSED */
201 void
202 schedcpu(arg)
203 void *arg;
204 {
205 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
206 register struct proc *p;
207 register int s;
208 register unsigned int newcpu;
209
210 wakeup((caddr_t)&lbolt);
211 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
212 /*
213 * Increment time in/out of memory and sleep time
214 * (if sleeping). We ignore overflow; with 16-bit int's
215 * (remember them?) overflow takes 45 days.
216 */
217 p->p_swtime++;
218 if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
219 p->p_slptime++;
220 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
221 /*
222 * If the process has slept the entire second,
223 * stop recalculating its priority until it wakes up.
224 */
225 if (p->p_slptime > 1)
226 continue;
227 s = splhigh(); /* prevent state changes and protect run queue */
228 /*
229 * p_pctcpu is only for ps.
230 */
231 #if (FSHIFT >= CCPU_SHIFT)
232 p->p_pctcpu += (hz == 100)?
233 ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
234 100 * (((fixpt_t) p->p_cpticks)
235 << (FSHIFT - CCPU_SHIFT)) / hz;
236 #else
237 p->p_pctcpu += ((FSCALE - ccpu) *
238 (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
239 #endif
240 p->p_cpticks = 0;
241 newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
242 p->p_estcpu = min(newcpu, UCHAR_MAX);
243 resetpriority(p);
244 if (p->p_priority >= PUSER) {
245 #define PPQ (128 / NQS) /* priorities per queue */
246 if ((p != curproc) &&
247 p->p_stat == SRUN &&
248 (p->p_flag & P_INMEM) &&
249 (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
250 remrq(p);
251 p->p_priority = p->p_usrpri;
252 setrunqueue(p);
253 } else
254 p->p_priority = p->p_usrpri;
255 }
256 splx(s);
257 }
258 vmmeter();
259 timeout(schedcpu, (void *)0, hz);
260 }
261
262 /*
263 * Recalculate the priority of a process after it has slept for a while.
264 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
265 * least six times the loadfactor will decay p_estcpu to zero.
266 */
267 void
268 updatepri(p)
269 register struct proc *p;
270 {
271 register unsigned int newcpu = p->p_estcpu;
272 register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
273
274 if (p->p_slptime > 5 * loadfac)
275 p->p_estcpu = 0;
276 else {
277 p->p_slptime--; /* the first time was done in schedcpu */
278 while (newcpu && --p->p_slptime)
279 newcpu = (int) decay_cpu(loadfac, newcpu);
280 p->p_estcpu = min(newcpu, UCHAR_MAX);
281 }
282 resetpriority(p);
283 }
284
285 /*
286 * We're only looking at 7 bits of the address; everything is
287 * aligned to 4, lots of things are aligned to greater powers
288 * of 2. Shift right by 8, i.e. drop the bottom 256 worth.
289 */
290 #define TABLESIZE 128
291 TAILQ_HEAD(slpquehead, proc) slpque[TABLESIZE];
292 #define LOOKUP(x) (((long)(x) >> 8) & (TABLESIZE - 1))
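/*
 * Illustrative example, not part of the original file: with TABLESIZE of
 * 128, two wait channels that differ only in their low 8 bits hash to the
 * same slot, e.g.
 *	LOOKUP(0xf0123400) == (0xf01234 & 127) == 52
 *	LOOKUP(0xf01234c0) == (0xf01234 & 127) == 52
 * Both sleepers then share slpque[52]; wakeup() still compares p_wchan
 * against the exact identifier before waking anyone, so collisions only
 * cost a little extra scanning.
 */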
293
294 /*
295 * During autoconfiguration or after a panic, a sleep will simply
296 * lower the priority briefly to allow interrupts, then return.
297 * The priority to be used (safepri) is machine-dependent, thus this
298 * value is initialized and maintained in the machine-dependent layers.
299 * This priority will typically be 0, or the lowest priority
300 * that is safe for use on the interrupt stack; it can be made
301 * higher to block network software interrupts after panics.
302 */
303 int safepri;
304
305 void
306 sleepinit()
307 {
308 int i;
309
310 for (i = 0; i < TABLESIZE; i++)
311 TAILQ_INIT(&slpque[i]);
312 }
313
314 /*
315 * General sleep call. Suspends the current process until a wakeup is
316 * performed on the specified identifier. The process will then be made
317 * runnable with the specified priority. Sleeps at most timo/hz seconds
318 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
319 * checked before and after sleeping; otherwise signals are not checked.
320 * Returns 0 if awakened, or EWOULDBLOCK if the timeout expires.  If PCATCH
321 * is set and a signal needs to be delivered, ERESTART is returned if the
322 * current system call should be restarted if possible, and EINTR is
323 * returned if the system call should be interrupted by the signal.
324 */
325 int
326 tsleep(ident, priority, wmesg, timo)
327 void *ident;
328 int priority, timo;
329 char *wmesg;
330 {
331 struct proc *p = curproc;
332 int s, sig, catch = priority & PCATCH;
333
334 #ifdef KTRACE
335 if (p && KTRPOINT(p, KTR_CSW))
336 ktrcsw(p->p_tracep, 1, 0);
337 #endif
338 s = splhigh();
339 if (cold || panicstr) {
340 /*
341 * After a panic, or during autoconfiguration,
342 * just give interrupts a chance, then just return;
343 * don't run any other procs or panic below,
344 * in case this is the idle process and already asleep.
345 */
346 splx(safepri);
347 splx(s);
348 return (0);
349 }
350 #ifdef DIAGNOSTIC
351 if(p == NULL)
352 panic("tsleep1");
353 if (ident == NULL || p->p_stat != SRUN)
354 panic("tsleep");
355 /* XXX This is not exhaustive, just the most common case */
356 if ((p->p_procq.tqe_prev != NULL) && (*p->p_procq.tqe_prev == p))
357 panic("sleeping process already on another queue");
358 #endif
359 p->p_wchan = ident;
360 p->p_wmesg = wmesg;
361 p->p_slptime = 0;
362 p->p_priority = priority & PRIMASK;
363 TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
364 if (timo)
365 timeout(endtsleep, (void *)p, timo);
366 /*
367 * We put ourselves on the sleep queue and start our timeout
368 * before calling CURSIG, as we could stop there, and a wakeup
369 * or a SIGCONT (or both) could occur while we were stopped.
370 * A SIGCONT would cause us to be marked as SSLEEP
371 * without resuming us, thus we must be ready for sleep
372 * when CURSIG is called. If the wakeup happens while we're
373 * stopped, p->p_wchan will be 0 upon return from CURSIG.
374 */
375 if (catch) {
376 p->p_flag |= P_SINTR;
377 if ((sig = CURSIG(p))) {
378 if (p->p_wchan)
379 unsleep(p);
380 p->p_stat = SRUN;
381 goto resume;
382 }
383 if (p->p_wchan == 0) {
384 catch = 0;
385 goto resume;
386 }
387 } else
388 sig = 0;
389 p->p_stat = SSLEEP;
390 p->p_stats->p_ru.ru_nvcsw++;
391 mi_switch();
392 resume:
393 curpriority = p->p_usrpri;
394 splx(s);
395 p->p_flag &= ~P_SINTR;
396 if (p->p_flag & P_TIMEOUT) {
397 p->p_flag &= ~P_TIMEOUT;
398 if (sig == 0) {
399 #ifdef KTRACE
400 if (KTRPOINT(p, KTR_CSW))
401 ktrcsw(p->p_tracep, 0, 0);
402 #endif
403 return (EWOULDBLOCK);
404 }
405 } else if (timo)
406 untimeout(endtsleep, (void *)p);
407 if (catch && (sig != 0 || (sig = CURSIG(p)))) {
408 #ifdef KTRACE
409 if (KTRPOINT(p, KTR_CSW))
410 ktrcsw(p->p_tracep, 0, 0);
411 #endif
412 if (p->p_sigacts->ps_sigintr & sigmask(sig))
413 return (EINTR);
414 return (ERESTART);
415 }
416 #ifdef KTRACE
417 if (KTRPOINT(p, KTR_CSW))
418 ktrcsw(p->p_tracep, 0, 0);
419 #endif
420 return (0);
421 }
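/*
 * Illustrative usage sketch, not part of the original file: a typical
 * consumer sleeps on an address until an interrupt handler posts the
 * condition and calls wakeup() on the same address.  The names
 * example_softc, sc_ready, example_wait, example_intr and the "exwait"
 * wait message are hypothetical and exist only for this illustration.
 */
#if 0	/* example only */
struct example_softc {
	int	sc_ready;		/* set by the interrupt handler */
};

static int
example_wait(sc)
	struct example_softc *sc;
{
	int error, s;

	s = splhigh();		/* keep example_intr() out while testing sc_ready */
	while (sc->sc_ready == 0) {
		/* catch signals, give up after 5 seconds */
		error = tsleep((caddr_t)&sc->sc_ready, PWAIT | PCATCH,
		    "exwait", 5 * hz);
		if (error) {	/* EWOULDBLOCK, EINTR or ERESTART */
			splx(s);
			return (error);
		}
	}
	sc->sc_ready = 0;
	splx(s);
	return (0);
}

static void
example_intr(sc)
	struct example_softc *sc;
{

	sc->sc_ready = 1;
	wakeup((caddr_t)&sc->sc_ready);	/* makes the sleeper runnable */
}
#endif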
422
423 /*
424 * Implement timeout for tsleep.
425 * If process hasn't been awakened (wchan non-zero),
426 * set timeout flag and undo the sleep. If proc
427 * is stopped, just unsleep so it will remain stopped.
428 */
429 void
430 endtsleep(arg)
431 void *arg;
432 {
433 register struct proc *p;
434 int s;
435
436 p = (struct proc *)arg;
437 s = splhigh();
438 if (p->p_wchan) {
439 if (p->p_stat == SSLEEP)
440 setrunnable(p);
441 else
442 unsleep(p);
443 p->p_flag |= P_TIMEOUT;
444 }
445 splx(s);
446 }
447
448 /*
449 * Remove a process from its wait queue
450 */
451 void
452 unsleep(p)
453 register struct proc *p;
454 {
455 int s;
456
457 s = splhigh();
458 if (p->p_wchan) {
459 TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_procq);
460 p->p_wchan = 0;
461 }
462 splx(s);
463 }
464
465 /*
466 * Make all processes sleeping on the specified identifier runnable.
467 */
468 void
469 wakeup(ident)
470 register void *ident;
471 {
472 register struct slpquehead *qp;
473 register struct proc *p;
474 int s;
475
476 s = splhigh();
477 qp = &slpque[LOOKUP(ident)];
478 restart:
479 for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
480 #ifdef DIAGNOSTIC
481 if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
482 panic("wakeup");
483 #endif
484 if (p->p_wchan == ident) {
485 TAILQ_REMOVE(qp, p, p_procq);
486 p->p_wchan = 0;
487 if (p->p_stat == SSLEEP) {
488 /* OPTIMIZED EXPANSION OF setrunnable(p); */
489 if (p->p_slptime > 1)
490 updatepri(p);
491 p->p_slptime = 0;
492 p->p_stat = SRUN;
493 if (p->p_flag & P_INMEM) {
494 setrunqueue(p);
495 need_resched();
496 } else {
497 p->p_flag |= P_SWAPINREQ;
498 wakeup((caddr_t)&proc0);
499 }
500 /* END INLINE EXPANSION */
501 goto restart;
502 }
503 }
504 }
505 splx(s);
506 }
507
508 /*
509 * Make a process sleeping on the specified identifier runnable.
510 * May wake more than one process if a target process is currently
511 * swapped out.
512 */
513 void
514 wakeup_one(ident)
515 register void *ident;
516 {
517 register struct slpquehead *qp;
518 register struct proc *p;
519 int s;
520
521 s = splhigh();
522 qp = &slpque[LOOKUP(ident)];
523
524 for (p = qp->tqh_first; p != NULL; p = p->p_procq.tqe_next) {
525 #ifdef DIAGNOSTIC
526 if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
527 panic("wakeup_one");
528 #endif
529 if (p->p_wchan == ident) {
530 TAILQ_REMOVE(qp, p, p_procq);
531 p->p_wchan = 0;
532 if (p->p_stat == SSLEEP) {
533 /* OPTIMIZED EXPANSION OF setrunnable(p); */
534 if (p->p_slptime > 1)
535 updatepri(p);
536 p->p_slptime = 0;
537 p->p_stat = SRUN;
538 if (p->p_flag & P_INMEM) {
539 setrunqueue(p);
540 need_resched();
541 break;
542 } else {
543 p->p_flag |= P_SWAPINREQ;
544 wakeup((caddr_t)&proc0);
545 }
546 /* END INLINE EXPANSION */
547 }
548 }
549 }
550 splx(s);
551 }
552
553 /*
554 * The machine independent parts of mi_switch().
555 * Must be called at splstatclock() or higher.
556 */
557 void
558 mi_switch()
559 {
560 register struct proc *p = curproc; /* XXX */
561 register struct rlimit *rlim;
562 register long s, u;
563 int x;
564 struct timeval tv;
565
566 /*
567 * XXX this spl is almost unnecessary. It is partly to allow for
568 * sloppy callers that don't do it (issignal() via CURSIG() is the
569 * main offender). It is partly to work around a bug in the i386
570 * cpu_switch() (the ipl is not preserved). We ran for years
571 * without it. I think there was only an interrupt latency problem.
572 * The main caller, tsleep(), does an splx() a couple of instructions
573 * after calling here. The buggy caller, issignal(), usually calls
574 * here at spl0() and sometimes returns at splhigh(). The process
575 * then runs for a little too long at splhigh(). The ipl gets fixed
576 * when the process returns to user mode (or earlier).
577 *
578 * It would probably be better to always call here at spl0(). Callers
579 * are prepared to give up control to another process, so they must
580 * be prepared to be interrupted. The clock stuff here may not
581 * actually need splstatclock().
582 */
583 x = splstatclock();
584
585 #ifdef DEBUG
586 if (p->p_simple_locks)
587 panic("sleep: holding simple lock");
588 #endif
589 /*
590 * Compute the amount of time during which the current
591 * process was running, and add that to its total so far.
592 */
593 microtime(&tv);
594 u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
595 s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
596 if (u < 0) {
597 u += 1000000;
598 s--;
599 } else if (u >= 1000000) {
600 u -= 1000000;
601 s++;
602 }
603 p->p_rtime.tv_usec = u;
604 p->p_rtime.tv_sec = s;
605
606 /*
607 * Check if the process exceeds its cpu resource allocation.
608 * If over max, kill it.
609 */
610 if (p->p_stat != SZOMB) {
611 rlim = &p->p_rlimit[RLIMIT_CPU];
612 if (s >= rlim->rlim_cur) {
613 if (s >= rlim->rlim_max)
614 killproc(p, "exceeded maximum CPU limit");
615 else {
616 psignal(p, SIGXCPU);
617 if (rlim->rlim_cur < rlim->rlim_max)
618 rlim->rlim_cur += 5;
619 }
620 }
621 }
622
623 /*
624 * Pick a new current process and record its start time.
625 */
626 cnt.v_swtch++;
627 cpu_switch(p);
628 microtime(&runtime);
629 splx(x);
630 }
631
632 /*
633 * Initialize the (doubly-linked) run queues
634 * to be empty.
635 */
636 /* ARGSUSED*/
637 static void
638 rqinit(dummy)
639 void *dummy;
640 {
641 register int i;
642
643 for (i = 0; i < NQS; i++) {
644 qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
645 rtqs[i].ph_link = rtqs[i].ph_rlink = (struct proc *)&rtqs[i];
646 idqs[i].ph_link = idqs[i].ph_rlink = (struct proc *)&idqs[i];
647 }
648 }
649
650 /*
651 * Change process state to be runnable,
652 * placing it on the run queue if it is in memory,
653 * and awakening the swapper if it isn't in memory.
654 */
655 void
656 setrunnable(p)
657 register struct proc *p;
658 {
659 register int s;
660
661 s = splhigh();
662 switch (p->p_stat) {
663 case 0:
664 case SRUN:
665 case SZOMB:
666 default:
667 panic("setrunnable");
668 case SSTOP:
669 case SSLEEP:
670 unsleep(p); /* e.g. when sending signals */
671 break;
672
673 case SIDL:
674 break;
675 }
676 p->p_stat = SRUN;
677 if (p->p_flag & P_INMEM)
678 setrunqueue(p);
679 splx(s);
680 if (p->p_slptime > 1)
681 updatepri(p);
682 p->p_slptime = 0;
683 if ((p->p_flag & P_INMEM) == 0) {
684 p->p_flag |= P_SWAPINREQ;
685 wakeup((caddr_t)&proc0);
686 }
687 else if (p->p_priority < curpriority)
688 need_resched();
689 }
690
691 /*
692 * Compute the priority of a process when running in user mode.
693 * Arrange to reschedule if the resulting priority is better
694 * than that of the current process.
695 */
696 void
697 resetpriority(p)
698 register struct proc *p;
699 {
700 register unsigned int newpriority;
701
702 if (p->p_rtprio.type == RTP_PRIO_NORMAL) {
703 newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
704 newpriority = min(newpriority, MAXPRI);
705 p->p_usrpri = newpriority;
706 if (newpriority < curpriority)
707 need_resched();
708 } else {
709 need_resched();
710 }
711 }