/*-
 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/***
Here is the logic:

If there are N processors, then there are at most N KSEs (kernel
schedulable entities) working to process threads that belong to a
KSEGROUP (kg).  If there are X of these KSEs actually running at the
moment in question, then there are at most M (= N - X) of these KSEs on
the run queue, as running KSEs are not on the queue.

Runnable threads are queued off the KSEGROUP in priority order.
If there are M or more threads runnable, the top M threads
(by priority) are 'preassigned' to the M KSEs not running.  The KSEs take
their priority from those threads and are put on the run queue.

The last thread that had a priority high enough to have a KSE associated
with it, AND IS ON THE RUN QUEUE, is pointed to by
kg->kg_last_assigned.  If no threads queued off the KSEGROUP have KSEs
assigned, either because all the available KSEs are actively running or
because there are no threads queued, that pointer is NULL.

When a KSE is taken off the run queue to run, we know it was associated
with the highest priority thread in the queue (at the head of the
queue).  If it is also the last assigned, we know M was 1 and must now
be 0.  Since the thread is no longer queued, that pointer must be
cleared.  Since we know there were no more KSEs available (M was 1 and
is now 0), and since we are not FREEING our KSE but using it, we know
there are STILL no more KSEs available, so the next thread in the
ksegrp list will not have a KSE to assign to it, and the pointer must
be made 'invalid' (NULL).

The pointer exists so that when a new thread is made runnable, its
priority can be compared with that of the last assigned thread to see
whether it should 'steal' its KSE, i.e. whether it lands 'earlier' on
the list than that thread or later.  If it's earlier, the KSE is
removed from the last assigned thread (which is then no longer assigned
a KSE) and reassigned to the new thread, which is placed earlier in the
list.  The pointer is then backed up to the previous thread (which may
or may not be the new thread).

When a thread sleeps or is removed, the KSE becomes available, and if
there are queued threads that are not assigned KSEs, the highest
priority one of them is assigned the KSE, which is then placed back on
the run queue at the appropriate place, and the kg->kg_last_assigned
pointer is adjusted down to point to it.

The following diagram shows 2 KSEs and 3 threads from a single process.

 RUNQ: --->KSE---KSE--...            (KSEs queued at priorities from threads)
              \    \____
               \        \
    KSEGROUP---thread--thread--thread    (queued in priority order)
        \                 /
         \_______________/
          (last_assigned)

The result of this scheme is that the M available KSEs are always
queued at the priorities they have inherited from the M highest
priority threads for that KSEGROUP.  If this situation changes, the
KSEs are reassigned to keep this true.
***/
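
/*
 * A minimal sketch of the invariant described above, kept under
 * "#if 0" like the example block further down this file.  It is never
 * compiled; the helper name and the use of KES_ONRUNQ as the "holds a
 * slot" test are assumptions for illustration only.
 */
#if 0
static void
ksegrp_check_assigned(struct ksegrp *kg)
{
	struct thread *td;
	int assigned;

	/* If nothing is assigned, no queued thread may hold a slot. */
	assigned = (kg->kg_last_assigned != NULL);
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		KASSERT(assigned ==
		    (td->td_kse->ke_state == KES_ONRUNQ),
		    ("assigned threads are not a contiguous prefix"));
		if (td == kg->kg_last_assigned)
			assigned = 0;	/* Everything after is slotless. */
	}
}
#endif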

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/5.4/sys/kern/kern_switch.c 145335 2005-04-20 19:11:07Z cvs2svn $");

#include "opt_sched.h"

#ifndef KERN_SWITCH_INCLUDE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#else /* KERN_SWITCH_INCLUDE */
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
#include <sys/smp.h>
#endif
#include <machine/critical.h>
#if defined(SMP) && defined(SCHED_4BSD)
#include <sys/sysctl.h>
#endif

#ifdef FULL_PREEMPTION
#ifndef PREEMPTION
#error "The FULL_PREEMPTION option requires the PREEMPTION option"
#endif
#endif

CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
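/*
 * For example, with the stock layout of 64 run queues split across
 * 32-bit status words (RQ_NQS == 64, RQB_BPW == 32, RQB_LEN == 2),
 * 32 * 2 == 64 and the assertion holds; shrinking or growing RQ_NQS
 * without resizing the bitmap fails here at compile time.  (The
 * numbers are illustrative of the common configuration, not fixed.)
 */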

#define td_kse td_sched

/************************************************************************
 * Functions that manipulate runnability from a thread perspective.	*
 ************************************************************************/
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on APs */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}

/*
 * Given a surplus system slot, try to assign a new runnable thread to it.
 * Called from:
 *  sched_thread_exit()  (local)
 *  sched_switch()  (local)
 *  remrunqueue()  (local)  (not at the moment)
 */
static void
slot_fill(struct ksegrp *kg)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	while (kg->kg_avail_opennings > 0) {
		/*
		 * Find the first unassigned thread
		 */
		if ((td = kg->kg_last_assigned) != NULL)
			td = TAILQ_NEXT(td, td_runq);
		else
			td = TAILQ_FIRST(&kg->kg_runq);

		/*
		 * If we found one, send it to the system scheduler.
		 */
		if (td) {
			kg->kg_last_assigned = td;
			sched_add(td, SRQ_YIELDING);
			CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
		} else {
			/* No threads to use up the slots; quit now. */
			break;
		}
	}
}
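/*
 * Usage note: slot_fill() only consumes openings; the matching
 * SLOT_RELEASE() is performed by the caller beforehand.  See
 * sched_thread_exit() near the bottom of this file for the canonical
 * release-then-fill pairing.
 */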

#ifdef SCHED_4BSD
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* Remove from the system run queue and free up a slot. */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
#endif

/*
 * Change the priority of a thread that is on the run queue.
 */
void
adjustrunqueue(struct thread *td, int newpri)
{
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));

	ke = td->td_kse;
	CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* We only care about the kse in the run queue. */
		td->td_priority = newpri;
		if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
			sched_rem(td);
			sched_add(td, SRQ_BORING);
		}
		return;
	}

	/* It is a threaded process */
	kg = td->td_ksegrp;
	if (ke->ke_state == KES_ONRUNQ) {
		if (kg->kg_last_assigned == td) {
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		}
		sched_rem(td);
	}
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	TD_SET_CAN_RUN(td);
	td->td_priority = newpri;
	setrunqueue(td, SRQ_BORING);
}

/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
 * priority has been adjusted and the ksegrp does not have a
 * free kse slot.  It determines whether a thread from the same
 * ksegrp should be preempted.  If so, it tries to switch threads
 * if the thread is on the same cpu, or notifies another cpu that
 * it should switch threads.
 */
static void
maybe_preempt_in_ksegrp(struct thread *td)
#if !defined(SMP)
{
	struct thread *running_thread;

#ifndef FULL_PREEMPTION
	int pri;

	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif
	mtx_assert(&sched_lock, MA_OWNED);
	running_thread = curthread;

	if (running_thread->td_ksegrp != td->td_ksegrp)
		return;

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);
#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}

#else /* SMP */
{
	struct thread *running_thread;
	int worst_pri;
	struct ksegrp *kg;
	cpumask_t cpumask, dontuse;
	struct pcpu *pc;
	struct pcpu *best_pcpu;
	struct thread *cputhread;

#ifndef FULL_PREEMPTION
	int pri;

	pri = td->td_priority;
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD))
		return;
#endif

	mtx_assert(&sched_lock, MA_OWNED);

	running_thread = curthread;

#if !defined(KSEG_PEEMPT_BEST_CPU)
	if (running_thread->td_ksegrp != td->td_ksegrp) {
#endif
		kg = td->td_ksegrp;

		/* If someone is ahead of this thread, wait our turn. */
		if (td != TAILQ_FIRST(&kg->kg_runq))
			return;

		worst_pri = td->td_priority;
		best_pcpu = NULL;
		dontuse = stopped_cpus | idle_cpus_mask;

		/*
		 * Find the cpu with the worst priority that runs a thread
		 * from the same ksegrp.  If more than one qualifies, prefer
		 * first the cpu this thread last ran on, then the current
		 * cpu.
		 */
		SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
			cpumask = pc->pc_cpumask;
			cputhread = pc->pc_curthread;

			if ((cpumask & dontuse) ||
			    cputhread->td_ksegrp != kg)
				continue;

			if (cputhread->td_priority > worst_pri) {
				worst_pri = cputhread->td_priority;
				best_pcpu = pc;
				continue;
			}

			if (cputhread->td_priority == worst_pri &&
			    best_pcpu != NULL &&
			    (td->td_lastcpu == pc->pc_cpuid ||
			    (PCPU_GET(cpumask) == cpumask &&
			    td->td_lastcpu != best_pcpu->pc_cpuid)))
				best_pcpu = pc;
		}

		/* Check if we need to preempt someone. */
		if (best_pcpu == NULL)
			return;

		if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
			best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
			ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
			return;
		}
#if !defined(KSEG_PEEMPT_BEST_CPU)
	}
#endif

	if (td->td_priority > running_thread->td_priority)
		return;
#ifdef PREEMPTION
	if (running_thread->td_critnest > 1)
		running_thread->td_pflags |= TDP_OWEPREEMPT;
	else
		mi_switch(SW_INVOL, NULL);
#else
	running_thread->td_flags |= TDF_NEEDRESCHED;
#endif
	return;
}
#endif /* !SMP */

int limitcount;
void
setrunqueue(struct thread *td, int flags)
{
	struct ksegrp *kg;
	struct thread *td2;
	struct thread *tda;

	CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
	    td, td->td_ksegrp, td->td_proc->p_pid);
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("setrunqueue: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("setrunqueue: bad thread state"));
	TD_SET_RUNQ(td);
	kg = td->td_ksegrp;
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/*
		 * Common path optimisation: Only one of everything
		 * and the KSE is always already attached.
		 * Totally ignore the ksegrp run queue.
		 */
		if (kg->kg_avail_opennings != 1) {
			if (limitcount < 1) {
				limitcount++;
				printf("pid %d: corrected slot count (%d->1)\n",
				    td->td_proc->p_pid, kg->kg_avail_opennings);
			}
			kg->kg_avail_opennings = 1;
		}
		sched_add(td, flags);
		return;
	}

	/*
	 * If the concurrency has been reduced, and we would land in the
	 * assigned section, keep removing entries from the system run
	 * queue until we are no longer in that section, or until there
	 * is room for us to be put in it.
	 * What we MUST avoid is the case where threads of lower priority
	 * than the new one are scheduled, but the new one cannot be
	 * scheduled itself.  That would lead to a non-contiguous set of
	 * scheduled threads, and everything would break.
	 */
	tda = kg->kg_last_assigned;
	while ((kg->kg_avail_opennings <= 0) &&
	    (tda && (tda->td_priority > td->td_priority))) {
		/*
		 * None free, but there is one we can commandeer.
		 */
		CTR2(KTR_RUNQ,
		    "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
		sched_rem(tda);
		tda = kg->kg_last_assigned =
		    TAILQ_PREV(tda, threadqueue, td_runq);
	}

	/*
	 * Add the thread to the ksegrp's run queue at
	 * the appropriate place.
	 */
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (td2->td_priority > td->td_priority) {
			kg->kg_runnable++;
			TAILQ_INSERT_BEFORE(td2, td, td_runq);
			break;
		}
	}
	if (td2 == NULL) {
		/* We ran off the end of the TAILQ or it was empty. */
		kg->kg_runnable++;
		TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
	}

	/*
	 * If we have a slot to use, then put the thread on the system
	 * run queue and, if needed, readjust the last_assigned pointer.
	 * It may be that we need to schedule something anyhow, even if
	 * the available slots are negative, so that all the items
	 * before last_assigned are scheduled.
	 */
	if (kg->kg_avail_opennings > 0) {
		if (tda == NULL) {
			/*
			 * No pre-existing last assigned, so whoever
			 * is first gets the slot (maybe us).
			 */
			td2 = TAILQ_FIRST(&kg->kg_runq);
			kg->kg_last_assigned = td2;
		} else if (tda->td_priority > td->td_priority) {
			td2 = td;
		} else {
			/*
			 * We are past last_assigned, so
			 * give the next slot to whatever is next,
			 * which may or may not be us.
			 */
			td2 = TAILQ_NEXT(tda, td_runq);
			kg->kg_last_assigned = td2;
		}
		sched_add(td2, flags);
	} else {
		CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
		    td, td->td_ksegrp, td->td_proc->p_pid);
		if ((flags & SRQ_YIELDING) == 0)
			maybe_preempt_in_ksegrp(td);
	}
}
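/*
 * A worked example of the slot-stealing path above (illustrative
 * numbers): concurrency is 2 with no free openings, the ksegrp queue
 * holds A (pri 4) and B (pri 8), both assigned, so kg_last_assigned
 * is B.  A new thread C at pri 6 arrives: the while loop sees that
 * tda == B and 8 > 6, takes B's slot with sched_rem() and backs
 * kg_last_assigned up to A; C is inserted between A and B; and since
 * tda (now A, pri 4) is not lower priority than C, the freed slot
 * goes to TAILQ_NEXT(A) == C via sched_add().  The assigned prefix
 * stays contiguous: A and C hold slots, B waits.
 */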

/*
 * Kernel thread preemption implementation.  Critical sections mark
 * regions of code in which preemptions are not allowed.
 */
void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 0)
		cpu_critical_enter(td);
	td->td_critnest++;
	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	if (td->td_critnest == 1) {
		if (td->td_pflags & TDP_WAKEPROC0) {
			td->td_pflags &= ~TDP_WAKEPROC0;
			wakeup(&proc0);
		}
#ifdef PREEMPTION
		mtx_assert(&sched_lock, MA_NOTOWNED);
		if (td->td_pflags & TDP_OWEPREEMPT) {
			mtx_lock_spin(&sched_lock);
			mi_switch(SW_INVOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
#endif
		td->td_critnest = 0;
		cpu_critical_exit(td);
	} else {
		td->td_critnest--;
	}
	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
}
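/*
 * An illustrative (never compiled) sketch of how the nesting above
 * behaves: only the outermost enter/exit pair touches the hardware
 * state via cpu_critical_enter()/cpu_critical_exit(), and a
 * preemption owed while nested (TDP_OWEPREEMPT) is paid on the final
 * critical_exit().  The function name is hypothetical.
 */
#if 0
static void
critical_nesting_example(void)
{
	critical_enter();	/* td_critnest 0 -> 1, cpu_ call made */
	critical_enter();	/* td_critnest 1 -> 2, no cpu_ call */
	critical_exit();	/* td_critnest 2 -> 1 */
	critical_exit();	/* any owed preemption is taken here */
}
#endif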

/*
 * This function is called when a thread is about to be put on a run
 * queue because it has been made runnable or its priority has been
 * adjusted.  It determines whether the system should switch to the new
 * thread immediately.  If so, it switches to it and returns 1.  If
 * not, it returns 0 so that the caller may place the thread on an
 * appropriate run queue.
 */
int
maybe_preempt(struct thread *td)
{
#ifdef PREEMPTION
	struct thread *ctd;
	int cpri, pri;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
#ifdef PREEMPTION
	/*
	 * The new thread should not preempt the current thread if any of the
	 * following conditions are true:
	 *
	 *  - The current thread has a higher (numerically lower) or
	 *    equivalent priority.  Note that this prevents curthread from
	 *    trying to preempt to itself.
	 *  - It is too early in the boot for context switches (cold is set).
	 *  - The current thread has an inhibitor set or is in the process of
	 *    exiting.  In this case, the current thread is about to switch
	 *    out anyway, so there's no point in preempting.  If we did,
	 *    the current thread would not be properly resumed either, so
	 *    just avoid that whole landmine.
	 *  - The new thread's priority is not a realtime priority and
	 *    the current thread's priority is not an idle priority and
	 *    FULL_PREEMPTION is disabled.
	 *
	 * If all of these conditions are false, but the current thread is in
	 * a nested critical section, then we have to defer the preemption
	 * until we exit the critical section.  Otherwise, switch immediately
	 * to the new thread.
	 */
	ctd = curthread;
	KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
	    ("thread has no (or wrong) sched-private part."));
	KASSERT((td->td_inhibitors == 0),
	    ("maybe_preempt: trying to run inhibited thread"));
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
	    td->td_kse->ke_state != KES_THREAD)
		return (0);
#ifndef FULL_PREEMPTION
	if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
	    !(cpri >= PRI_MIN_IDLE))
		return (0);
#endif
	if (ctd->td_critnest > 1) {
		CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
		    ctd->td_critnest);
		ctd->td_pflags |= TDP_OWEPREEMPT;
		return (0);
	}

	/*
	 * Thread is runnable but not yet put on system run queue.
	 */
	MPASS(TD_ON_RUNQ(td));
	MPASS(td->td_sched->ke_state != KES_ONRUNQ);
	if (td->td_proc->p_flag & P_HADTHREADS) {
		/*
		 * If this is a threaded process we actually ARE on the
		 * ksegrp run queue so take it off that first.
		 * Also undo any damage done to the last_assigned pointer.
		 * XXX Fix setrunqueue so this isn't needed.
		 */
		struct ksegrp *kg;

		kg = td->td_ksegrp;
		if (kg->kg_last_assigned == td)
			kg->kg_last_assigned =
			    TAILQ_PREV(td, threadqueue, td_runq);
		TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	}

	TD_SET_RUNNING(td);
	CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
	    td->td_proc->p_pid, td->td_proc->p_comm);
	mi_switch(SW_INVOL|SW_PREEMPT, td);
	return (1);
#else
	return (0);
#endif
}

#if 0
#ifndef PREEMPTION
/* XXX: There should be a non-static version of this. */
static void
printf_caddr_t(void *data)
{
	printf("%s", (char *)data);
}
static char preempt_warning[] =
    "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
    preempt_warning)
#endif
#endif

/************************************************************************
 * SYSTEM RUN QUEUE manipulations and tests				*
 ************************************************************************/
/*
 * Initialize a run structure.
 */
void
runq_init(struct runq *rq)
{
	int i;

	bzero(rq, sizeof *rq);
	for (i = 0; i < RQ_NQS; i++)
		TAILQ_INIT(&rq->rq_queues[i]);
}

/*
 * Clear the status bit of the queue corresponding to priority level pri,
 * indicating that it is empty.
 */
static __inline void
runq_clrbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
}

/*
 * Find the index of the first non-empty run queue.  This is done by
 * scanning the status bits, a set bit indicating a non-empty queue.
 */
static __inline int
runq_findbit(struct runq *rq)
{
	struct rqbits *rqb;
	int pri;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
			CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
			    rqb->rqb_bits[i], i, pri);
			return (pri);
		}

	return (-1);
}
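/*
 * Decoding example (illustrative numbers): on a platform with 32-bit
 * status words (RQB_L2BPW == 5), a lowest set bit of 3 in word 1
 * yields pri = 3 + (1 << 5) = 35, i.e. queue 35 is the highest
 * priority non-empty queue.
 */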

/*
 * Set the status bit of the queue corresponding to priority level pri,
 * indicating that it is non-empty.
 */
static __inline void
runq_setbit(struct runq *rq, int pri)
{
	struct rqbits *rqb;

	rqb = &rq->rq_status;
	CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
	    rqb->rqb_bits[RQB_WORD(pri)],
	    rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
	    RQB_BIT(pri), RQB_WORD(pri));
	rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
}

/*
 * Add the KSE to the queue specified by its priority, and set the
 * corresponding status bit.
 */
void
runq_add(struct runq *rq, struct kse *ke, int flags)
{
	struct rqhead *rqh;
	int pri;

	pri = ke->ke_thread->td_priority / RQ_PPQ;
	ke->ke_rqindex = pri;
	runq_setbit(rq, pri);
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	if (flags & SRQ_PREEMPTED) {
		TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
	} else {
		TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
	}
}
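/*
 * Mapping example (assuming the stock RQ_PPQ of 4): td_priority 0-3
 * all share queue 0, 4-7 share queue 1, and so on, so a KSE whose
 * thread is at priority 130 lands in queue 32.  SRQ_PREEMPTED inserts
 * at the head so a preempted KSE runs again before its queue peers.
 */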

/*
 * Return true if there are runnable processes of any priority on the run
 * queue, false otherwise.  Has no side effects and does not modify the
 * run queue structure.
 */
int
runq_check(struct runq *rq)
{
	struct rqbits *rqb;
	int i;

	rqb = &rq->rq_status;
	for (i = 0; i < RQB_LEN; i++)
		if (rqb->rqb_bits[i]) {
			CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
			    rqb->rqb_bits[i], i);
			return (1);
		}
	CTR0(KTR_RUNQ, "runq_check: empty");

	return (0);
}

#if defined(SMP) && defined(SCHED_4BSD)
int runq_fuzz = 1;
SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
#endif

/*
 * Find the highest priority process on the run queue.
 */
struct kse *
runq_choose(struct runq *rq)
{
	struct rqhead *rqh;
	struct kse *ke;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	while ((pri = runq_findbit(rq)) != -1) {
		rqh = &rq->rq_queues[pri];
#if defined(SMP) && defined(SCHED_4BSD)
		/* fuzz == 1 is normal; 0 or less is ignored */
		if (runq_fuzz > 1) {
			/*
			 * In the first couple of entries, check if
			 * there is one for our CPU as a preference.
			 */
			int count = runq_fuzz;
			int cpu = PCPU_GET(cpuid);
			struct kse *ke2;

			ke2 = ke = TAILQ_FIRST(rqh);
			while (count-- && ke2) {
				if (ke2->ke_thread->td_lastcpu == cpu) {
					ke = ke2;
					break;
				}
				ke2 = TAILQ_NEXT(ke2, ke_procq);
			}
		} else
#endif
		ke = TAILQ_FIRST(rqh);
		KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
		CTR3(KTR_RUNQ,
		    "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
		return (ke);
	}
	CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);

	return (NULL);
}

/*
 * Remove the KSE from the queue specified by its priority, and clear the
 * corresponding status bit if the queue becomes empty.
 * Caller must set ke->ke_state afterwards.
 */
void
runq_remove(struct runq *rq, struct kse *ke)
{
	struct rqhead *rqh;
	int pri;

	KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
	KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
	    ("runq_remove: process swapped out"));
	pri = ke->ke_rqindex;
	rqh = &rq->rq_queues[pri];
	CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
	    ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
	TAILQ_REMOVE(rqh, ke, ke_procq);
	if (TAILQ_EMPTY(rqh)) {
		CTR0(KTR_RUNQ, "runq_remove: empty");
		runq_clrbit(rq, pri);
	}
}

/****** functions that are temporarily here ***********/
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
 * In this case just set the default concurrency value.
 *
 * Called from:
 *  proc_init() (UMA init method)
 */
void
sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
{

	/* This can go in sched_fork */
	sched_init_concurrency(kg);
}

/*
 * Called by the UMA process fini routine;
 * undo anything we may have done in the uma_init method.
 * Panic if it's not all 1:1:1:1.
 * Called from:
 *  proc_fini() (UMA method)
 */
void
sched_destroyproc(struct proc *p)
{

	/* This function is slated for destruction. */
	KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread"));
	KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp"));
}

/*
 * A thread is being either created or recycled.
 * Fix up the per-scheduler resources associated with it.
 * Called from:
 *  sched_fork_thread()
 *  thread_dtor() (*may go away)
 *  thread_init() (*may go away)
 */
void
sched_newthread(struct thread *td)
{
	struct td_sched *ke;

	ke = (struct td_sched *)(td + 1);
	bzero(ke, sizeof(*ke));
	td->td_sched = ke;
	ke->ke_thread = td;
	ke->ke_state = KES_THREAD;
}

/*
 * Set up an initial concurrency of 1 and set the given thread
 * (if given) to be using that concurrency slot.
 * May be used "offline", before the ksegrp is attached to the world,
 * and thus wouldn't need schedlock in that case.
 * Called from:
 *  thr_create()
 *  proc_init() (UMA) via sched_newproc()
 */
void
sched_init_concurrency(struct ksegrp *kg)
{

	CTR1(KTR_RUNQ, "kg %p init slots and concurrency to 1", kg);
	kg->kg_concurrency = 1;
	kg->kg_avail_opennings = 1;
}

/*
 * Change the concurrency of an existing ksegrp to N.
 * Called from:
 *  kse_create()
 *  kse_exit()
 *  thread_exit()
 *  thread_single()
 */
void
sched_set_concurrency(struct ksegrp *kg, int concurrency)
{

	CTR4(KTR_RUNQ, "kg %p set concurrency to %d, slots %d -> %d",
	    kg,
	    concurrency,
	    kg->kg_avail_opennings,
	    kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
	kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
	kg->kg_concurrency = concurrency;
}
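/*
 * For example, raising a ksegrp's concurrency from 1 to 3 adds the
 * delta (3 - 1) == 2 to kg_avail_opennings; lowering it can drive the
 * count negative until enough threads release their slots, which
 * setrunqueue() tolerates (see the comment there about negative
 * available slots).
 */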

/*
 * Called from thread_exit() for every exiting thread.
 *
 * Not to be confused with sched_exit_thread(), which thread_exit()
 * only calls for threads exiting without the rest of the process
 * exiting; sched_exit() also calls it, and we wouldn't want to call
 * it twice.
 * XXX This can probably be fixed.
 */
void
sched_thread_exit(struct thread *td)
{

	SLOT_RELEASE(td->td_ksegrp);
	slot_fill(td->td_ksegrp);
}

#endif /* KERN_SWITCH_INCLUDE */