1 /*-
2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /***
28 Here is the logic..
29
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If there are X of these KSEs actually running at the
33 moment in question, then at most M (where M = N - X) of these KSEs are on
34 the run queue, as running KSEs are not on the queue.
35
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
40
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or
45 because there are no threads queued, that pointer is NULL.
46
47 When a KSE is removed from the run queue to become runnable, we know
48 it was associated with the highest priority thread in the queue (at the head
49 of the queue). If it is also the last assigned thread, we know M was 1 and
50 must now be 0. Since the thread is no longer queued, the pointer can no
51 longer refer to it. Since we know there were no more KSEs available
52 (M was 1 and is now 0), and since we are not FREEING our KSE
53 but using it, there are STILL no more KSEs available, so the next
54 thread in the ksegrp list will not have a KSE to assign to it,
55 and the pointer must therefore be made 'invalid' (NULL).
56
57 The pointer exists so that when a new thread is made runnable, it can
58 have its priority compared with the last assigned thread to see if
59 it should 'steal' its KSE or not, i.e. whether it is 'earlier'
60 on the list than that thread or later. If it's earlier, then the KSE is
61 removed from the last assigned (which is now not assigned a KSE)
62 and reassigned to the new thread, which is placed earlier in the list.
63 The pointer is then backed up to the previous thread (which may or may not
64 be the new thread).
65
66 When a thread sleeps or is removed, the KSE becomes available and if there
67 are queued threads that are not assigned KSEs, the highest priority one of
68 them is assigned the KSE, which is then placed back on the run queue at
69 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70 to point to it.
71
72 The following diagram shows 2 KSEs and 3 threads from a single process.
73
74 RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads)
75 \ \____
76 \ \
77 KSEGROUP---thread--thread--thread (queued in priority order)
78 \ /
79 \_______________/
80 (last_assigned)
81
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
86 ***/
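
/*
 * Illustrative sketch only (kept disabled): under the invariant described
 * above, every thread from the head of kg_runq up to and including
 * kg_last_assigned has been handed a KSE and is represented on the system
 * run queue; everything after kg_last_assigned is waiting for a slot.
 * The helper name below is hypothetical and not part of this file's API.
 */
#if 0
static int
ksegrp_assigned_count(struct ksegrp *kg)
{
	struct thread *td;
	int count;

	if (kg->kg_last_assigned == NULL)
		return (0);
	count = 0;
	TAILQ_FOREACH(td, &kg->kg_runq, td_runq) {
		count++;
		if (td == kg->kg_last_assigned)
			break;
	}
	return (count);
}
#endif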
87
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD: src/sys/kern/kern_switch.c,v 1.78.2.19 2005/07/03 20:08:04 ups Exp $");
90
91 #include "opt_sched.h"
92
93 #ifndef KERN_SWITCH_INCLUDE
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kdb.h>
97 #include <sys/kernel.h>
98 #include <sys/ktr.h>
99 #include <sys/lock.h>
100 #include <sys/mutex.h>
101 #include <sys/proc.h>
102 #include <sys/queue.h>
103 #include <sys/sched.h>
104 #else /* KERN_SWITCH_INCLUDE */
105 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
106 #include <sys/smp.h>
107 #endif
108 #include <machine/critical.h>
109 #if defined(SMP) && defined(SCHED_4BSD)
110 #include <sys/sysctl.h>
111 #endif
112
113 #ifdef FULL_PREEMPTION
114 #ifndef PREEMPTION
115 #error "The FULL_PREEMPTION option requires the PREEMPTION option"
116 #endif
117 #endif
118
119 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
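
/*
 * Illustrative sketch only (kept disabled): how a thread priority maps onto
 * the run-queue bitmap constrained by the CTASSERT above.  Each of the
 * RQ_NQS queues owns one status bit: pri / RQ_PPQ selects the queue (as in
 * runq_add() below) and RQB_WORD()/RQB_BIT() select the word of rqb_bits[]
 * and the bit within that word (as in runq_setbit()/runq_clrbit()).  The
 * function name is hypothetical.
 */
#if 0
static void
runq_bitmap_example(int td_priority)
{
	int pri, word;

	pri = td_priority / RQ_PPQ;	/* queue index, as used by runq_add() */
	word = RQB_WORD(pri);		/* word of rqb_bits[] holding its bit */
	printf("priority %d -> queue %d, status word %d, mask %#lx\n",
	    td_priority, pri, word, (unsigned long)RQB_BIT(pri));
}
#endif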
120
121 #define td_kse td_sched
122
123 /************************************************************************
124 * Functions that manipulate runnability from a thread perspective. *
125 ************************************************************************/
126 /*
127 * Select the KSE that will be run next. From that find the thread, and
128 * remove it from the KSEGRP's run queue. If there is thread clustering,
129 * this will be what does it.
130 */
131 struct thread *
132 choosethread(void)
133 {
134 struct kse *ke;
135 struct thread *td;
136 struct ksegrp *kg;
137
138 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
139 if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
140 		/* Shutting down, run idlethread on APs */
141 td = PCPU_GET(idlethread);
142 ke = td->td_kse;
143 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
144 ke->ke_flags |= KEF_DIDRUN;
145 TD_SET_RUNNING(td);
146 return (td);
147 }
148 #endif
149
150 retry:
151 ke = sched_choose();
152 if (ke) {
153 td = ke->ke_thread;
154 KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
155 kg = ke->ke_ksegrp;
156 if (td->td_proc->p_flag & P_HADTHREADS) {
157 if (kg->kg_last_assigned == td) {
158 kg->kg_last_assigned = TAILQ_PREV(td,
159 threadqueue, td_runq);
160 }
161 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
162 kg->kg_runnable--;
163 }
164 CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
165 td, td->td_priority);
166 } else {
167 /* Simulate runq_choose() having returned the idle thread */
168 td = PCPU_GET(idlethread);
169 ke = td->td_kse;
170 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
171 }
172 ke->ke_flags |= KEF_DIDRUN;
173
174 /*
175 * If we are in panic, only allow system threads,
176 * plus the one we are running in, to be run.
177 */
178 if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
179 (td->td_flags & TDF_INPANIC) == 0)) {
180 /* note that it is no longer on the run queue */
181 TD_SET_CAN_RUN(td);
182 goto retry;
183 }
184
185 TD_SET_RUNNING(td);
186 return (td);
187 }
188
189 /*
190 * Given a surplus system slot, try to assign a new runnable thread to it.
191 * Called from:
192 * sched_thread_exit() (local)
193 * sched_switch() (local)
195 * remrunqueue() (local) (not at the moment)
196 */
197 static void
198 slot_fill(struct ksegrp *kg)
199 {
200 struct thread *td;
201
202 mtx_assert(&sched_lock, MA_OWNED);
203 while (kg->kg_avail_opennings > 0) {
204 /*
205 * Find the first unassigned thread
206 */
207 if ((td = kg->kg_last_assigned) != NULL)
208 td = TAILQ_NEXT(td, td_runq);
209 else
210 td = TAILQ_FIRST(&kg->kg_runq);
211
212 /*
213 * If we found one, send it to the system scheduler.
214 */
215 if (td) {
216 kg->kg_last_assigned = td;
217 sched_add(td, SRQ_YIELDING);
218 CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
219 } else {
220 /* no threads to use up the slots. quit now */
221 break;
222 }
223 }
224 }
225
226 #ifdef SCHED_4BSD
227 /*
228 * Remove a thread from its KSEGRP's run queue.
229 * This in turn may remove it from a KSE if it was already assigned
230 * to one, possibly causing a new thread to be assigned to the KSE
231 * and the KSE getting a new priority.
232 */
233 static void
234 remrunqueue(struct thread *td)
235 {
236 struct thread *td2, *td3;
237 struct ksegrp *kg;
238 struct kse *ke;
239
240 mtx_assert(&sched_lock, MA_OWNED);
241 KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
242 kg = td->td_ksegrp;
243 ke = td->td_kse;
244 CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
245 TD_SET_CAN_RUN(td);
246 /*
247 * If it is not a threaded process, take the shortcut.
248 */
249 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
250 		/* remove from the system run queue and free up a slot */
251 sched_rem(td);
252 ke->ke_state = KES_THREAD;
253 return;
254 }
255 td3 = TAILQ_PREV(td, threadqueue, td_runq);
256 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
257 kg->kg_runnable--;
258 if (ke->ke_state == KES_ONRUNQ) {
259 /*
260 * This thread has been assigned to the system run queue.
261 		 * We need to dissociate it and try to assign the
262 * KSE to the next available thread. Then, we should
263 * see if we need to move the KSE in the run queues.
264 */
265 sched_rem(td);
266 ke->ke_state = KES_THREAD;
267 td2 = kg->kg_last_assigned;
268 KASSERT((td2 != NULL), ("last assigned has wrong value"));
269 if (td2 == td)
270 kg->kg_last_assigned = td3;
271 /* slot_fill(kg); */ /* will replace it with another */
272 }
273 }
274 #endif
275
276 /*
277 * Change the priority of a thread that is on the run queue.
278 */
279 void
280 adjustrunqueue(struct thread *td, int newpri)
281 {
282 struct ksegrp *kg;
283 struct kse *ke;
284
285 mtx_assert(&sched_lock, MA_OWNED);
286 KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
287
288 ke = td->td_kse;
289 CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
290 /*
291 * If it is not a threaded process, take the shortcut.
292 */
293 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
294 /* We only care about the kse in the run queue. */
295 td->td_priority = newpri;
296 if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
297 sched_rem(td);
298 sched_add(td, SRQ_BORING);
299 }
300 return;
301 }
302
303 /* It is a threaded process */
304 kg = td->td_ksegrp;
305 if (ke->ke_state == KES_ONRUNQ) {
306 if (kg->kg_last_assigned == td) {
307 kg->kg_last_assigned =
308 TAILQ_PREV(td, threadqueue, td_runq);
309 }
310 sched_rem(td);
311 }
312 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
313 kg->kg_runnable--;
314 TD_SET_CAN_RUN(td);
315 td->td_priority = newpri;
316 setrunqueue(td, SRQ_BORING);
317 }
318
319 /*
320 * This function is called when a thread is about to be put on a
321 * ksegrp run queue because it has been made runnable or its
322 * priority has been adjusted and the ksegrp does not have a
323 * free kse slot. It determines if a thread from the same ksegrp
324 * should be preempted. If so, it tries to switch threads
325 * if the thread is on the same cpu or notifies another cpu that
326 * it should switch threads.
327 */
328
329 static void
330 maybe_preempt_in_ksegrp(struct thread *td)
331 #if !defined(SMP)
332 {
333 struct thread *running_thread;
334
335 mtx_assert(&sched_lock, MA_OWNED);
336 running_thread = curthread;
337
338 if (running_thread->td_ksegrp != td->td_ksegrp)
339 return;
340
341 if (td->td_priority >= running_thread->td_priority)
342 return;
343 #ifdef PREEMPTION
344 #ifndef FULL_PREEMPTION
345 if (td->td_priority > PRI_MAX_ITHD) {
346 running_thread->td_flags |= TDF_NEEDRESCHED;
347 return;
348 }
349 #endif /* FULL_PREEMPTION */
350
351 if (running_thread->td_critnest > 1)
352 running_thread->td_pflags |= TDP_OWEPREEMPT;
353 else
354 mi_switch(SW_INVOL, NULL);
355
356 #else /* PREEMPTION */
357 running_thread->td_flags |= TDF_NEEDRESCHED;
358 #endif /* PREEMPTION */
359 return;
360 }
361
362 #else /* SMP */
363 {
364 struct thread *running_thread;
365 int worst_pri;
366 struct ksegrp *kg;
367 	cpumask_t cpumask, dontuse;
368 struct pcpu *pc;
369 struct pcpu *best_pcpu;
370 struct thread *cputhread;
371
372 mtx_assert(&sched_lock, MA_OWNED);
373
374 running_thread = curthread;
375
376 #if !defined(KSEG_PEEMPT_BEST_CPU)
377 if (running_thread->td_ksegrp != td->td_ksegrp) {
378 #endif
379 kg = td->td_ksegrp;
380
381 /* if someone is ahead of this thread, wait our turn */
382 if (td != TAILQ_FIRST(&kg->kg_runq))
383 return;
384
385 worst_pri = td->td_priority;
386 best_pcpu = NULL;
387 dontuse = stopped_cpus | idle_cpus_mask;
388
389 /*
390 	 * Find a cpu with the worst priority that is running a thread from
391 	 * the same ksegrp - if multiple exist, prefer first the cpu this
392 	 * thread last ran on and then the current cpu.
393 */
394
395 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
396 cpumask = pc->pc_cpumask;
397 cputhread = pc->pc_curthread;
398
399 if ((cpumask & dontuse) ||
400 cputhread->td_ksegrp != kg)
401 continue;
402
403 if (cputhread->td_priority > worst_pri) {
404 worst_pri = cputhread->td_priority;
405 best_pcpu = pc;
406 continue;
407 }
408
409 if (cputhread->td_priority == worst_pri &&
410 best_pcpu != NULL &&
411 (td->td_lastcpu == pc->pc_cpuid ||
412 (PCPU_GET(cpumask) == cpumask &&
413 td->td_lastcpu != best_pcpu->pc_cpuid)))
414 best_pcpu = pc;
415 }
416
417 /* Check if we need to preempt someone */
418 if (best_pcpu == NULL)
419 return;
420
421 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
422 #if !defined(FULL_PREEMPTION)
423 if (td->td_priority <= PRI_MAX_ITHD)
424 #endif /* ! FULL_PREEMPTION */
425 {
426 ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
427 return;
428 }
429 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
430
431 if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
432 best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
433 ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
434 return;
435 }
436 #if !defined(KSEG_PEEMPT_BEST_CPU)
437 }
438 #endif
439
440 if (td->td_priority >= running_thread->td_priority)
441 return;
442 #ifdef PREEMPTION
443
444 #if !defined(FULL_PREEMPTION)
445 if (td->td_priority > PRI_MAX_ITHD) {
446 running_thread->td_flags |= TDF_NEEDRESCHED;
447 		return;
	}
448 #endif /* ! FULL_PREEMPTION */
449
450 if (running_thread->td_critnest > 1)
451 running_thread->td_pflags |= TDP_OWEPREEMPT;
452 else
453 mi_switch(SW_INVOL, NULL);
454
455 #else /* PREEMPTION */
456 running_thread->td_flags |= TDF_NEEDRESCHED;
457 #endif /* PREEMPTION */
458 return;
459 }
460 #endif /* !SMP */
461
462
463 int limitcount;
464 void
465 setrunqueue(struct thread *td, int flags)
466 {
467 struct ksegrp *kg;
468 struct thread *td2;
469 struct thread *tda;
470 CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
471 td, td->td_proc->p_comm, td->td_priority, curthread,
472 curthread->td_proc->p_comm);
473
474 CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
475 td, td->td_ksegrp, td->td_proc->p_pid);
476 mtx_assert(&sched_lock, MA_OWNED);
477 KASSERT((td->td_inhibitors == 0),
478 ("setrunqueue: trying to run inhibitted thread"));
479 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
480 ("setrunqueue: bad thread state"));
481 TD_SET_RUNQ(td);
482 kg = td->td_ksegrp;
483 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
484 /*
485 * Common path optimisation: Only one of everything
486 * and the KSE is always already attached.
487 * Totally ignore the ksegrp run queue.
488 */
489 if (kg->kg_avail_opennings != 1) {
490 if (limitcount < 1) {
491 limitcount++;
492 printf("pid %d: corrected slot count (%d->1)\n",
493 td->td_proc->p_pid, kg->kg_avail_opennings);
494
495 }
496 kg->kg_avail_opennings = 1;
497 }
498 sched_add(td, flags);
499 return;
500 }
501
502 /*
503 	 * If the concurrency has been reduced, and we would go in the
504 * assigned section, then keep removing entries from the
505 * system run queue, until we are not in that section
506 * or there is room for us to be put in that section.
507 	 * What we MUST avoid is the case where threads of lower
508 	 * priority than the new one are scheduled, but the new one can not
509 	 * be scheduled itself. That would lead to a non-contiguous set
510 	 * of scheduled threads, and everything would break.
511 */
512 tda = kg->kg_last_assigned;
513 while ((kg->kg_avail_opennings <= 0) &&
514 (tda && (tda->td_priority > td->td_priority))) {
515 /*
516 * None free, but there is one we can commandeer.
517 */
518 CTR2(KTR_RUNQ,
519 "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
520 sched_rem(tda);
521 tda = kg->kg_last_assigned =
522 TAILQ_PREV(tda, threadqueue, td_runq);
523 }
524
525 /*
526 * Add the thread to the ksegrp's run queue at
527 * the appropriate place.
528 */
529 TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
530 if (td2->td_priority > td->td_priority) {
531 kg->kg_runnable++;
532 TAILQ_INSERT_BEFORE(td2, td, td_runq);
533 break;
534 }
535 }
536 if (td2 == NULL) {
537 /* We ran off the end of the TAILQ or it was empty. */
538 kg->kg_runnable++;
539 TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
540 }
541
542 /*
543 * If we have a slot to use, then put the thread on the system
544 * run queue and if needed, readjust the last_assigned pointer.
545 	 * It may be that we need to schedule something anyhow
546 	 * even if the available slots are negative, so that
547 	 * all the items < last_assigned are scheduled.
548 */
549 if (kg->kg_avail_opennings > 0) {
550 if (tda == NULL) {
551 /*
552 * No pre-existing last assigned so whoever is first
553 * gets the slot.. (maybe us)
554 */
555 td2 = TAILQ_FIRST(&kg->kg_runq);
556 kg->kg_last_assigned = td2;
557 } else if (tda->td_priority > td->td_priority) {
558 td2 = td;
559 } else {
560 /*
561 * We are past last_assigned, so
562 * give the next slot to whatever is next,
563 * which may or may not be us.
564 */
565 td2 = TAILQ_NEXT(tda, td_runq);
566 kg->kg_last_assigned = td2;
567 }
568 sched_add(td2, flags);
569 } else {
570 CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
571 td, td->td_ksegrp, td->td_proc->p_pid);
572 if ((flags & SRQ_YIELDING) == 0)
573 maybe_preempt_in_ksegrp(td);
574 }
575 }
576
577 /*
578 * Kernel thread preemption implementation. Critical sections mark
579 * regions of code in which preemptions are not allowed.
580 */
581 void
582 critical_enter(void)
583 {
584 struct thread *td;
585
586 td = curthread;
587 if (td->td_critnest == 0)
588 cpu_critical_enter(td);
589 td->td_critnest++;
590 CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
591 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
592 }
593
594 void
595 critical_exit(void)
596 {
597 struct thread *td;
598
599 td = curthread;
600 KASSERT(td->td_critnest != 0,
601 ("critical_exit: td_critnest == 0"));
602 if (td->td_critnest == 1) {
603 if (td->td_pflags & TDP_WAKEPROC0) {
604 td->td_pflags &= ~TDP_WAKEPROC0;
605 wakeup(&proc0);
606 }
607 #ifdef PREEMPTION
608 mtx_assert(&sched_lock, MA_NOTOWNED);
609 if (td->td_pflags & TDP_OWEPREEMPT) {
610 mtx_lock_spin(&sched_lock);
611 mi_switch(SW_INVOL, NULL);
612 mtx_unlock_spin(&sched_lock);
613 }
614 #endif
615 td->td_critnest = 0;
616 cpu_critical_exit(td);
617 } else {
618 td->td_critnest--;
619 }
620 CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
621 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
622 }
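
/*
 * Illustrative usage sketch only (kept disabled): a critical section keeps
 * the current thread from being preempted while it updates state that must
 * not be touched by another thread on this CPU in the meantime.  The
 * variable and helper names are hypothetical.
 */
#if 0
static int example_softcount;

static void
example_bump_softcount(void)
{

	critical_enter();
	example_softcount++;	/* no preemption between enter and exit */
	critical_exit();
}
#endif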
623
624 /*
625 * This function is called when a thread is about to be put on run queue
626 * because it has been made runnable or its priority has been adjusted. It
627 * determines whether we should immediately preempt to the new thread. If so,
628 * it switches to it and eventually returns true. If not, it returns false
629 * so that the caller may place the thread on an appropriate run queue.
630 */
631 int
632 maybe_preempt(struct thread *td)
633 {
634 #ifdef PREEMPTION
635 struct thread *ctd;
636 int cpri, pri;
637 #endif
638
639 mtx_assert(&sched_lock, MA_OWNED);
640 #ifdef PREEMPTION
641 /*
642 * The new thread should not preempt the current thread if any of the
643 * following conditions are true:
644 *
645 * - The current thread has a higher (numerically lower) or
646 * equivalent priority. Note that this prevents curthread from
647 * trying to preempt to itself.
648 * - It is too early in the boot for context switches (cold is set).
649 * - The current thread has an inhibitor set or is in the process of
650 * exiting. In this case, the current thread is about to switch
651 * out anyways, so there's no point in preempting. If we did,
652 * the current thread would not be properly resumed as well, so
653 * just avoid that whole landmine.
654 * - If the new thread's priority is not a realtime priority and
655 * the current thread's priority is not an idle priority and
656 * FULL_PREEMPTION is disabled.
657 *
658 * If all of these conditions are false, but the current thread is in
659 * a nested critical section, then we have to defer the preemption
660 * until we exit the critical section. Otherwise, switch immediately
661 * to the new thread.
662 */
663 ctd = curthread;
664 KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
665 ("thread has no (or wrong) sched-private part."));
666 KASSERT((td->td_inhibitors == 0),
667 ("maybe_preempt: trying to run inhibitted thread"));
668 pri = td->td_priority;
669 cpri = ctd->td_priority;
670 if (pri >= cpri || cold /* || dumping */ || TD_IS_INHIBITED(ctd) ||
671 td->td_kse->ke_state != KES_THREAD)
672 return (0);
673 #ifndef FULL_PREEMPTION
674 if (!(pri >= PRI_MIN_ITHD && pri <= PRI_MAX_ITHD) &&
675 !(cpri >= PRI_MIN_IDLE))
676 return (0);
677 #endif
678 if (ctd->td_critnest > 1) {
679 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
680 ctd->td_critnest);
681 ctd->td_pflags |= TDP_OWEPREEMPT;
682 return (0);
683 }
684
685 /*
686 * Thread is runnable but not yet put on system run queue.
687 */
688 MPASS(TD_ON_RUNQ(td));
689 MPASS(td->td_sched->ke_state != KES_ONRUNQ);
690 if (td->td_proc->p_flag & P_HADTHREADS) {
691 /*
692 * If this is a threaded process we actually ARE on the
693 * ksegrp run queue so take it off that first.
694 * Also undo any damage done to the last_assigned pointer.
695 * XXX Fix setrunqueue so this isn't needed
696 */
697 struct ksegrp *kg;
698
699 kg = td->td_ksegrp;
700 if (kg->kg_last_assigned == td)
701 kg->kg_last_assigned =
702 TAILQ_PREV(td, threadqueue, td_runq);
703 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
704 }
705
706 TD_SET_RUNNING(td);
707 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
708 td->td_proc->p_pid, td->td_proc->p_comm);
709 mi_switch(SW_INVOL|SW_PREEMPT, td);
710 return (1);
711 #else
712 return (0);
713 #endif
714 }
715
716 #if 0
717 #ifndef PREEMPTION
718 /* XXX: There should be a non-static version of this. */
719 static void
720 printf_caddr_t(void *data)
721 {
722 printf("%s", (char *)data);
723 }
724 static char preempt_warning[] =
725 "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
726 SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
727 preempt_warning)
728 #endif
729 #endif
730
731 /************************************************************************
732 * SYSTEM RUN QUEUE manipulations and tests *
733 ************************************************************************/
734 /*
735 * Initialize a run structure.
736 */
737 void
738 runq_init(struct runq *rq)
739 {
740 int i;
741
742 bzero(rq, sizeof *rq);
743 for (i = 0; i < RQ_NQS; i++)
744 TAILQ_INIT(&rq->rq_queues[i]);
745 }
746
747 /*
748 * Clear the status bit of the queue corresponding to priority level pri,
749 * indicating that it is empty.
750 */
751 static __inline void
752 runq_clrbit(struct runq *rq, int pri)
753 {
754 struct rqbits *rqb;
755
756 rqb = &rq->rq_status;
757 CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
758 rqb->rqb_bits[RQB_WORD(pri)],
759 rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
760 RQB_BIT(pri), RQB_WORD(pri));
761 rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
762 }
763
764 /*
765 * Find the index of the first non-empty run queue. This is done by
766 * scanning the status bits, a set bit indicates a non-empty queue.
767 */
768 static __inline int
769 runq_findbit(struct runq *rq)
770 {
771 struct rqbits *rqb;
772 int pri;
773 int i;
774
775 rqb = &rq->rq_status;
776 for (i = 0; i < RQB_LEN; i++)
777 if (rqb->rqb_bits[i]) {
778 pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
779 CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
780 rqb->rqb_bits[i], i, pri);
781 return (pri);
782 }
783
784 return (-1);
785 }
786
787 /*
788 * Set the status bit of the queue corresponding to priority level pri,
789 * indicating that it is non-empty.
790 */
791 static __inline void
792 runq_setbit(struct runq *rq, int pri)
793 {
794 struct rqbits *rqb;
795
796 rqb = &rq->rq_status;
797 CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
798 rqb->rqb_bits[RQB_WORD(pri)],
799 rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
800 RQB_BIT(pri), RQB_WORD(pri));
801 rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
802 }
803
804 /*
805 * Add the KSE to the queue specified by its priority, and set the
806 * corresponding status bit.
807 */
808 void
809 runq_add(struct runq *rq, struct kse *ke, int flags)
810 {
811 struct rqhead *rqh;
812 int pri;
813
814 pri = ke->ke_thread->td_priority / RQ_PPQ;
815 ke->ke_rqindex = pri;
816 runq_setbit(rq, pri);
817 rqh = &rq->rq_queues[pri];
818 CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
819 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
820 if (flags & SRQ_PREEMPTED) {
821 TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
822 } else {
823 TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
824 }
825 }
826
827 /*
828 * Return true if there are runnable processes of any priority on the run
829 * queue, false otherwise. Has no side effects, does not modify the run
830 * queue structure.
831 */
832 int
833 runq_check(struct runq *rq)
834 {
835 struct rqbits *rqb;
836 int i;
837
838 rqb = &rq->rq_status;
839 for (i = 0; i < RQB_LEN; i++)
840 if (rqb->rqb_bits[i]) {
841 CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
842 rqb->rqb_bits[i], i);
843 return (1);
844 }
845 CTR0(KTR_RUNQ, "runq_check: empty");
846
847 return (0);
848 }
849
850 #if defined(SMP) && defined(SCHED_4BSD)
851 int runq_fuzz = 1;
852 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
853 #endif
854
855 /*
856 * Find the highest priority process on the run queue.
857 */
858 struct kse *
859 runq_choose(struct runq *rq)
860 {
861 struct rqhead *rqh;
862 struct kse *ke;
863 int pri;
864
865 mtx_assert(&sched_lock, MA_OWNED);
866 while ((pri = runq_findbit(rq)) != -1) {
867 rqh = &rq->rq_queues[pri];
868 #if defined(SMP) && defined(SCHED_4BSD)
869 /* fuzz == 1 is normal.. 0 or less are ignored */
870 if (runq_fuzz > 1) {
871 /*
872 * In the first couple of entries, check if
873 * there is one for our CPU as a preference.
874 */
875 int count = runq_fuzz;
876 int cpu = PCPU_GET(cpuid);
877 struct kse *ke2;
878 ke2 = ke = TAILQ_FIRST(rqh);
879
880 while (count-- && ke2) {
881 			if (ke2->ke_thread->td_lastcpu == cpu) {
882 ke = ke2;
883 break;
884 }
885 ke2 = TAILQ_NEXT(ke2, ke_procq);
886 }
887 } else
888 #endif
889 ke = TAILQ_FIRST(rqh);
890 KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
891 CTR3(KTR_RUNQ,
892 "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
893 return (ke);
894 }
895 CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
896
897 return (NULL);
898 }
899
900 /*
901 * Remove the KSE from the queue specified by its priority, and clear the
902 * corresponding status bit if the queue becomes empty.
903 * Caller must set ke->ke_state afterwards.
904 */
905 void
906 runq_remove(struct runq *rq, struct kse *ke)
907 {
908 struct rqhead *rqh;
909 int pri;
910
911 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
912 ("runq_remove: process swapped out"));
913 pri = ke->ke_rqindex;
914 rqh = &rq->rq_queues[pri];
915 CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
916 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
917 KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
918 TAILQ_REMOVE(rqh, ke, ke_procq);
919 if (TAILQ_EMPTY(rqh)) {
920 CTR0(KTR_RUNQ, "runq_remove: empty");
921 runq_clrbit(rq, pri);
922 }
923 }
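
/*
 * Illustrative sketch only (kept disabled): the sequence a scheduler drives
 * through the run-queue API above, with sched_lock held.  In the kernel
 * proper these calls are made from the sched_add()/sched_choose()/sched_rem()
 * paths of the scheduler implementations; the names used here are
 * hypothetical.
 */
#if 0
static struct runq example_runq;

static void
example_runq_cycle(struct kse *ke)
{
	struct kse *best;

	runq_init(&example_runq);		/* once, at scheduler set-up */
	runq_add(&example_runq, ke, 0);		/* KSE becomes runnable */
	best = runq_choose(&example_runq);	/* peek at the best KSE */
	if (best != NULL)
		runq_remove(&example_runq, best); /* take it off to run it */
}
#endif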
924
925 /****** functions that are temporarily here ***********/
926 #include <vm/uma.h>
927 extern struct mtx kse_zombie_lock;
928
929 /*
930 * Allocate scheduler specific per-process resources.
931 * The thread and ksegrp have already been linked in.
932 * In this case just set the default concurrency value.
933 *
934 * Called from:
935 * proc_init() (UMA init method)
936 */
937 void
938 sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
939 {
940
941 /* This can go in sched_fork */
942 sched_init_concurrency(kg);
943 }
944
945 /*
946 * Called by the uma process fini routine to
947 * undo anything we may have done in the uma_init method.
948 * Panic if it's not all 1:1:1:1.
949 * Called from:
950 * proc_fini() (UMA method)
951 */
952 void
953 sched_destroyproc(struct proc *p)
954 {
955
956 /* this function slated for destruction */
957 KASSERT((p->p_numthreads == 1), ("Cached proc with > 1 thread "));
958 KASSERT((p->p_numksegrps == 1), ("Cached proc with > 1 ksegrp "));
959 }
960
961 /*
962 * A thread is being either created or recycled.
963 * Fix up the per-scheduler resources associated with it.
964 * Called from:
965 * sched_fork_thread()
966 * thread_dtor() (*may go away)
967 * thread_init() (*may go away)
968 */
969 void
970 sched_newthread(struct thread *td)
971 {
972 struct td_sched *ke;
973
974 ke = (struct td_sched *) (td + 1);
975 bzero(ke, sizeof(*ke));
976 td->td_sched = ke;
977 ke->ke_thread = td;
978 ke->ke_state = KES_THREAD;
979 }
980
981 /*
982 * Set up an initial concurrency of 1
983 * and set the given thread (if given) to be using that
984 * concurrency slot.
985 * May be used "offline", before the ksegrp is attached to the world,
986 * and thus wouldn't need schedlock in that case.
987 * Called from:
988 * thr_create()
989 * proc_init() (UMA) via sched_newproc()
990 */
991 void
992 sched_init_concurrency(struct ksegrp *kg)
993 {
994
995 CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
996 kg->kg_concurrency = 1;
997 kg->kg_avail_opennings = 1;
998 }
999
1000 /*
1001 * Change the concurrency of an existing ksegrp to N
1002 * Called from:
1003 * kse_create()
1004 * kse_exit()
1005 * thread_exit()
1006 * thread_single()
1007 */
1008 void
1009 sched_set_concurrency(struct ksegrp *kg, int concurrency)
1010 {
1011
1012 CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
1013 kg,
1014 concurrency,
1015 kg->kg_avail_opennings,
1016 kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
1017 kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
1018 kg->kg_concurrency = concurrency;
1019 }
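
/*
 * Illustrative sketch only (kept disabled): raising the concurrency of a
 * ksegrp frees extra slots (kg_avail_opennings), and slot_fill() then hands
 * them to the highest priority unassigned threads on the ksegrp run queue.
 * The helper name and call site are hypothetical.
 */
#if 0
static void
example_grow_concurrency(struct ksegrp *kg, int ncpus)
{

	mtx_lock_spin(&sched_lock);
	sched_set_concurrency(kg, ncpus);	/* kg_avail_opennings grows */
	slot_fill(kg);				/* hand out the new slots */
	mtx_unlock_spin(&sched_lock);
}
#endif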
1020
1021 /*
1022 * Called from thread_exit() for all exiting threads.
1023 *
1024 * Not to be confused with sched_exit_thread(), which is
1025 * only called from thread_exit() for threads exiting
1026 * without the rest of the process, because it is also called from
1027 * sched_exit() and we wouldn't want to call it twice.
1028 * XXX This can probably be fixed.
1029 */
1030 void
1031 sched_thread_exit(struct thread *td)
1032 {
1033
1034 SLOT_RELEASE(td->td_ksegrp);
1035 slot_fill(td->td_ksegrp);
1036 }
1037
1038 #endif /* KERN_SWITCH_INCLUDE */