1 /*-
2 * Copyright (c) 2001 Jake Burkholder <jake@FreeBSD.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 /***
28 Here is the logic:
29 
30 If there are N processors, then there are at most N KSEs (kernel
31 schedulable entities) working to process threads that belong to a
32 KSEGROUP (kg). If X of these KSEs are actually running at the
33 moment in question, then at most M (where M = N - X) of these KSEs are
34 on the run queue, as running KSEs are not on the queue.
35
36 Runnable threads are queued off the KSEGROUP in priority order.
37 If there are M or more threads runnable, the top M threads
38 (by priority) are 'preassigned' to the M KSEs not running. The KSEs take
39 their priority from those threads and are put on the run queue.
40
41 The last thread that had a priority high enough to have a KSE associated
42 with it, AND IS ON THE RUN QUEUE, is pointed to by
43 kg->kg_last_assigned. If no threads queued off the KSEGROUP have KSEs
44 assigned, either because all the available KSEs are actively running or
45 because there are no threads queued, that pointer is NULL.
46
47 When a KSE is removed from the run queue to be run, we know it was
48 associated with the highest priority thread in the queue (at the head
49 of the queue). If it is also the last assigned we know M was 1 and must
50 now be 0. Since the thread is no longer queued, that pointer must no
51 longer point at it. Since we know there were no more KSEs available
52 (M was 1 and is now 0), and since we are not FREEING our KSE
53 but using it, we know there are STILL no more KSEs available, so
54 the next thread in the ksegrp list will not have a KSE to assign to
55 it; therefore the pointer must be made 'invalid' (NULL).
56
57 The pointer exists so that when a new thread is made runnable, it can
58 have its priority compared with that of the last assigned thread to see
59 whether it should 'steal' its KSE, i.e. whether it is 'earlier'
60 on the list than that thread or later. If it is earlier, the KSE is
61 removed from the last assigned thread (which is now not assigned a KSE)
62 and reassigned to the new thread, which is placed earlier in the list.
63 The pointer is then backed up to the previous thread (which may or may not
64 be the new thread).
65
66 When a thread sleeps or is removed, the KSE becomes available and if there
67 are queued threads that are not assigned KSEs, the highest priority one of
68 them is assigned the KSE, which is then placed back on the run queue at
69 the appropriate place, and the kg->kg_last_assigned pointer is adjusted down
70 to point to it.
71
72 The following diagram shows 2 KSEs and 3 threads from a single process.
73
74 RUNQ: --->KSE---KSE--... (KSEs queued at priorities from threads)
75 \ \____
76 \ \
77 KSEGROUP---thread--thread--thread (queued in priority order)
78 \ /
79 \_______________/
80 (last_assigned)
81
82 The result of this scheme is that the M available KSEs are always
83 queued at the priorities they have inherited from the M highest priority
84 threads for that KSEGROUP. If this situation changes, the KSEs are
85 reassigned to keep this true.
86 ***/
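/*
 * Illustrative sketch (never compiled, and not an existing kernel routine):
 * a debugging-style walk of a ksegrp run queue that checks the invariant
 * described above.  Threads up to and including kg_last_assigned should
 * already have been handed to the system scheduler, so their KSE state
 * should be KES_ONRUNQ; threads after that point should not own a slot.
 * The field names follow the code in this file, but the exact state test
 * is an assumption and the function itself is hypothetical.
 */
#if 0
static void
ksegrp_check_last_assigned(struct ksegrp *kg)
{
	struct thread *td2;
	int assigned = (kg->kg_last_assigned != NULL);

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
		if (assigned)
			KASSERT(td2->td_kse->ke_state == KES_ONRUNQ,
			    ("thread before last_assigned has no slot"));
		else
			KASSERT(td2->td_kse->ke_state != KES_ONRUNQ,
			    ("thread after last_assigned owns a slot"));
		if (td2 == kg->kg_last_assigned)
			assigned = 0;
	}
}
#endif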
87
88 #include <sys/cdefs.h>
89 __FBSDID("$FreeBSD: releng/6.3/sys/kern/kern_switch.c 173886 2007-11-24 19:45:58Z cvs2svn $");
90
91 #include "opt_sched.h"
92
93 #ifndef KERN_SWITCH_INCLUDE
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/kdb.h>
97 #include <sys/kernel.h>
98 #include <sys/ktr.h>
99 #include <sys/lock.h>
100 #include <sys/mutex.h>
101 #include <sys/proc.h>
102 #include <sys/queue.h>
103 #include <sys/sched.h>
104 #else /* KERN_SWITCH_INCLUDE */
105 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
106 #include <sys/smp.h>
107 #endif
108 #if defined(SMP) && defined(SCHED_4BSD)
109 #include <sys/sysctl.h>
110 #endif
111
112 #ifdef FULL_PREEMPTION
113 #ifndef PREEMPTION
114 #error "The FULL_PREEMPTION option requires the PREEMPTION option"
115 #endif
116 #endif
117
118 CTASSERT((RQB_BPW * RQB_LEN) == RQ_NQS);
119
120 #define td_kse td_sched
121
122 /*
123 * kern.sched.preemption allows user space to determine whether preemption
124 * support is compiled in. It is not currently a boot or runtime flag that
125 * can be changed.
126 */
127 #ifdef PREEMPTION
128 static int kern_sched_preemption = 1;
129 #else
130 static int kern_sched_preemption = 0;
131 #endif
132 SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
133 &kern_sched_preemption, 0, "Kernel preemption enabled");
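/*
 * Example (user space, illustrative only, never compiled here): a program
 * can read the read-only kern.sched.preemption knob via sysctlbyname(3) to
 * learn whether the kernel was built with the PREEMPTION option.  This is a
 * hedged sketch, not part of this file's code.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int preemption;
	size_t len = sizeof(preemption);

	if (sysctlbyname("kern.sched.preemption", &preemption, &len,
	    NULL, 0) == -1)
		return (1);
	printf("kernel preemption %s\n", preemption ? "enabled" : "disabled");
	return (0);
}
#endif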
134
135 /************************************************************************
136 * Functions that manipulate runnability from a thread perspective. *
137 ************************************************************************/
138 /*
139 * Select the KSE that will be run next. From that find the thread, and
140 * remove it from the KSEGRP's run queue. If we ever do thread clustering,
141 * this is where it would be done.
142 */
143 struct thread *
144 choosethread(void)
145 {
146 struct kse *ke;
147 struct thread *td;
148 struct ksegrp *kg;
149
150 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
151 if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
152 /* Shutting down, run idlethread on AP's */
153 td = PCPU_GET(idlethread);
154 ke = td->td_kse;
155 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
156 ke->ke_flags |= KEF_DIDRUN;
157 TD_SET_RUNNING(td);
158 return (td);
159 }
160 #endif
161
162 retry:
163 ke = sched_choose();
164 if (ke) {
165 td = ke->ke_thread;
166 KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
167 kg = ke->ke_ksegrp;
168 if (td->td_proc->p_flag & P_HADTHREADS) {
169 if (kg->kg_last_assigned == td) {
170 kg->kg_last_assigned = TAILQ_PREV(td,
171 threadqueue, td_runq);
172 }
173 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
174 }
175 CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
176 td, td->td_priority);
177 } else {
178 /* Simulate runq_choose() having returned the idle thread */
179 td = PCPU_GET(idlethread);
180 ke = td->td_kse;
181 CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
182 }
183 ke->ke_flags |= KEF_DIDRUN;
184
185 /*
186 * If we are in panic, only allow system threads,
187 * plus the one we are running in, to be run.
188 */
189 if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
190 (td->td_flags & TDF_INPANIC) == 0)) {
191 /* note that it is no longer on the run queue */
192 TD_SET_CAN_RUN(td);
193 goto retry;
194 }
195
196 TD_SET_RUNNING(td);
197 return (td);
198 }
199
200 /*
201 * Given a surplus system slot, try to assign a new runnable thread to it.
202 * Called from:
203 * sched_thread_exit() (local)
204 * sched_switch() (local)
206 * remrunqueue() (local) (not at the moment)
207 */
208 static void
209 slot_fill(struct ksegrp *kg)
210 {
211 struct thread *td;
212
213 mtx_assert(&sched_lock, MA_OWNED);
214 while (kg->kg_avail_opennings > 0) {
215 /*
216 * Find the first unassigned thread
217 */
218 if ((td = kg->kg_last_assigned) != NULL)
219 td = TAILQ_NEXT(td, td_runq);
220 else
221 td = TAILQ_FIRST(&kg->kg_runq);
222
223 /*
224 * If we found one, send it to the system scheduler.
225 */
226 if (td) {
227 kg->kg_last_assigned = td;
228 sched_add(td, SRQ_YIELDING);
229 CTR2(KTR_RUNQ, "slot_fill: td%p -> kg%p", td, kg);
230 } else {
231 /* no threads to use up the slots. quit now */
232 break;
233 }
234 }
235 }
236
237 #ifdef SCHED_4BSD
238 /*
239 * Remove a thread from its KSEGRP's run queue.
240 * This in turn may remove it from a KSE if it was already assigned
241 * to one, possibly causing a new thread to be assigned to the KSE
242 * and the KSE to be given a new priority.
243 */
244 static void
245 remrunqueue(struct thread *td)
246 {
247 struct thread *td2, *td3;
248 struct ksegrp *kg;
249 struct kse *ke;
250
251 mtx_assert(&sched_lock, MA_OWNED);
252 KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
253 kg = td->td_ksegrp;
254 ke = td->td_kse;
255 CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
256 TD_SET_CAN_RUN(td);
257 /*
258 * If it is not a threaded process, take the shortcut.
259 */
260 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
261 /* remove from sys run queue and free up a slot */
262 sched_rem(td);
263 ke->ke_state = KES_THREAD;
264 return;
265 }
266 td3 = TAILQ_PREV(td, threadqueue, td_runq);
267 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
268 if (ke->ke_state == KES_ONRUNQ) {
269 /*
270 * This thread has been assigned to the system run queue.
271 * We need to dissociate it and try to assign the
272 * KSE to the next available thread. Then, we should
273 * see if we need to move the KSE in the run queues.
274 */
275 sched_rem(td);
276 ke->ke_state = KES_THREAD;
277 td2 = kg->kg_last_assigned;
278 KASSERT((td2 != NULL), ("last assigned has wrong value"));
279 if (td2 == td)
280 kg->kg_last_assigned = td3;
281 /* slot_fill(kg); */ /* will replace it with another */
282 }
283 }
284 #endif
285
286 /*
287 * Change the priority of a thread that is on the run queue.
288 */
289 void
290 adjustrunqueue(struct thread *td, int newpri)
291 {
292 struct ksegrp *kg;
293 struct kse *ke;
294
295 mtx_assert(&sched_lock, MA_OWNED);
296 KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
297
298 ke = td->td_kse;
299 CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
300 /*
301 * If it is not a threaded process, take the shortcut.
302 */
303 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
304 /* We only care about the kse in the run queue. */
305 td->td_priority = newpri;
306 if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
307 sched_rem(td);
308 sched_add(td, SRQ_BORING);
309 }
310 return;
311 }
312
313 /* It is a threaded process */
314 kg = td->td_ksegrp;
315 if (ke->ke_state == KES_ONRUNQ
316 #ifdef SCHED_ULE
317 || ((ke->ke_flags & KEF_ASSIGNED) != 0 &&
318 (ke->ke_flags & KEF_REMOVED) == 0)
319 #endif
320 ) {
321 if (kg->kg_last_assigned == td) {
322 kg->kg_last_assigned =
323 TAILQ_PREV(td, threadqueue, td_runq);
324 }
325 sched_rem(td);
326 }
327 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
328 TD_SET_CAN_RUN(td);
329 td->td_priority = newpri;
330 setrunqueue(td, SRQ_BORING);
331 }
332
333 /*
334 * This function is called when a thread is about to be put on a
335 * ksegrp run queue because it has been made runnable or its
336 * priority has been adjusted and the ksegrp does not have a
337 * free kse slot. It determines if a thread from the same ksegrp
338 * should be preempted. If so, it switches threads when the thread
339 * to be preempted is on the current cpu, or notifies another cpu
340 * that it should switch threads.
341 */
342
343 static void
344 maybe_preempt_in_ksegrp(struct thread *td)
345 #if !defined(SMP)
346 {
347 struct thread *ctd;
348 #ifdef PREEMPTION
349 int cpri, pri;
350 #endif
351
352 mtx_assert(&sched_lock, MA_OWNED);
353 ctd = curthread;
354
355 #ifdef PREEMPTION
356 KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
357 ("thread has no (or wrong) sched-private part."));
358 KASSERT((td->td_inhibitors == 0),
359 ("maybe_preempt: trying to run inhibitted thread"));
360 pri = td->td_priority;
361 cpri = ctd->td_priority;
362 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
363 TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
364 return;
365 if (ctd->td_ksegrp != td->td_ksegrp)
366 return;
367 #ifndef FULL_PREEMPTION
368 if (td->td_priority > PRI_MAX_ITHD) {
369 ctd->td_flags |= TDF_NEEDRESCHED;
370 return;
371 }
372 #endif /* FULL_PREEMPTION */
373
374 if (ctd->td_critnest > 1)
375 ctd->td_owepreempt = 1;
376 else
377 mi_switch(SW_INVOL, NULL);
378
379 #else /* PREEMPTION */
380 ctd->td_flags |= TDF_NEEDRESCHED;
381 #endif /* PREEMPTION */
382 return;
383 }
384
385 #else /* SMP */
386 {
387 struct thread *ctd;
388 #ifdef PREEMPTION
389 int cpri, pri;
390 #endif
391 int worst_pri;
392 struct ksegrp *kg;
393 cpumask_t cpumask,dontuse;
394 struct pcpu *pc;
395 struct pcpu *best_pcpu;
396 struct thread *cputhread;
397
398 mtx_assert(&sched_lock, MA_OWNED);
399 ctd = curthread;
400
401 #ifdef PREEMPTION
402 KASSERT((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
403 ("thread has no (or wrong) sched-private part."));
404 KASSERT((td->td_inhibitors == 0),
405 ("maybe_preempt: trying to run inhibitted thread"));
406 pri = td->td_priority;
407 cpri = ctd->td_priority;
408 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
409 TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
410 return;
411 #endif
412
413 #if !defined(KSEG_PEEMPT_BEST_CPU)
414 if (ctd->td_ksegrp != td->td_ksegrp) {
415 #endif
416 kg = td->td_ksegrp;
417
418 /* if someone is ahead of this thread, wait our turn */
419 if (td != TAILQ_FIRST(&kg->kg_runq))
420 return;
421
422 worst_pri = td->td_priority;
423 best_pcpu = NULL;
424 dontuse = stopped_cpus | idle_cpus_mask;
425
426 /*
427 * Find the cpu running the worst-priority thread from the same
428 * ksegrp; if several tie, prefer first the cpu this thread last
429 * ran on and then the current cpu.
430 */
431
432 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
433 cpumask = pc->pc_cpumask;
434 cputhread = pc->pc_curthread;
435
436 if ((cpumask & dontuse) ||
437 cputhread->td_ksegrp != kg)
438 continue;
439
440 if (cputhread->td_priority > worst_pri) {
441 worst_pri = cputhread->td_priority;
442 best_pcpu = pc;
443 continue;
444 }
445
446 if (cputhread->td_priority == worst_pri &&
447 best_pcpu != NULL &&
448 (td->td_lastcpu == pc->pc_cpuid ||
449 (PCPU_GET(cpumask) == cpumask &&
450 td->td_lastcpu != best_pcpu->pc_cpuid)))
451 best_pcpu = pc;
452 }
453
454 /* Check if we need to preempt someone */
455 if (best_pcpu == NULL)
456 return;
457
458 #if defined(IPI_PREEMPTION) && defined(PREEMPTION)
459 #if !defined(FULL_PREEMPTION)
460 if (td->td_priority <= PRI_MAX_ITHD)
461 #endif /* ! FULL_PREEMPTION */
462 {
463 ipi_selected(best_pcpu->pc_cpumask, IPI_PREEMPT);
464 return;
465 }
466 #endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
467
468 if (PCPU_GET(cpuid) != best_pcpu->pc_cpuid) {
469 best_pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
470 ipi_selected(best_pcpu->pc_cpumask, IPI_AST);
471 return;
472 }
473 #if !defined(KSEG_PEEMPT_BEST_CPU)
474 }
475 #endif
476
477 if (td->td_priority >= ctd->td_priority)
478 return;
479 #ifdef PREEMPTION
480
481 #if !defined(FULL_PREEMPTION)
482 if (td->td_priority > PRI_MAX_ITHD)
483 ctd->td_flags |= TDF_NEEDRESCHED;
484 #endif /* ! FULL_PREEMPTION */
485
486 if (ctd->td_critnest > 1)
487 ctd->td_owepreempt = 1;
488 else
489 mi_switch(SW_INVOL, NULL);
490
491 #else /* PREEMPTION */
492 ctd->td_flags |= TDF_NEEDRESCHED;
493 #endif /* PREEMPTION */
494 return;
495 }
496 #endif /* !SMP */
497
498
499 int limitcount;
500 void
501 setrunqueue(struct thread *td, int flags)
502 {
503 struct ksegrp *kg;
504 struct thread *td2;
505 struct thread *tda;
506
507 CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
508 td, td->td_ksegrp, td->td_proc->p_pid);
509 CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
510 td, td->td_proc->p_comm, td->td_priority, curthread,
511 curthread->td_proc->p_comm);
512 mtx_assert(&sched_lock, MA_OWNED);
513 KASSERT((td->td_inhibitors == 0),
514 ("setrunqueue: trying to run inhibitted thread"));
515 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
516 ("setrunqueue: bad thread state"));
517 TD_SET_RUNQ(td);
518 kg = td->td_ksegrp;
519 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
520 /*
521 * Common path optimisation: Only one of everything
522 * and the KSE is always already attached.
523 * Totally ignore the ksegrp run queue.
524 */
525 if (kg->kg_avail_opennings != 1) {
526 if (limitcount < 1) {
527 limitcount++;
528 printf("pid %d: corrected slot count (%d->1)\n",
529 td->td_proc->p_pid, kg->kg_avail_opennings);
530
531 }
532 kg->kg_avail_opennings = 1;
533 }
534 sched_add(td, flags);
535 return;
536 }
537
538 /*
539 * If the concurrency has been reduced, and we would land in the
540 * assigned section, then keep removing entries from the
541 * system run queue until we are no longer in that section
542 * or there is room for us to be put in that section.
543 * What we MUST avoid is the case where threads of lower
544 * priority than the new one are scheduled, but the new one can not
545 * be scheduled itself. That would lead to a non-contiguous set
546 * of scheduled threads, and everything would break.
547 */
548 tda = kg->kg_last_assigned;
549 while ((kg->kg_avail_opennings <= 0) &&
550 (tda && (tda->td_priority > td->td_priority))) {
551 /*
552 * None free, but there is one we can commandeer.
553 */
554 CTR2(KTR_RUNQ,
555 "setrunqueue: kg:%p: take slot from td: %p", kg, tda);
556 sched_rem(tda);
557 tda = kg->kg_last_assigned =
558 TAILQ_PREV(tda, threadqueue, td_runq);
559 }
560
561 /*
562 * Add the thread to the ksegrp's run queue at
563 * the appropriate place.
564 */
565 TAILQ_FOREACH(td2, &kg->kg_runq, td_runq) {
566 if (td2->td_priority > td->td_priority) {
567 TAILQ_INSERT_BEFORE(td2, td, td_runq);
568 break;
569 }
570 }
571 if (td2 == NULL) {
572 /* We ran off the end of the TAILQ or it was empty. */
573 TAILQ_INSERT_TAIL(&kg->kg_runq, td, td_runq);
574 }
575
576 /*
577 * If we have a slot to use, then put the thread on the system
578 * run queue and if needed, readjust the last_assigned pointer.
579 * It may be that we need to schedule something anyhow,
580 * even if the available slots are negative, so that
581 * all the items up to last_assigned are scheduled.
582 */
583 if (kg->kg_avail_opennings > 0) {
584 if (tda == NULL) {
585 /*
586 * No pre-existing last assigned so whoever is first
587 * gets the slot.. (maybe us)
588 */
589 td2 = TAILQ_FIRST(&kg->kg_runq);
590 kg->kg_last_assigned = td2;
591 } else if (tda->td_priority > td->td_priority) {
592 td2 = td;
593 } else {
594 /*
595 * We are past last_assigned, so
596 * give the next slot to whatever is next,
597 * which may or may not be us.
598 */
599 td2 = TAILQ_NEXT(tda, td_runq);
600 kg->kg_last_assigned = td2;
601 }
602 sched_add(td2, flags);
603 } else {
604 CTR3(KTR_RUNQ, "setrunqueue: held: td%p kg%p pid%d",
605 td, td->td_ksegrp, td->td_proc->p_pid);
606 if ((flags & SRQ_YIELDING) == 0)
607 maybe_preempt_in_ksegrp(td);
608 }
609 }
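/*
 * Worked scenario (illustrative, priorities made up): the ksegrp queue holds
 * assigned threads at priorities 110 and 140, kg_last_assigned points at the
 * 140 thread, and no slots are free.  When a priority-120 thread is set
 * runnable, the loop above sched_rem()s the 140 thread (freeing its slot)
 * and backs kg_last_assigned up to the 110 thread; the new thread is then
 * inserted between them on the ksegrp queue and, since a slot is now free,
 * it is handed to sched_add() and kg_last_assigned moves forward to point
 * at it.
 */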
610
611 /*
612 * Kernel thread preemption implementation. Critical sections mark
613 * regions of code in which preemptions are not allowed.
614 */
615 void
616 critical_enter(void)
617 {
618 struct thread *td;
619
620 td = curthread;
621 td->td_critnest++;
622 CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
623 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
624 }
625
626 void
627 critical_exit(void)
628 {
629 struct thread *td;
630
631 td = curthread;
632 KASSERT(td->td_critnest != 0,
633 ("critical_exit: td_critnest == 0"));
634 #ifdef PREEMPTION
635 if (td->td_critnest == 1) {
636 td->td_critnest = 0;
637 mtx_assert(&sched_lock, MA_NOTOWNED);
638 if (td->td_owepreempt) {
639 td->td_critnest = 1;
640 mtx_lock_spin(&sched_lock);
641 td->td_critnest--;
642 mi_switch(SW_INVOL, NULL);
643 mtx_unlock_spin(&sched_lock);
644 }
645 } else
646 #endif
647 td->td_critnest--;
648
649
650 CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
651 (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
652 }
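/*
 * Usage sketch (illustrative only, never compiled here): a caller brackets
 * a short region that must not be preempted with critical_enter() and
 * critical_exit().  The work done inside is hypothetical; only the
 * bracketing pattern is the point, and the region must be short and must
 * not sleep.
 */
#if 0
static void
example_no_preempt_region(void)
{
	critical_enter();
	/* Touch per-CPU state here; we cannot be preempted until the exit. */
	critical_exit();
}
#endif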
653
654 /*
655 * This function is called when a thread is about to be put on a run queue
656 * because it has been made runnable or its priority has been adjusted. It
657 * determines whether we should immediately preempt to the new thread. If so,
658 * it switches to it and eventually returns true. If not, it returns false
659 * so that the caller may place the thread on an appropriate run queue.
660 */
661 int
662 maybe_preempt(struct thread *td)
663 {
664 #ifdef PREEMPTION
665 struct thread *ctd;
666 int cpri, pri;
667 #endif
668
669 mtx_assert(&sched_lock, MA_OWNED);
670 #ifdef PREEMPTION
671 /*
672 * The new thread should not preempt the current thread if any of the
673 * following conditions are true:
674 *
675 * - The kernel is in the throes of crashing (panicstr).
676 * - The current thread has a higher (numerically lower) or
677 * equivalent priority. Note that this prevents curthread from
678 * trying to preempt to itself.
679 * - It is too early in the boot for context switches (cold is set).
680 * - The current thread has an inhibitor set or is in the process of
681 * exiting. In this case, the current thread is about to switch
682 * out anyways, so there's no point in preempting. If we did,
683 * the current thread would not be properly resumed as well, so
684 * just avoid that whole landmine.
685 * - If the new thread's priority is not a realtime priority and
686 * the current thread's priority is not an idle priority and
687 * FULL_PREEMPTION is disabled.
688 *
689 * If all of these conditions are false, but the current thread is in
690 * a nested critical section, then we have to defer the preemption
691 * until we exit the critical section. Otherwise, switch immediately
692 * to the new thread.
693 */
694 ctd = curthread;
695 KASSERT ((ctd->td_kse != NULL && ctd->td_kse->ke_thread == ctd),
696 ("thread has no (or wrong) sched-private part."));
697 KASSERT((td->td_inhibitors == 0),
698 ("maybe_preempt: trying to run inhibitted thread"));
699 pri = td->td_priority;
700 cpri = ctd->td_priority;
701 if (panicstr != NULL || pri >= cpri || cold /* || dumping */ ||
702 TD_IS_INHIBITED(ctd) || td->td_kse->ke_state != KES_THREAD)
703 return (0);
704 #ifndef FULL_PREEMPTION
705 if (pri > PRI_MAX_ITHD && cpri < PRI_MIN_IDLE)
706 return (0);
707 #endif
708
709 if (ctd->td_critnest > 1) {
710 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
711 ctd->td_critnest);
712 ctd->td_owepreempt = 1;
713 return (0);
714 }
715
716 /*
717 * Thread is runnable but not yet put on system run queue.
718 */
719 MPASS(TD_ON_RUNQ(td));
720 MPASS(td->td_sched->ke_state != KES_ONRUNQ);
721 if (td->td_proc->p_flag & P_HADTHREADS) {
722 /*
723 * If this is a threaded process we actually ARE on the
724 * ksegrp run queue so take it off that first.
725 * Also undo any damage done to the last_assigned pointer.
726 * XXX Fix setrunqueue so this isn't needed
727 */
728 struct ksegrp *kg;
729
730 kg = td->td_ksegrp;
731 if (kg->kg_last_assigned == td)
732 kg->kg_last_assigned =
733 TAILQ_PREV(td, threadqueue, td_runq);
734 TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
735 }
736
737 TD_SET_RUNNING(td);
738 CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
739 td->td_proc->p_pid, td->td_proc->p_comm);
740 mi_switch(SW_INVOL|SW_PREEMPT, td);
741 return (1);
742 #else
743 return (0);
744 #endif
745 }
746
747 #if 0
748 #ifndef PREEMPTION
749 /* XXX: There should be a non-static version of this. */
750 static void
751 printf_caddr_t(void *data)
752 {
753 printf("%s", (char *)data);
754 }
755 static char preempt_warning[] =
756 "WARNING: Kernel preemption is disabled, expect reduced performance.\n";
757 SYSINIT(preempt_warning, SI_SUB_COPYRIGHT, SI_ORDER_ANY, printf_caddr_t,
758 preempt_warning)
759 #endif
760 #endif
761
762 /************************************************************************
763 * SYSTEM RUN QUEUE manipulations and tests *
764 ************************************************************************/
765 /*
766 * Initialize a run structure.
767 */
768 void
769 runq_init(struct runq *rq)
770 {
771 int i;
772
773 bzero(rq, sizeof *rq);
774 for (i = 0; i < RQ_NQS; i++)
775 TAILQ_INIT(&rq->rq_queues[i]);
776 }
777
778 /*
779 * Clear the status bit of the queue corresponding to priority level pri,
780 * indicating that it is empty.
781 */
782 static __inline void
783 runq_clrbit(struct runq *rq, int pri)
784 {
785 struct rqbits *rqb;
786
787 rqb = &rq->rq_status;
788 CTR4(KTR_RUNQ, "runq_clrbit: bits=%#x %#x bit=%#x word=%d",
789 rqb->rqb_bits[RQB_WORD(pri)],
790 rqb->rqb_bits[RQB_WORD(pri)] & ~RQB_BIT(pri),
791 RQB_BIT(pri), RQB_WORD(pri));
792 rqb->rqb_bits[RQB_WORD(pri)] &= ~RQB_BIT(pri);
793 }
794
795 /*
796 * Find the index of the first non-empty run queue. This is done by
797 * scanning the status bits; a set bit indicates a non-empty queue.
798 */
799 static __inline int
800 runq_findbit(struct runq *rq)
801 {
802 struct rqbits *rqb;
803 int pri;
804 int i;
805
806 rqb = &rq->rq_status;
807 for (i = 0; i < RQB_LEN; i++)
808 if (rqb->rqb_bits[i]) {
809 pri = RQB_FFS(rqb->rqb_bits[i]) + (i << RQB_L2BPW);
810 CTR3(KTR_RUNQ, "runq_findbit: bits=%#x i=%d pri=%d",
811 rqb->rqb_bits[i], i, pri);
812 return (pri);
813 }
814
815 return (-1);
816 }
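/*
 * Worked example (illustrative, assuming the stock RQ_PPQ of 4 and 32-bit
 * status words): a thread at priority 100 is queued at index 100 / 4 == 25,
 * which is word RQB_WORD(25) == 0 and bit RQB_BIT(25) == 1 << 25, so when
 * that is the lowest set bit runq_findbit() recovers index 25 from
 * rqb_bits[0].  The actual word width comes from RQB_BPW/RQB_L2BPW.
 */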
817
818 /*
819 * Set the status bit of the queue corresponding to priority level pri,
820 * indicating that it is non-empty.
821 */
822 static __inline void
823 runq_setbit(struct runq *rq, int pri)
824 {
825 struct rqbits *rqb;
826
827 rqb = &rq->rq_status;
828 CTR4(KTR_RUNQ, "runq_setbit: bits=%#x %#x bit=%#x word=%d",
829 rqb->rqb_bits[RQB_WORD(pri)],
830 rqb->rqb_bits[RQB_WORD(pri)] | RQB_BIT(pri),
831 RQB_BIT(pri), RQB_WORD(pri));
832 rqb->rqb_bits[RQB_WORD(pri)] |= RQB_BIT(pri);
833 }
834
835 /*
836 * Add the KSE to the queue specified by its priority, and set the
837 * corresponding status bit.
838 */
839 void
840 runq_add(struct runq *rq, struct kse *ke, int flags)
841 {
842 struct rqhead *rqh;
843 int pri;
844
845 pri = ke->ke_thread->td_priority / RQ_PPQ;
846 ke->ke_rqindex = pri;
847 runq_setbit(rq, pri);
848 rqh = &rq->rq_queues[pri];
849 CTR5(KTR_RUNQ, "runq_add: td=%p ke=%p pri=%d %d rqh=%p",
850 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
851 if (flags & SRQ_PREEMPTED) {
852 TAILQ_INSERT_HEAD(rqh, ke, ke_procq);
853 } else {
854 TAILQ_INSERT_TAIL(rqh, ke, ke_procq);
855 }
856 }
857
858 /*
859 * Return true if there are runnable processes of any priority on the run
860 * queue, false otherwise. Has no side effects and does not modify the run
861 * queue structure.
862 */
863 int
864 runq_check(struct runq *rq)
865 {
866 struct rqbits *rqb;
867 int i;
868
869 rqb = &rq->rq_status;
870 for (i = 0; i < RQB_LEN; i++)
871 if (rqb->rqb_bits[i]) {
872 CTR2(KTR_RUNQ, "runq_check: bits=%#x i=%d",
873 rqb->rqb_bits[i], i);
874 return (1);
875 }
876 CTR0(KTR_RUNQ, "runq_check: empty");
877
878 return (0);
879 }
880
881 #if defined(SMP) && defined(SCHED_4BSD)
882 int runq_fuzz = 1;
883 SYSCTL_INT(_kern_sched, OID_AUTO, runq_fuzz, CTLFLAG_RW, &runq_fuzz, 0, "");
884 #endif
885
886 /*
887 * Find the highest priority process on the run queue.
888 */
889 struct kse *
890 runq_choose(struct runq *rq)
891 {
892 struct rqhead *rqh;
893 struct kse *ke;
894 int pri;
895
896 mtx_assert(&sched_lock, MA_OWNED);
897 while ((pri = runq_findbit(rq)) != -1) {
898 rqh = &rq->rq_queues[pri];
899 #if defined(SMP) && defined(SCHED_4BSD)
900 /* fuzz == 1 is normal.. 0 or less are ignored */
901 if (runq_fuzz > 1) {
902 /*
903 * In the first couple of entries, check if
904 * there is one for our CPU as a preference.
905 */
906 int count = runq_fuzz;
907 int cpu = PCPU_GET(cpuid);
908 struct kse *ke2;
909 ke2 = ke = TAILQ_FIRST(rqh);
910
911 while (count-- && ke2) {
912 if (ke2->ke_thread->td_lastcpu == cpu) {
913 ke = ke2;
914 break;
915 }
916 ke2 = TAILQ_NEXT(ke2, ke_procq);
917 }
918 } else
919 #endif
920 ke = TAILQ_FIRST(rqh);
921 KASSERT(ke != NULL, ("runq_choose: no proc on busy queue"));
922 CTR3(KTR_RUNQ,
923 "runq_choose: pri=%d kse=%p rqh=%p", pri, ke, rqh);
924 return (ke);
925 }
926 CTR1(KTR_RUNQ, "runq_choose: idleproc pri=%d", pri);
927
928 return (NULL);
929 }
930
931 /*
932 * Remove the KSE from the queue specified by its priority, and clear the
933 * corresponding status bit if the queue becomes empty.
934 * Caller must set ke->ke_state afterwards.
935 */
936 void
937 runq_remove(struct runq *rq, struct kse *ke)
938 {
939 struct rqhead *rqh;
940 int pri;
941
942 KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
943 ("runq_remove: process swapped out"));
944 pri = ke->ke_rqindex;
945 rqh = &rq->rq_queues[pri];
946 CTR5(KTR_RUNQ, "runq_remove: td=%p, ke=%p pri=%d %d rqh=%p",
947 ke->ke_thread, ke, ke->ke_thread->td_priority, pri, rqh);
948 KASSERT(ke != NULL, ("runq_remove: no proc on busy queue"));
949 TAILQ_REMOVE(rqh, ke, ke_procq);
950 if (TAILQ_EMPTY(rqh)) {
951 CTR0(KTR_RUNQ, "runq_remove: empty");
952 runq_clrbit(rq, pri);
953 }
954 }
955
956 /****** functions that are temporarily here ***********/
957 #include <vm/uma.h>
958 extern struct mtx kse_zombie_lock;
959
960 /*
961 * Allocate scheduler specific per-process resources.
962 * The thread and ksegrp have already been linked in.
963 * In this case just set the default concurrency value.
964 *
965 * Called from:
966 * proc_init() (UMA init method)
967 */
968 void
969 sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
970 {
971
972 /* This can go in sched_fork */
973 sched_init_concurrency(kg);
974 }
975
976 /*
977 * A thread is being either created or recycled.
978 * Fix up the per-scheduler resources associated with it.
979 * Called from:
980 * sched_fork_thread()
981 * thread_dtor() (*may go away)
982 * thread_init() (*may go away)
983 */
984 void
985 sched_newthread(struct thread *td)
986 {
987 struct td_sched *ke;
988
989 ke = (struct td_sched *) (td + 1);
990 bzero(ke, sizeof(*ke));
991 td->td_sched = ke;
992 ke->ke_thread = td;
993 ke->ke_state = KES_THREAD;
994 }
995
996 /*
997 * Set up an initial concurrency of 1
998 * and set the given thread (if given) to be using that
999 * concurrency slot.
1000 * May be used "offline", i.e. before the ksegrp is attached to the world,
1001 * in which case it does not need schedlock.
1002 * Called from:
1003 * thr_create()
1004 * proc_init() (UMA) via sched_newproc()
1005 */
1006 void
1007 sched_init_concurrency(struct ksegrp *kg)
1008 {
1009
1010 CTR1(KTR_RUNQ,"kg %p init slots and concurrency to 1", kg);
1011 kg->kg_concurrency = 1;
1012 kg->kg_avail_opennings = 1;
1013 }
1014
1015 /*
1016 * Change the concurrency of an existing ksegrp to N
1017 * Called from:
1018 * kse_create()
1019 * kse_exit()
1020 * thread_exit()
1021 * thread_single()
1022 */
1023 void
1024 sched_set_concurrency(struct ksegrp *kg, int concurrency)
1025 {
1026
1027 CTR4(KTR_RUNQ,"kg %p set concurrency to %d, slots %d -> %d",
1028 kg,
1029 concurrency,
1030 kg->kg_avail_opennings,
1031 kg->kg_avail_opennings + (concurrency - kg->kg_concurrency));
1032 kg->kg_avail_opennings += (concurrency - kg->kg_concurrency);
1033 kg->kg_concurrency = concurrency;
1034 }
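/*
 * Worked example (illustrative): if a ksegrp currently has kg_concurrency 2
 * with 1 open slot and the concurrency is raised to 3, kg_avail_opennings
 * becomes 1 + (3 - 2) == 2; lowering the concurrency back to 2 would take
 * it back to 1, and the count may legitimately go negative for a while.
 */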
1035
1036 /*
1037 * Called from thread_exit() for all exiting threads.
1038 *
1039 * Not to be confused with sched_exit_thread(), which thread_exit() only
1040 * calls for threads exiting without the rest of the process exiting;
1041 * that function is also called from sched_exit(), and we wouldn't want
1042 * to call it twice.
1043 * XXX This can probably be fixed.
1044 */
1045 void
1046 sched_thread_exit(struct thread *td)
1047 {
1048
1049 SLOT_RELEASE(td->td_ksegrp);
1050 slot_fill(td->td_ksegrp);
1051 }
1052
1053 #endif /* KERN_SWITCH_INCLUDE */