FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_ule.c
1 /*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * This file implements the ULE scheduler. ULE supports independent CPU
29 * run queues and fine grain locking. It has superior interactive
30 * performance under load even on uni-processor systems.
31 *
32 * etymology:
33 * ULE is the last three letters in schedule. It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD: releng/7.4/sys/kern/sched_ule.c 213722 2010-10-12 15:26:37Z jhb $");
40
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_kdtrace.h"
43 #include "opt_sched.h"
44
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kdb.h>
48 #include <sys/kernel.h>
49 #include <sys/ktr.h>
50 #include <sys/lock.h>
51 #include <sys/mutex.h>
52 #include <sys/proc.h>
53 #include <sys/resource.h>
54 #include <sys/resourcevar.h>
55 #include <sys/sched.h>
56 #include <sys/smp.h>
57 #include <sys/sx.h>
58 #include <sys/sysctl.h>
59 #include <sys/sysproto.h>
60 #include <sys/turnstile.h>
61 #include <sys/umtx.h>
62 #include <sys/vmmeter.h>
63 #include <sys/cpuset.h>
64 #ifdef KTRACE
65 #include <sys/uio.h>
66 #include <sys/ktrace.h>
67 #endif
68
69 #ifdef HWPMC_HOOKS
70 #include <sys/pmckern.h>
71 #endif
72
73 #ifdef KDTRACE_HOOKS
74 #include <sys/dtrace_bsd.h>
75 int dtrace_vtime_active;
76 dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
77 #endif
78
79 #include <machine/cpu.h>
80 #include <machine/smp.h>
81
82 #if !defined(__i386__) && !defined(__amd64__) && !defined(__arm__)
83 #error "This architecture is not currently compatible with ULE"
84 #endif
85
86 #define KTR_ULE 0
87
88 /*
89 * Thread scheduler specific section. All fields are protected
90 * by the thread lock.
91 */
92 struct td_sched {
93 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */
94 struct thread *ts_thread; /* Active associated thread. */
95 struct runq *ts_runq; /* Run-queue we're queued on. */
96 short ts_flags; /* TSF_* flags. */
97 u_char ts_rqindex; /* Run queue index. */
98 u_char ts_cpu; /* CPU that we have affinity for. */
99 int ts_slice; /* Ticks of slice remaining. */
100 u_int ts_slptime; /* Number of ticks we vol. slept */
101 u_int ts_runtime; /* Number of ticks we were running */
102 /* The following variables are only used for pctcpu calculation */
103 int ts_ltick; /* Last tick that we were running on */
104 int ts_incrtick; /* Last tick that we incremented on */
105 int ts_ftick; /* First tick that we were running on */
106 int ts_ticks; /* Tick count */
107 #ifdef SMP
108 int ts_rltick; /* Real last tick, for affinity. */
109 #endif
110 };
111 /* flags kept in ts_flags */
112 #define TSF_BOUND 0x0001 /* Thread can not migrate. */
113 #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */
114
115 static struct td_sched td_sched0;
116
117 #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
118 #define THREAD_CAN_SCHED(td, cpu) \
119 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
120
121 /*
122 * Cpu percentage computation macros and defines.
123 *
124 * SCHED_TICK_SECS: Number of seconds to average the cpu usage across.
125 * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across.
126 * SCHED_TICK_MAX: Maximum number of ticks before scaling back.
127 * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results.
128 * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count.
129 * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks.
130 */
131 #define SCHED_TICK_SECS 10
132 #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS)
133 #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz)
134 #define SCHED_TICK_SHIFT 10
135 #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT)
136 #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz))
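
/*
 * Illustrative note (added, not part of the original source): with a
 * typical hz of 1000, SCHED_TICK_TARG is 10000 ticks (10 seconds of
 * history) and SCHED_TICK_MAX is 11000.  ts_ticks is kept scaled up by
 * SCHED_TICK_SHIFT (a factor of 1024), so SCHED_TICK_HZ() simply shifts
 * it back down to a plain hz-tick count, and SCHED_TICK_TOTAL() bounds
 * the sampling window to at least one second.
 */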
137
138 /*
139 * These macros determine priorities for non-interactive threads. They are
140 * assigned a priority based on their recent cpu utilization as expressed
141 * by the ratio of ticks to the tick total. NHALF priorities at the start
142 * and end of the MIN to MAX timeshare range are only reachable with negative
143 * or positive nice respectively.
144 *
145 * PRI_RANGE: Priority range for utilization dependent priorities.
146 * PRI_NRESV: Number of nice values.
147 * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total.
148 * PRI_NICE: Determines the part of the priority inherited from nice.
149 */
150 #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN)
151 #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2)
152 #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
153 #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
154 #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN)
155 #define SCHED_PRI_TICKS(ts) \
156 (SCHED_TICK_HZ((ts)) / \
157 (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
158 #define SCHED_PRI_NICE(nice) (nice)
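
/*
 * Worked example (added, not part of the original source): nice spans
 * PRIO_MIN..PRIO_MAX (-20..20), so SCHED_PRI_NRESV is 40 and NHALF is 20.
 * A thread that was runnable for the whole SCHED_TICK_TARG window and ran
 * on every tick has SCHED_TICK_HZ() roughly equal to SCHED_TICK_TOTAL(),
 * so SCHED_PRI_TICKS() evaluates to roughly SCHED_PRI_RANGE and the thread
 * lands near SCHED_PRI_MAX before nice is added.  A thread that ran for
 * only a few percent of the window stays near SCHED_PRI_MIN.
 */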
159
160 /*
161 * These determine the interactivity of a process. Interactivity differs from
162 * cpu utilization in that it expresses the voluntary time slept vs time ran
163 * while cpu utilization includes all time not running. This more accurately
164 * models the intent of the thread.
165 *
166 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate
167 * before throttling back.
168 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
169 * INTERACT_MAX: Maximum interactivity value. Smaller is better.
170 * INTERACT_THRESH: Threshold for placement on the current runq.
171 */
172 #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT)
173 #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT)
174 #define SCHED_INTERACT_MAX (100)
175 #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
176 #define SCHED_INTERACT_THRESH (30)
177
178 /*
179 * tickincr: Converts a stathz tick into a hz domain scaled by
180 * the shift factor. Without the shift the error rate
181 * due to rounding would be unacceptably high.
182 * realstathz: stathz is sometimes 0 and we run off of hz instead.
183 * sched_slice: Runtime of each thread before rescheduling.
184 * preempt_thresh: Priority threshold for preemption and remote IPIs.
185 */
186 static int sched_interact = SCHED_INTERACT_THRESH;
187 static int realstathz;
188 static int tickincr;
189 static int sched_slice;
190 #ifdef PREEMPTION
191 #ifdef FULL_PREEMPTION
192 static int preempt_thresh = PRI_MAX_IDLE;
193 #else
194 static int preempt_thresh = PRI_MIN_KERN;
195 #endif
196 #else
197 static int preempt_thresh = 0;
198 #endif
199
200 /*
201 * tdq - per processor runqs and statistics. All fields are protected by the
202 * tdq_lock. The load and lowpri may be accessed without the lock to
203 * avoid excess locking in sched_pickcpu().
204 */
205 struct tdq {
206 struct mtx *tdq_lock; /* Pointer to group lock. */
207 struct runq tdq_realtime; /* real-time run queue. */
208 struct runq tdq_timeshare; /* timeshare run queue. */
209 struct runq tdq_idle; /* Queue of IDLE threads. */
210 int tdq_load; /* Aggregate load. */
211 u_char tdq_idx; /* Current insert index. */
212 u_char tdq_ridx; /* Current removal index. */
213 #ifdef SMP
214 u_char tdq_lowpri; /* Lowest priority thread. */
215 int tdq_transferable; /* Transferable thread count. */
216 LIST_ENTRY(tdq) tdq_siblings; /* Next in tdq group. */
217 struct tdq_group *tdq_group; /* Our processor group. */
218 #else
219 int tdq_sysload; /* For loadavg, !ITHD load. */
220 #endif
221 } __aligned(64);
222
223
224 #ifdef SMP
225 /*
226 * tdq groups are groups of processors which can cheaply share threads. When
227 * one processor in the group goes idle it will check the runqs of the other
228 * processors in its group prior to halting and waiting for an interrupt.
229 * These groups are suitable for SMT (Symmetric Multi-Threading) and not NUMA.
230 * In a NUMA environment we'd want an idle bitmap per group and a two-tiered
231 * load balancer.
232 */
233 struct tdq_group {
234 struct mtx tdg_lock; /* Protects all fields below. */
235 int tdg_cpus; /* Count of CPUs in this tdq group. */
236 cpumask_t tdg_cpumask; /* Mask of cpus in this group. */
237 cpumask_t tdg_idlemask; /* Idle cpus in this group. */
238 cpumask_t tdg_mask; /* Bit mask for first cpu. */
239 int tdg_load; /* Total load of this group. */
240 int tdg_transferable; /* Transferable load of this group. */
241 LIST_HEAD(, tdq) tdg_members; /* Linked list of all members. */
242 char tdg_name[16]; /* lock name. */
243 } __aligned(64);
244
245 #define SCHED_AFFINITY_DEFAULT (max(1, hz / 300))
246 #define SCHED_AFFINITY(ts) ((ts)->ts_rltick > ticks - affinity)
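
/*
 * Illustrative note (added, not part of the original source): with
 * hz = 1000 the default affinity window is max(1, 1000 / 300) = 3 ticks,
 * so SCHED_AFFINITY() considers a thread to still have cache affinity for
 * its last CPU if it ran there within roughly the last 3ms.
 */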
247
248 /*
249 * Run-time tunables.
250 */
251 static int rebalance = 1;
252 static int balance_interval = 128; /* Default set in sched_initticks(). */
253 static int pick_pri = 1;
254 static int affinity;
255 static int tryself = 1;
256 static int steal_htt = 1;
257 static int steal_idle = 1;
258 static int steal_thresh = 2;
259 static int topology = 0;
260
261 /*
262 * One thread queue per processor.
263 */
264 static volatile cpumask_t tdq_idle;
265 static int tdg_maxid;
266 static struct tdq tdq_cpu[MAXCPU];
267 static struct tdq_group tdq_groups[MAXCPU];
268 static struct tdq *balance_tdq;
269 static int balance_group_ticks;
270 static int balance_ticks;
271
272 #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)])
273 #define TDQ_CPU(x) (&tdq_cpu[(x)])
274 #define TDQ_ID(x) ((int)((x) - tdq_cpu))
275 #define TDQ_GROUP(x) (&tdq_groups[(x)])
276 #define TDG_ID(x) ((int)((x) - tdq_groups))
277 #else /* !SMP */
278 static struct tdq tdq_cpu;
279 static struct mtx tdq_lock;
280
281 #define TDQ_ID(x) (0)
282 #define TDQ_SELF() (&tdq_cpu)
283 #define TDQ_CPU(x) (&tdq_cpu)
284 #endif
285
286 #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type))
287 #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t)))
288 #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
289 #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
290 #define TDQ_LOCKPTR(t) ((t)->tdq_lock)
291
292 static void sched_priority(struct thread *);
293 static void sched_thread_priority(struct thread *, u_char);
294 static int sched_interact_score(struct thread *);
295 static void sched_interact_update(struct thread *);
296 static void sched_interact_fork(struct thread *);
297 static void sched_pctcpu_update(struct td_sched *);
298
299 /* Operations on per processor queues */
300 static struct td_sched * tdq_choose(struct tdq *);
301 static void tdq_setup(struct tdq *);
302 static void tdq_load_add(struct tdq *, struct td_sched *);
303 static void tdq_load_rem(struct tdq *, struct td_sched *);
304 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
305 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
306 void tdq_print(int cpu);
307 static void runq_print(struct runq *rq);
308 static void tdq_add(struct tdq *, struct thread *, int);
309 #ifdef SMP
310 static void tdq_move(struct tdq *, struct tdq *);
311 static int tdq_idled(struct tdq *);
312 static void tdq_notify(struct td_sched *);
313 static struct td_sched *tdq_steal(struct tdq *, int);
314 static struct td_sched *runq_steal(struct runq *, int);
315 static int sched_pickcpu(struct thread *, int);
316 static void sched_balance(void);
317 static void sched_balance_groups(void);
318 static void sched_balance_group(struct tdq_group *);
319 static void sched_balance_pair(struct tdq *, struct tdq *);
320 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
321 static inline void thread_unblock_switch(struct thread *, struct mtx *);
322 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
323 #endif
324
325 static void sched_setup(void *dummy);
326 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
327
328 static void sched_initticks(void *dummy);
329 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
330 NULL);
331
332 /*
333 * Print the threads waiting on a run-queue.
334 */
335 static void
336 runq_print(struct runq *rq)
337 {
338 struct rqhead *rqh;
339 struct td_sched *ts;
340 int pri;
341 int j;
342 int i;
343
344 for (i = 0; i < RQB_LEN; i++) {
345 printf("\t\trunq bits %d 0x%zx\n",
346 i, rq->rq_status.rqb_bits[i]);
347 for (j = 0; j < RQB_BPW; j++)
348 if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
349 pri = j + (i << RQB_L2BPW);
350 rqh = &rq->rq_queues[pri];
351 TAILQ_FOREACH(ts, rqh, ts_procq) {
352 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
353 ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
354 }
355 }
356 }
357 }
358
359 /*
360 * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
361 */
362 void
363 tdq_print(int cpu)
364 {
365 struct tdq *tdq;
366
367 tdq = TDQ_CPU(cpu);
368
369 printf("tdq %d:\n", TDQ_ID(tdq));
370 printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq));
371 printf("\tload: %d\n", tdq->tdq_load);
372 printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
373 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
374 printf("\trealtime runq:\n");
375 runq_print(&tdq->tdq_realtime);
376 printf("\ttimeshare runq:\n");
377 runq_print(&tdq->tdq_timeshare);
378 printf("\tidle runq:\n");
379 runq_print(&tdq->tdq_idle);
380 #ifdef SMP
381 printf("\tload transferable: %d\n", tdq->tdq_transferable);
382 printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
383 printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group));
384 printf("\tLock name: %s\n", tdq->tdq_group->tdg_name);
385 #endif
386 }
387
388 #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
389 /*
390 * Add a thread to the actual run-queue. Keeps transferable counts up to
391 * date with what is actually on the run-queue. Selects the correct
392 * queue position for timeshare threads.
393 */
394 static __inline void
395 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
396 {
397 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
398 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
399 #ifdef SMP
400 if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
401 tdq->tdq_transferable++;
402 tdq->tdq_group->tdg_transferable++;
403 ts->ts_flags |= TSF_XFERABLE;
404 }
405 #endif
406 if (ts->ts_runq == &tdq->tdq_timeshare) {
407 u_char pri;
408
409 pri = ts->ts_thread->td_priority;
410 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
411 ("Invalid priority %d on timeshare runq", pri));
412 /*
413 * This queue contains only priorities between MIN and MAX
414 * timeshare. Use the whole queue to represent these values.
415 */
416 if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
417 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
418 pri = (pri + tdq->tdq_idx) % RQ_NQS;
419 /*
420 * This effectively shortens the queue by one so we
421 * can have a one slot difference between idx and
422 * ridx while we wait for threads to drain.
423 */
424 if (tdq->tdq_ridx != tdq->tdq_idx &&
425 pri == tdq->tdq_ridx)
426 pri = (unsigned char)(pri - 1) % RQ_NQS;
427 } else
428 pri = tdq->tdq_ridx;
429 runq_add_pri(ts->ts_runq, ts, pri, flags);
430 } else
431 runq_add(ts->ts_runq, ts, flags);
432 }
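
/*
 * Illustrative note (added, not part of the original source): with the
 * stock constants the timeshare range holds RQ_NQS (64) priorities, so
 * TS_RQ_PPQ is 1 and each timeshare priority maps to its own bucket.
 * The bucket is then rotated by tdq_idx, turning the timeshare runq into
 * a circular calendar queue: lower priority (higher numeric value)
 * threads are inserted further ahead of the removal index (tdq_ridx) and
 * therefore wait longer before they are reached.
 */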
433
434 /*
435 * Remove a thread from a run-queue. This typically happens when a thread
436 * is selected to run. Running threads are not on the queue and the
437 * transferable count does not reflect them.
438 */
439 static __inline void
440 tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
441 {
442 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
443 KASSERT(ts->ts_runq != NULL,
444 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
445 #ifdef SMP
446 if (ts->ts_flags & TSF_XFERABLE) {
447 tdq->tdq_transferable--;
448 tdq->tdq_group->tdg_transferable--;
449 ts->ts_flags &= ~TSF_XFERABLE;
450 }
451 #endif
452 if (ts->ts_runq == &tdq->tdq_timeshare) {
453 if (tdq->tdq_idx != tdq->tdq_ridx)
454 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
455 else
456 runq_remove_idx(ts->ts_runq, ts, NULL);
457 /*
458 * For timeshare threads we update the priority here so
459 * the priority reflects the time we've been sleeping.
460 */
461 ts->ts_ltick = ticks;
462 sched_pctcpu_update(ts);
463 sched_priority(ts->ts_thread);
464 } else
465 runq_remove(ts->ts_runq, ts);
466 }
467
468 /*
469 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
470 * for this thread to the referenced thread queue.
471 */
472 static void
473 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
474 {
475 int class;
476
477 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
478 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
479 class = PRI_BASE(ts->ts_thread->td_pri_class);
480 tdq->tdq_load++;
481 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
482 if (class != PRI_ITHD &&
483 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
484 #ifdef SMP
485 tdq->tdq_group->tdg_load++;
486 #else
487 tdq->tdq_sysload++;
488 #endif
489 }
490
491 /*
492 * Remove the load from a thread that is transitioning to a sleep state or
493 * exiting.
494 */
495 static void
496 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
497 {
498 int class;
499
500 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
501 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
502 class = PRI_BASE(ts->ts_thread->td_pri_class);
503 if (class != PRI_ITHD &&
504 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
505 #ifdef SMP
506 tdq->tdq_group->tdg_load--;
507 #else
508 tdq->tdq_sysload--;
509 #endif
510 KASSERT(tdq->tdq_load != 0,
511 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
512 tdq->tdq_load--;
513 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
514 ts->ts_runq = NULL;
515 }
516
517 #ifdef SMP
518 /*
519 * sched_balance is a simple CPU load balancing algorithm. It operates by
520 * finding the least loaded and most loaded cpu and equalizing their load
521 * by migrating some processes.
522 *
523 * Dealing only with two CPUs at a time has two advantages. Firstly, most
524 * installations will only have 2 cpus. Secondly, load balancing too much at
525 * once can have an unpleasant effect on the system. The scheduler rarely has
526 * enough information to make perfect decisions. So this algorithm chooses
527 * simplicity and more gradual effects on load in larger systems.
528 *
529 */
530 static void
531 sched_balance()
532 {
533 struct tdq_group *high;
534 struct tdq_group *low;
535 struct tdq_group *tdg;
536 struct tdq *tdq;
537 int cnt;
538 int i;
539
540 /*
541 * Select a random time between .5 * balance_interval and
542 * 1.5 * balance_interval.
543 */
544 balance_ticks = max(balance_interval / 2, 1);
545 balance_ticks += random() % balance_interval;
546 if (smp_started == 0 || rebalance == 0)
547 return;
548 tdq = TDQ_SELF();
549 TDQ_UNLOCK(tdq);
550 low = high = NULL;
551 i = random() % (tdg_maxid + 1);
552 for (cnt = 0; cnt <= tdg_maxid; cnt++) {
553 tdg = TDQ_GROUP(i);
554 /*
555 * Find the CPU with the highest load that has some
556 * threads to transfer.
557 */
558 if ((high == NULL || tdg->tdg_load > high->tdg_load)
559 && tdg->tdg_transferable)
560 high = tdg;
561 if (low == NULL || tdg->tdg_load < low->tdg_load)
562 low = tdg;
563 if (++i > tdg_maxid)
564 i = 0;
565 }
566 if (low != NULL && high != NULL && high != low)
567 sched_balance_pair(LIST_FIRST(&high->tdg_members),
568 LIST_FIRST(&low->tdg_members));
569 TDQ_LOCK(tdq);
570 }
571
572 /*
573 * Balance load between CPUs in a group. Will only migrate within the group.
574 */
575 static void
576 sched_balance_groups()
577 {
578 struct tdq *tdq;
579 int i;
580
581 /*
582 * Select a random time between .5 * balance_interval and
583 * 1.5 * balance_interval.
584 */
585 balance_group_ticks = max(balance_interval / 2, 1);
586 balance_group_ticks += random() % balance_interval;
587 if (smp_started == 0 || rebalance == 0)
588 return;
589 tdq = TDQ_SELF();
590 TDQ_UNLOCK(tdq);
591 for (i = 0; i <= tdg_maxid; i++)
592 sched_balance_group(TDQ_GROUP(i));
593 TDQ_LOCK(tdq);
594 }
595
596 /*
597 * Finds the greatest imbalance between two tdqs in a group.
598 */
599 static void
600 sched_balance_group(struct tdq_group *tdg)
601 {
602 struct tdq *tdq;
603 struct tdq *high;
604 struct tdq *low;
605 int load;
606
607 if (tdg->tdg_transferable == 0)
608 return;
609 low = NULL;
610 high = NULL;
611 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
612 load = tdq->tdq_load;
613 if (high == NULL || load > high->tdq_load)
614 high = tdq;
615 if (low == NULL || load < low->tdq_load)
616 low = tdq;
617 }
618 if (high != NULL && low != NULL && high != low)
619 sched_balance_pair(high, low);
620 }
621
622 /*
623 * Lock two thread queues using their address to maintain lock order.
624 */
625 static void
626 tdq_lock_pair(struct tdq *one, struct tdq *two)
627 {
628 if (one < two) {
629 TDQ_LOCK(one);
630 TDQ_LOCK_FLAGS(two, MTX_DUPOK);
631 } else {
632 TDQ_LOCK(two);
633 TDQ_LOCK_FLAGS(one, MTX_DUPOK);
634 }
635 }
636
637 /*
638 * Unlock two thread queues. Order is not important here.
639 */
640 static void
641 tdq_unlock_pair(struct tdq *one, struct tdq *two)
642 {
643 TDQ_UNLOCK(one);
644 TDQ_UNLOCK(two);
645 }
646
647 /*
648 * Transfer load between two imbalanced thread queues.
649 */
650 static void
651 sched_balance_pair(struct tdq *high, struct tdq *low)
652 {
653 int transferable;
654 int high_load;
655 int low_load;
656 int move;
657 int diff;
658 int i;
659
660 tdq_lock_pair(high, low);
661 /*
662 * If we're transferring within a group we have to use this specific
663 * tdq's transferable count, otherwise we can steal from other members
664 * of the group.
665 */
666 if (high->tdq_group == low->tdq_group) {
667 transferable = high->tdq_transferable;
668 high_load = high->tdq_load;
669 low_load = low->tdq_load;
670 } else {
671 transferable = high->tdq_group->tdg_transferable;
672 high_load = high->tdq_group->tdg_load;
673 low_load = low->tdq_group->tdg_load;
674 }
675 /*
676 * Determine what the imbalance is and then adjust that to how many
677 * threads we actually have to give up (transferable).
678 */
679 if (transferable != 0) {
680 diff = high_load - low_load;
681 move = diff / 2;
682 if (diff & 0x1)
683 move++;
684 move = min(move, transferable);
685 for (i = 0; i < move; i++)
686 tdq_move(high, low);
687 /*
688 * IPI the target cpu to force it to reschedule with the new
689 * workload.
690 */
691 ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
692 }
693 tdq_unlock_pair(high, low);
694 return;
695 }
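
/*
 * Worked example (added, not part of the original source): if the busy
 * queue reports a load of 5 and the idle one a load of 2, diff is 3 and
 * move rounds up to 2, so up to two transferable threads are pulled
 * across before the target cpu is IPIed to notice its new work.
 */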
696
697 /*
698 * Move a thread from one thread queue to another.
699 */
700 static void
701 tdq_move(struct tdq *from, struct tdq *to)
702 {
703 struct td_sched *ts;
704 struct thread *td;
705 struct tdq *tdq;
706 int cpu;
707
708 TDQ_LOCK_ASSERT(from, MA_OWNED);
709 TDQ_LOCK_ASSERT(to, MA_OWNED);
710
711 tdq = from;
712 cpu = TDQ_ID(to);
713 ts = tdq_steal(tdq, cpu);
714 if (ts == NULL) {
715 struct tdq_group *tdg;
716
717 tdg = tdq->tdq_group;
718 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
719 if (tdq == from || tdq->tdq_transferable == 0)
720 continue;
721 ts = tdq_steal(tdq, cpu);
722 break;
723 }
724 if (ts == NULL)
725 return;
726 }
727 if (tdq == to)
728 return;
729 td = ts->ts_thread;
730 /*
731 * Although the run queue is locked the thread may be blocked. Lock
732 * it to clear this and acquire the run-queue lock.
733 */
734 thread_lock(td);
735 /* Drop recursive lock on from acquired via thread_lock(). */
736 TDQ_UNLOCK(from);
737 sched_rem(td);
738 ts->ts_cpu = cpu;
739 td->td_lock = TDQ_LOCKPTR(to);
740 tdq_add(to, td, SRQ_YIELDING);
741 }
742
743 /*
744 * This tdq has idled. Try to steal a thread from another cpu and switch
745 * to it.
746 */
747 static int
748 tdq_idled(struct tdq *tdq)
749 {
750 struct tdq_group *tdg;
751 struct tdq *steal;
752 int highload;
753 int highcpu;
754 int cpu;
755
756 if (smp_started == 0 || steal_idle == 0)
757 return (1);
758 /* We don't want to be preempted while we're iterating over tdqs */
759 spinlock_enter();
760 tdg = tdq->tdq_group;
761 /*
762 * If we're in a cpu group, try and steal threads from another cpu in
763 * the group before idling. In a HTT group all cpus share the same
764 * run-queue lock, however, we still need a recursive lock to
765 * call tdq_move().
766 */
767 if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
768 TDQ_LOCK(tdq);
769 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
770 if (steal == tdq || steal->tdq_transferable == 0)
771 continue;
772 TDQ_LOCK(steal);
773 goto steal;
774 }
775 TDQ_UNLOCK(tdq);
776 }
777 /*
778 * Find the least loaded CPU with a transferable thread and attempt
779 * to steal it. We make a lockless pass and then verify that the
780 * thread is still available after locking.
781 */
782 for (;;) {
783 highcpu = 0;
784 highload = 0;
785 for (cpu = 0; cpu <= mp_maxid; cpu++) {
786 if (CPU_ABSENT(cpu))
787 continue;
788 steal = TDQ_CPU(cpu);
789 if (steal->tdq_transferable == 0)
790 continue;
791 if (steal->tdq_load < highload)
792 continue;
793 highload = steal->tdq_load;
794 highcpu = cpu;
795 }
796 if (highload < steal_thresh)
797 break;
798 steal = TDQ_CPU(highcpu);
799 if (steal == tdq)
800 break;
801 tdq_lock_pair(tdq, steal);
802 if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
803 goto steal;
804 tdq_unlock_pair(tdq, steal);
805 }
806 spinlock_exit();
807 return (1);
808 steal:
809 spinlock_exit();
810 tdq_move(steal, tdq);
811 TDQ_UNLOCK(steal);
812 mi_switch(SW_VOL, NULL);
813 thread_unlock(curthread);
814
815 return (0);
816 }
817
818 /*
819 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
820 */
821 static void
822 tdq_notify(struct td_sched *ts)
823 {
824 struct thread *ctd;
825 struct pcpu *pcpu;
826 int cpri;
827 int pri;
828 int cpu;
829
830 cpu = ts->ts_cpu;
831 pri = ts->ts_thread->td_priority;
832 pcpu = pcpu_find(cpu);
833 ctd = pcpu->pc_curthread;
834 cpri = ctd->td_priority;
835
836 /*
837 * If our priority is not better than the current priority there is
838 * nothing to do.
839 */
840 if (pri > cpri)
841 return;
842 /*
843 * Always IPI idle.
844 */
845 if (cpri > PRI_MIN_IDLE)
846 goto sendipi;
847 /*
848 * If we're realtime or better and there is timeshare or worse running
849 * send an IPI.
850 */
851 if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
852 goto sendipi;
853 /*
854 * Otherwise only IPI if we exceed the threshold.
855 */
856 if (pri > preempt_thresh)
857 return;
858 sendipi:
859 ipi_selected(1 << cpu, IPI_PREEMPT);
860 }
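
/*
 * Illustrative note (added, not part of the original source): with the
 * default PREEMPTION (and no FULL_PREEMPTION) build, preempt_thresh is
 * PRI_MIN_KERN, so a wakeup only IPIs the remote cpu when that cpu is
 * idle, when a realtime thread would displace a timeshare one, or when
 * the waking thread runs at kernel priority or better.
 */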
861
862 /*
863 * Steals load from a timeshare queue. Honors the rotating queue head
864 * index.
865 */
866 static struct td_sched *
867 runq_steal_from(struct runq *rq, int cpu, u_char start)
868 {
869 struct td_sched *ts;
870 struct rqbits *rqb;
871 struct rqhead *rqh;
872 int first;
873 int bit;
874 int pri;
875 int i;
876
877 rqb = &rq->rq_status;
878 bit = start & (RQB_BPW -1);
879 pri = 0;
880 first = 0;
881 again:
882 for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
883 if (rqb->rqb_bits[i] == 0)
884 continue;
885 if (bit != 0) {
886 for (pri = bit; pri < RQB_BPW; pri++)
887 if (rqb->rqb_bits[i] & (1ul << pri))
888 break;
889 if (pri >= RQB_BPW)
890 continue;
891 } else
892 pri = RQB_FFS(rqb->rqb_bits[i]);
893 pri += (i << RQB_L2BPW);
894 rqh = &rq->rq_queues[pri];
895 TAILQ_FOREACH(ts, rqh, ts_procq) {
896 if (first && THREAD_CAN_MIGRATE(ts->ts_thread) &&
897 THREAD_CAN_SCHED(ts->ts_thread, cpu))
898 return (ts);
899 first = 1;
900 }
901 }
902 if (start != 0) {
903 start = 0;
904 goto again;
905 }
906
907 return (NULL);
908 }
909
910 /*
911 * Steals load from a standard linear queue.
912 */
913 static struct td_sched *
914 runq_steal(struct runq *rq, int cpu)
915 {
916 struct rqhead *rqh;
917 struct rqbits *rqb;
918 struct td_sched *ts;
919 int word;
920 int bit;
921
922 rqb = &rq->rq_status;
923 for (word = 0; word < RQB_LEN; word++) {
924 if (rqb->rqb_bits[word] == 0)
925 continue;
926 for (bit = 0; bit < RQB_BPW; bit++) {
927 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
928 continue;
929 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
930 TAILQ_FOREACH(ts, rqh, ts_procq)
931 if (THREAD_CAN_MIGRATE(ts->ts_thread) &&
932 THREAD_CAN_SCHED(ts->ts_thread, cpu))
933 return (ts);
934 }
935 }
936 return (NULL);
937 }
938
939 /*
940 * Attempt to steal a thread in priority order from a thread queue.
941 */
942 static struct td_sched *
943 tdq_steal(struct tdq *tdq, int cpu)
944 {
945 struct td_sched *ts;
946
947 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
948 if ((ts = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
949 return (ts);
950 if ((ts = runq_steal_from(&tdq->tdq_timeshare,
951 cpu, tdq->tdq_ridx)) != NULL)
952 return (ts);
953 return (runq_steal(&tdq->tdq_idle, cpu));
954 }
955
956 /*
957 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
958 * current lock and returns with the assigned queue locked.
959 */
960 static inline struct tdq *
961 sched_setcpu(struct td_sched *ts, int cpu, int flags)
962 {
963 struct thread *td;
964 struct tdq *tdq;
965
966 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
967
968 tdq = TDQ_CPU(cpu);
969 td = ts->ts_thread;
970 ts->ts_cpu = cpu;
971
972 /* If the lock matches just return the queue. */
973 if (td->td_lock == TDQ_LOCKPTR(tdq))
974 return (tdq);
975 #ifdef notyet
976 /*
977 * If the thread isn't running its lockptr is a
978 * turnstile or a sleepqueue. We can just lock_set without
979 * blocking.
980 */
981 if (TD_CAN_RUN(td)) {
982 TDQ_LOCK(tdq);
983 thread_lock_set(td, TDQ_LOCKPTR(tdq));
984 return (tdq);
985 }
986 #endif
987 /*
988 * The hard case, migration: we need to block the thread first to
989 * prevent order reversals with other cpus' locks.
990 */
991 spinlock_enter();
992 thread_lock_block(td);
993 TDQ_LOCK(tdq);
994 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
995 spinlock_exit();
996 return (tdq);
997 }
998
999 /*
1000 * Find the thread queue running the lowest priority thread.
1001 */
1002 static int
1003 tdq_lowestpri(struct thread *td)
1004 {
1005 struct tdq *tdq;
1006 int lowpri;
1007 int lowcpu;
1008 int lowload;
1009 int load;
1010 int cpu;
1011 int pri;
1012
1013 lowload = 0;
1014 lowpri = lowcpu = 0;
1015 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1016 if (CPU_ABSENT(cpu))
1017 continue;
1018 if (!THREAD_CAN_SCHED(td, cpu))
1019 continue;
1020 tdq = TDQ_CPU(cpu);
1021 pri = tdq->tdq_lowpri;
1022 load = TDQ_CPU(cpu)->tdq_load;
1023 CTR4(KTR_ULE,
1024 "cpu %d pri %d lowcpu %d lowpri %d",
1025 cpu, pri, lowcpu, lowpri);
1026 if (pri < lowpri)
1027 continue;
1028 if (lowpri && lowpri == pri && load > lowload)
1029 continue;
1030 lowpri = pri;
1031 lowcpu = cpu;
1032 lowload = load;
1033 }
1034
1035 return (lowcpu);
1036 }
1037
1038 /*
1039 * Find the thread queue with the least load.
1040 */
1041 static int
1042 tdq_lowestload(struct thread *td)
1043 {
1044 struct tdq *tdq;
1045 int lowload;
1046 int lowpri;
1047 int lowcpu;
1048 int load;
1049 int cpu;
1050 int pri;
1051
1052 lowcpu = 0;
1053 lowload = TDQ_CPU(0)->tdq_load;
1054 lowpri = TDQ_CPU(0)->tdq_lowpri;
1055 for (cpu = 1; cpu <= mp_maxid; cpu++) {
1056 if (CPU_ABSENT(cpu))
1057 continue;
1058 if (!THREAD_CAN_SCHED(td, cpu))
1059 continue;
1060 tdq = TDQ_CPU(cpu);
1061 load = tdq->tdq_load;
1062 pri = tdq->tdq_lowpri;
1063 CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1064 cpu, load, lowcpu, lowload);
1065 if (load > lowload)
1066 continue;
1067 if (load == lowload && pri < lowpri)
1068 continue;
1069 lowcpu = cpu;
1070 lowload = load;
1071 lowpri = pri;
1072 }
1073
1074 return (lowcpu);
1075 }
1076
1077 /*
1078 * Pick the destination cpu for sched_add(). Respects affinity and makes
1079 * a determination based on load or priority of available processors.
1080 */
1081 static int
1082 sched_pickcpu(struct thread *td, int flags)
1083 {
1084 struct tdq *tdq;
1085 struct td_sched *ts;
1086 cpumask_t mask;
1087 int self;
1088 int pri;
1089 int cpu;
1090
1091 self = PCPU_GET(cpuid);
1092 ts = td->td_sched;
1093 if (smp_started == 0)
1094 return (self);
1095 /*
1096 * Don't migrate a running thread from sched_switch().
1097 */
1098 if (flags & SRQ_OURSELF) {
1099 CTR1(KTR_ULE, "YIELDING %d",
1100 curthread->td_priority);
1101 return (self);
1102 }
1103 pri = ts->ts_thread->td_priority;
1104 cpu = ts->ts_cpu;
1105 if (THREAD_CAN_SCHED(td, cpu)) {
1106 /*
1107 * Regardless of affinity, if the last cpu is idle
1108 * send it there.
1109 */
1110 tdq = TDQ_CPU(cpu);
1111 if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1112 CTR5(KTR_ULE,
1113 "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1114 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1115 tdq->tdq_lowpri);
1116 return (ts->ts_cpu);
1117 }
1118 /*
1119 * If we have affinity, try to place it on the cpu we
1120 * last ran on.
1121 */
1122 if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1123 CTR5(KTR_ULE,
1124 "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1125 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1126 tdq->tdq_lowpri);
1127 return (ts->ts_cpu);
1128 }
1129 }
1130
1131 /*
1132 * Look for an idle group.
1133 */
1134 CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1135 mask = tdq_idle;
1136 while ((cpu = ffs(mask)) != 0) {
1137 --cpu;
1138 if (THREAD_CAN_SCHED(td, cpu))
1139 return (cpu);
1140 mask &= ~(1 << cpu);
1141 }
1142 /*
1143 * If there are no idle cores see if we can run the thread locally.
1144 * This may improve locality among sleepers and wakers when there
1145 * is shared data.
1146 */
1147 if (tryself && THREAD_CAN_SCHED(td, self) &&
1148 pri < curthread->td_priority) {
1149 CTR1(KTR_ULE, "tryself %d",
1150 curthread->td_priority);
1151 return (self);
1152 }
1153 /*
1154 * Now search for the cpu running the lowest priority thread with
1155 * the least load.
1156 */
1157 if (pick_pri)
1158 cpu = tdq_lowestpri(td);
1159 else
1160 cpu = tdq_lowestload(td);
1161 return (cpu);
1162 }
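
/*
 * Illustrative note (added, not part of the original source): the
 * selection order above is roughly: stay on the current cpu when
 * yielding; reuse ts_cpu if it is idle or if we still have cache
 * affinity there and would preempt; otherwise take any idle cpu we are
 * allowed on; then the local cpu if we beat curthread's priority; and
 * finally the cpu running the lowest priority thread (or the least
 * loaded one, depending on pick_pri).
 */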
1163
1164 #endif /* SMP */
1165
1166 /*
1167 * Pick the highest priority task we have and return it.
1168 */
1169 static struct td_sched *
1170 tdq_choose(struct tdq *tdq)
1171 {
1172 struct td_sched *ts;
1173
1174 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1175 ts = runq_choose(&tdq->tdq_realtime);
1176 if (ts != NULL)
1177 return (ts);
1178 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1179 if (ts != NULL) {
1180 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1181 ("tdq_choose: Invalid priority on timeshare queue %d",
1182 ts->ts_thread->td_priority));
1183 return (ts);
1184 }
1185
1186 ts = runq_choose(&tdq->tdq_idle);
1187 if (ts != NULL) {
1188 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1189 ("tdq_choose: Invalid priority on idle queue %d",
1190 ts->ts_thread->td_priority));
1191 return (ts);
1192 }
1193
1194 return (NULL);
1195 }
1196
1197 /*
1198 * Initialize a thread queue.
1199 */
1200 static void
1201 tdq_setup(struct tdq *tdq)
1202 {
1203
1204 if (bootverbose)
1205 printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1206 runq_init(&tdq->tdq_realtime);
1207 runq_init(&tdq->tdq_timeshare);
1208 runq_init(&tdq->tdq_idle);
1209 tdq->tdq_load = 0;
1210 }
1211
1212 #ifdef SMP
1213 static void
1214 tdg_setup(struct tdq_group *tdg)
1215 {
1216 if (bootverbose)
1217 printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1218 snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1219 "sched lock %d", (int)TDG_ID(tdg));
1220 mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1221 MTX_SPIN | MTX_RECURSE);
1222 LIST_INIT(&tdg->tdg_members);
1223 tdg->tdg_load = 0;
1224 tdg->tdg_transferable = 0;
1225 tdg->tdg_cpus = 0;
1226 tdg->tdg_mask = 0;
1227 tdg->tdg_cpumask = 0;
1228 tdg->tdg_idlemask = 0;
1229 }
1230
1231 static void
1232 tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1233 {
1234 if (tdg->tdg_mask == 0)
1235 tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1236 tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1237 tdg->tdg_cpus++;
1238 tdq->tdq_group = tdg;
1239 tdq->tdq_lock = &tdg->tdg_lock;
1240 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1241 if (bootverbose)
1242 printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1243 TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1244 }
1245
1246 static void
1247 sched_setup_topology(void)
1248 {
1249 struct tdq_group *tdg;
1250 struct cpu_group *cg;
1251 int balance_groups;
1252 struct tdq *tdq;
1253 int i;
1254 int j;
1255
1256 topology = 1;
1257 balance_groups = 0;
1258 for (i = 0; i < smp_topology->ct_count; i++) {
1259 cg = &smp_topology->ct_group[i];
1260 tdg = &tdq_groups[i];
1261 /*
1262 * Initialize the group.
1263 */
1264 tdg_setup(tdg);
1265 /*
1266 * Find all of the group members and add them.
1267 */
1268 for (j = 0; j < MAXCPU; j++) {
1269 if ((cg->cg_mask & (1 << j)) != 0) {
1270 tdq = TDQ_CPU(j);
1271 tdq_setup(tdq);
1272 tdg_add(tdg, tdq);
1273 }
1274 }
1275 if (tdg->tdg_cpus > 1)
1276 balance_groups = 1;
1277 }
1278 tdg_maxid = smp_topology->ct_count - 1;
1279 if (balance_groups)
1280 sched_balance_groups();
1281 }
1282
1283 static void
1284 sched_setup_smp(void)
1285 {
1286 struct tdq_group *tdg;
1287 struct tdq *tdq;
1288 int cpus;
1289 int i;
1290
1291 for (cpus = 0, i = 0; i < MAXCPU; i++) {
1292 if (CPU_ABSENT(i))
1293 continue;
1294 tdq = &tdq_cpu[i];
1295 tdg = &tdq_groups[i];
1296 /*
1297 * Setup a tdq group with one member.
1298 */
1299 tdg_setup(tdg);
1300 tdq_setup(tdq);
1301 tdg_add(tdg, tdq);
1302 cpus++;
1303 }
1304 tdg_maxid = cpus - 1;
1305 }
1306
1307 /*
1308 * Fake a topology with one group containing all CPUs.
1309 */
1310 static void
1311 sched_fake_topo(void)
1312 {
1313 #ifdef SCHED_FAKE_TOPOLOGY
1314 static struct cpu_top top;
1315 static struct cpu_group group;
1316
1317 top.ct_count = 1;
1318 top.ct_group = &group;
1319 group.cg_mask = all_cpus;
1320 group.cg_count = mp_ncpus;
1321 group.cg_children = 0;
1322 smp_topology = &top;
1323 #endif
1324 }
1325 #endif
1326
1327 /*
1328 * Setup the thread queues and initialize the topology based on MD
1329 * information.
1330 */
1331 static void
1332 sched_setup(void *dummy)
1333 {
1334 struct tdq *tdq;
1335
1336 tdq = TDQ_SELF();
1337 #ifdef SMP
1338 sched_fake_topo();
1339 /*
1340 * Setup tdqs based on a topology configuration or vanilla SMP based
1341 * on mp_maxid.
1342 */
1343 if (smp_topology == NULL)
1344 sched_setup_smp();
1345 else
1346 sched_setup_topology();
1347 balance_tdq = tdq;
1348 sched_balance();
1349 #else
1350 tdq_setup(tdq);
1351 mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1352 tdq->tdq_lock = &tdq_lock;
1353 #endif
1354 /*
1355 * To avoid divide-by-zero, we set realstathz to a dummy value in case
1356 * sched_clock() is called before sched_initticks().
1357 */
1358 realstathz = hz;
1359 sched_slice = (realstathz/10); /* ~100ms */
1360 tickincr = 1 << SCHED_TICK_SHIFT;
1361
1362 /* Add thread0's load since it's running. */
1363 TDQ_LOCK(tdq);
1364 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1365 tdq_load_add(tdq, &td_sched0);
1366 TDQ_UNLOCK(tdq);
1367 }
1368
1369 /*
1370 * This routine determines the tickincr after stathz and hz are setup.
1371 */
1372 /* ARGSUSED */
1373 static void
1374 sched_initticks(void *dummy)
1375 {
1376 int incr;
1377
1378 realstathz = stathz ? stathz : hz;
1379 sched_slice = (realstathz/10); /* ~100ms */
1380
1381 /*
1382 * tickincr is shifted out by 10 to avoid rounding errors due to
1383 * hz not being evenly divisible by stathz on all platforms.
1384 */
1385 incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1386 /*
1387 * This does not work for values of stathz that are more than
1388 * (1 << SCHED_TICK_SHIFT) * hz. In practice this does not happen.
1389 */
1390 if (incr == 0)
1391 incr = 1;
1392 tickincr = incr;
1393 #ifdef SMP
1394 /*
1395 * Set the default balance interval now that we know
1396 * what realstathz is.
1397 */
1398 balance_interval = realstathz;
1399 /*
1400 * Set steal_thresh to roughly log2(mp_ncpus) but no greater than 3.
1401 * This prevents excess thrashing on large machines and excess idle
1402 * on smaller machines.
1403 */
1404 steal_thresh = min(fls(mp_ncpus) - 1, 3);
1405 affinity = SCHED_AFFINITY_DEFAULT;
1406 #endif
1407 }
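
/*
 * Worked example (added, not part of the original source): with
 * hz = 1000 and stathz = 127, tickincr becomes (1000 << 10) / 127,
 * about 8062, so each stathz tick charged to a thread adds roughly
 * 7.9 hz-ticks worth of (shifted) run time, and sched_slice is
 * 127 / 10 = 12 stathz ticks, i.e. roughly 100ms.
 */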
1408
1409
1410 /*
1411 * This is the core of the interactivity algorithm. Determines a score based
1412 * on past behavior. It is the ratio of sleep time to run time scaled to
1413 * a [0, 100] integer. This is the voluntary sleep time of a process, which
1414 * differs from the cpu usage because it does not account for time spent
1415 * waiting on a run-queue. Would be prettier if we had floating point.
1416 */
1417 static int
1418 sched_interact_score(struct thread *td)
1419 {
1420 struct td_sched *ts;
1421 int div;
1422
1423 ts = td->td_sched;
1424 /*
1425 * The score is only needed if this is likely to be an interactive
1426 * task. Don't go through the expense of computing it if there's
1427 * no chance.
1428 */
1429 if (sched_interact <= SCHED_INTERACT_HALF &&
1430 ts->ts_runtime >= ts->ts_slptime)
1431 return (SCHED_INTERACT_HALF);
1432
1433 if (ts->ts_runtime > ts->ts_slptime) {
1434 div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1435 return (SCHED_INTERACT_HALF +
1436 (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1437 }
1438 if (ts->ts_slptime > ts->ts_runtime) {
1439 div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1440 return (ts->ts_runtime / div);
1441 }
1442 /* runtime == slptime */
1443 if (ts->ts_runtime)
1444 return (SCHED_INTERACT_HALF);
1445
1446 /*
1447 * This can happen if slptime and runtime are 0.
1448 */
1449 return (0);
1450
1451 }
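
/*
 * Worked example (added, not part of the original source): a thread
 * that voluntarily sleeps three times as long as it runs scores about
 * run / (slp / SCHED_INTERACT_HALF) = 50 / 3, i.e. ~16, comfortably
 * below SCHED_INTERACT_THRESH (30) and therefore interactive.  A cpu
 * hog that runs three times as long as it sleeps scores about
 * 100 - 50 / 3, i.e. ~83, and is treated as a batch thread.
 */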
1452
1453 /*
1454 * Scale the scheduling priority according to the "interactivity" of this
1455 * process.
1456 */
1457 static void
1458 sched_priority(struct thread *td)
1459 {
1460 int score;
1461 int pri;
1462
1463 if (td->td_pri_class != PRI_TIMESHARE)
1464 return;
1465 /*
1466 * If the score is interactive we place the thread in the realtime
1467 * queue with a priority that is less than kernel and interrupt
1468 * priorities. These threads are not subject to nice restrictions.
1469 *
1470 * Scores greater than this are placed on the normal timeshare queue
1471 * where the priority is partially decided by the most recent cpu
1472 * utilization and the rest is decided by nice value.
1473 *
1474 * The nice value of the process has a linear effect on the calculated
1475 * score. Negative nice values make it easier for a thread to be
1476 * considered interactive.
1477 */
1478 score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1479 if (score < sched_interact) {
1480 pri = PRI_MIN_REALTIME;
1481 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1482 * score;
1483 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1484 ("sched_priority: invalid interactive priority %d score %d",
1485 pri, score));
1486 } else {
1487 pri = SCHED_PRI_MIN;
1488 if (td->td_sched->ts_ticks)
1489 pri += SCHED_PRI_TICKS(td->td_sched);
1490 pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1491 KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1492 ("sched_priority: invalid priority %d: nice %d, "
1493 "ticks %d ftick %d ltick %d tick pri %d",
1494 pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1495 td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1496 SCHED_PRI_TICKS(td->td_sched)));
1497 }
1498 sched_user_prio(td, pri);
1499
1500 return;
1501 }
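
/*
 * Illustrative note (added, not part of the original source): a score
 * of 0 maps to PRI_MIN_REALTIME and a score just under sched_interact
 * maps close to PRI_MAX_REALTIME, so interactive threads always sort
 * ahead of the timeshare range.  Batch threads instead start at
 * SCHED_PRI_MIN and are pushed toward SCHED_PRI_MAX by recent cpu
 * usage (SCHED_PRI_TICKS) and positive nice values.
 */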
1502
1503 /*
1504 * This routine enforces a maximum limit on the amount of scheduling history
1505 * kept. It is called after either the slptime or runtime is adjusted. This
1506 * function is ugly due to integer math.
1507 */
1508 static void
1509 sched_interact_update(struct thread *td)
1510 {
1511 struct td_sched *ts;
1512 u_int sum;
1513
1514 ts = td->td_sched;
1515 sum = ts->ts_runtime + ts->ts_slptime;
1516 if (sum < SCHED_SLP_RUN_MAX)
1517 return;
1518 /*
1519 * This only happens from two places:
1520 * 1) We have added an unusual amount of run time from fork_exit.
1521 * 2) We have added an unusual amount of sleep time from sched_sleep().
1522 */
1523 if (sum > SCHED_SLP_RUN_MAX * 2) {
1524 if (ts->ts_runtime > ts->ts_slptime) {
1525 ts->ts_runtime = SCHED_SLP_RUN_MAX;
1526 ts->ts_slptime = 1;
1527 } else {
1528 ts->ts_slptime = SCHED_SLP_RUN_MAX;
1529 ts->ts_runtime = 1;
1530 }
1531 return;
1532 }
1533 /*
1534 * If we have exceeded by more than 1/5th then the algorithm below
1535 * will not bring us back into range. Dividing by two here forces
1536 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1537 */
1538 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1539 ts->ts_runtime /= 2;
1540 ts->ts_slptime /= 2;
1541 return;
1542 }
1543 ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1544 ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1545 }
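
/*
 * Illustrative note (added, not part of the original source): with
 * hz = 1000, SCHED_SLP_RUN_MAX is (5 * 1000) << 10, i.e. about five
 * seconds of combined sleep + run history in shifted hz ticks.  Once
 * the sum crosses that limit both counters are scaled back (by 4/5, or
 * by 1/2 when far over), so old behavior decays and the score tracks
 * roughly the last few seconds of activity.
 */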
1546
1547 /*
1548 * Scale back the interactivity history when a child thread is created. The
1549 * history is inherited from the parent but the thread may behave totally
1550 * differently. For example, a shell spawning a compiler process. We want
1551 * to learn that the compiler is behaving badly very quickly.
1552 */
1553 static void
1554 sched_interact_fork(struct thread *td)
1555 {
1556 int ratio;
1557 int sum;
1558
1559 sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1560 if (sum > SCHED_SLP_RUN_FORK) {
1561 ratio = sum / SCHED_SLP_RUN_FORK;
1562 td->td_sched->ts_runtime /= ratio;
1563 td->td_sched->ts_slptime /= ratio;
1564 }
1565 }
1566
1567 /*
1568 * Called from proc0_init() to setup the scheduler fields.
1569 */
1570 void
1571 schedinit(void)
1572 {
1573
1574 /*
1575 * Set up the scheduler specific parts of proc0.
1576 */
1577 proc0.p_sched = NULL; /* XXX */
1578 thread0.td_sched = &td_sched0;
1579 td_sched0.ts_ltick = ticks;
1580 td_sched0.ts_ftick = ticks;
1581 td_sched0.ts_thread = &thread0;
1582 }
1583
1584 /*
1585 * This is only somewhat accurate since, given many processes of the same
1586 * priority, they will switch when their slices run out, which will be
1587 * at most sched_slice stathz ticks.
1588 */
1589 int
1590 sched_rr_interval(void)
1591 {
1592
1593 /* Convert sched_slice to hz */
1594 return (hz/(realstathz/sched_slice));
1595 }
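
/*
 * Worked example (added, not part of the original source): with
 * hz = 1000, realstathz = 127 and sched_slice = 12, this returns
 * 1000 / (127 / 12) = 1000 / 10 = 100, i.e. a round-robin interval of
 * roughly 100ms expressed in hz ticks.
 */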
1596
1597 /*
1598 * Update the percent cpu tracking information when it is requested or
1599 * the total history exceeds the maximum. We keep a sliding history of
1600 * tick counts that slowly decays. This is less precise than the 4BSD
1601 * mechanism since it happens with less regular and frequent events.
1602 */
1603 static void
1604 sched_pctcpu_update(struct td_sched *ts)
1605 {
1606
1607 if (ts->ts_ticks == 0)
1608 return;
1609 if (ticks - (hz / 10) < ts->ts_ltick &&
1610 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1611 return;
1612 /*
1613 * Adjust counters and watermark for pctcpu calc.
1614 */
1615 if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1616 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1617 SCHED_TICK_TARG;
1618 else
1619 ts->ts_ticks = 0;
1620 ts->ts_ltick = ticks;
1621 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1622 }
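
/*
 * Illustrative note (added, not part of the original source): if a
 * thread has been accumulating ticks for 11 seconds (hz = 1000, so
 * ticks - ts_ftick = 11000) and ran recently, ts_ticks is rescaled by
 * SCHED_TICK_TARG / 11000, i.e. multiplied by roughly 10/11, and the
 * window is slid forward so ts_ftick sits exactly SCHED_TICK_TARG
 * ticks behind ts_ltick.
 */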
1623
1624 /*
1625 * Adjust the priority of a thread. Move it to the appropriate run-queue
1626 * if necessary. This is the back-end for several priority related
1627 * functions.
1628 */
1629 static void
1630 sched_thread_priority(struct thread *td, u_char prio)
1631 {
1632 struct td_sched *ts;
1633
1634 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1635 td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1636 curthread->td_proc->p_comm);
1637 ts = td->td_sched;
1638 THREAD_LOCK_ASSERT(td, MA_OWNED);
1639 if (td->td_priority == prio)
1640 return;
1641
1642 if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1643 /*
1644 * If the priority has been elevated due to priority
1645 * propagation, we may have to move ourselves to a new
1646 * queue. This could be optimized to not re-add in some
1647 * cases.
1648 */
1649 sched_rem(td);
1650 td->td_priority = prio;
1651 sched_add(td, SRQ_BORROWING);
1652 } else {
1653 #ifdef SMP
1654 struct tdq *tdq;
1655
1656 tdq = TDQ_CPU(ts->ts_cpu);
1657 if (prio < tdq->tdq_lowpri)
1658 tdq->tdq_lowpri = prio;
1659 #endif
1660 td->td_priority = prio;
1661 }
1662 }
1663
1664 /*
1665 * Update a thread's priority when it is lent another thread's
1666 * priority.
1667 */
1668 void
1669 sched_lend_prio(struct thread *td, u_char prio)
1670 {
1671
1672 td->td_flags |= TDF_BORROWING;
1673 sched_thread_priority(td, prio);
1674 }
1675
1676 /*
1677 * Restore a thread's priority when priority propagation is
1678 * over. The prio argument is the minimum priority the thread
1679 * needs to have to satisfy other possible priority lending
1680 * requests. If the thread's regular priority is less
1681 * important than prio, the thread will keep a priority boost
1682 * of prio.
1683 */
1684 void
1685 sched_unlend_prio(struct thread *td, u_char prio)
1686 {
1687 u_char base_pri;
1688
1689 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1690 td->td_base_pri <= PRI_MAX_TIMESHARE)
1691 base_pri = td->td_user_pri;
1692 else
1693 base_pri = td->td_base_pri;
1694 if (prio >= base_pri) {
1695 td->td_flags &= ~TDF_BORROWING;
1696 sched_thread_priority(td, base_pri);
1697 } else
1698 sched_lend_prio(td, prio);
1699 }
1700
1701 /*
1702 * Standard entry for setting the priority to an absolute value.
1703 */
1704 void
1705 sched_prio(struct thread *td, u_char prio)
1706 {
1707 u_char oldprio;
1708
1709 /* First, update the base priority. */
1710 td->td_base_pri = prio;
1711
1712 /*
1713 * If the thread is borrowing another thread's priority, don't
1714 * ever lower the priority.
1715 */
1716 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1717 return;
1718
1719 /* Change the real priority. */
1720 oldprio = td->td_priority;
1721 sched_thread_priority(td, prio);
1722
1723 /*
1724 * If the thread is on a turnstile, then let the turnstile update
1725 * its state.
1726 */
1727 if (TD_ON_LOCK(td) && oldprio != prio)
1728 turnstile_adjust(td, oldprio);
1729 }
1730
1731 /*
1732 * Set the base user priority; does not affect the current running priority.
1733 */
1734 void
1735 sched_user_prio(struct thread *td, u_char prio)
1736 {
1737 u_char oldprio;
1738
1739 td->td_base_user_pri = prio;
1740 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1741 return;
1742 oldprio = td->td_user_pri;
1743 td->td_user_pri = prio;
1744 }
1745
1746 void
1747 sched_lend_user_prio(struct thread *td, u_char prio)
1748 {
1749 u_char oldprio;
1750
1751 THREAD_LOCK_ASSERT(td, MA_OWNED);
1752 td->td_flags |= TDF_UBORROWING;
1753 oldprio = td->td_user_pri;
1754 td->td_user_pri = prio;
1755 }
1756
1757 void
1758 sched_unlend_user_prio(struct thread *td, u_char prio)
1759 {
1760 u_char base_pri;
1761
1762 THREAD_LOCK_ASSERT(td, MA_OWNED);
1763 base_pri = td->td_base_user_pri;
1764 if (prio >= base_pri) {
1765 td->td_flags &= ~TDF_UBORROWING;
1766 sched_user_prio(td, base_pri);
1767 } else {
1768 sched_lend_user_prio(td, prio);
1769 }
1770 }
1771
1772 /*
1773 * Add the thread passed as 'newtd' to the run queue before selecting
1774 * the next thread to run. This is only used for KSE.
1775 */
1776 static void
1777 sched_switchin(struct tdq *tdq, struct thread *td)
1778 {
1779 #ifdef SMP
1780 spinlock_enter();
1781 TDQ_UNLOCK(tdq);
1782 thread_lock(td);
1783 spinlock_exit();
1784 sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1785 #else
1786 td->td_lock = TDQ_LOCKPTR(tdq);
1787 #endif
1788 tdq_add(tdq, td, SRQ_YIELDING);
1789 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1790 }
1791
1792 /*
1793 * Handle migration from sched_switch(). This happens only for
1794 * cpu binding.
1795 */
1796 static struct mtx *
1797 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1798 {
1799 struct tdq *tdn;
1800
1801 tdn = TDQ_CPU(td->td_sched->ts_cpu);
1802 #ifdef SMP
1803 /*
1804 * Do the lock dance required to avoid LOR. We grab an extra
1805 * spinlock nesting to prevent preemption while we're
1806 * not holding either run-queue lock.
1807 */
1808 spinlock_enter();
1809 thread_lock_block(td); /* This releases the lock on tdq. */
1810
1811 /*
1812 * Acquire both run-queue locks before placing the thread on the new
1813 * run-queue to avoid deadlocks created by placing a thread with a
1814 * blocked lock on the run-queue of a remote processor. The deadlock
1815 * occurs when a third processor attempts to lock the two queues in
1816 * question while the target processor is spinning with its own
1817 * run-queue lock held while waiting for the blocked lock to clear.
1818 */
1819 if (TDQ_LOCKPTR(tdn) == TDQ_LOCKPTR(tdq)) {
1820 TDQ_LOCK(tdq);
1821 tdq_add(tdn, td, flags);
1822 tdq_notify(td->td_sched);
1823 } else {
1824 tdq_lock_pair(tdn, tdq);
1825 tdq_add(tdn, td, flags);
1826 tdq_notify(td->td_sched);
1827 TDQ_UNLOCK(tdn);
1828 }
1829 spinlock_exit();
1830 #endif
1831 return (TDQ_LOCKPTR(tdn));
1832 }
1833
1834 /*
1835 * Variant of thread_lock_unblock() that does not assume td_lock
1836 * is blocked.
1837 */
1838 static inline void
1839 thread_unblock_switch(struct thread *td, struct mtx *mtx)
1840 {
1841 atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1842 (uintptr_t)mtx);
1843 }
1844
1845 /*
1846 * Switch threads. This function has to handle threads coming in while
1847 * blocked for some reason, running, or idle. It also must deal with
1848 * migrating a thread from one queue to another as running threads may
1849 * be assigned elsewhere via binding.
1850 */
1851 void
1852 sched_switch(struct thread *td, struct thread *newtd, int flags)
1853 {
1854 struct tdq *tdq;
1855 struct td_sched *ts;
1856 struct mtx *mtx;
1857 int srqflag;
1858 int cpuid;
1859
1860 THREAD_LOCK_ASSERT(td, MA_OWNED);
1861
1862 cpuid = PCPU_GET(cpuid);
1863 tdq = TDQ_CPU(cpuid);
1864 ts = td->td_sched;
1865 mtx = td->td_lock;
1866 #ifdef SMP
1867 ts->ts_rltick = ticks;
1868 if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1869 tdq->tdq_lowpri = newtd->td_priority;
1870 #endif
1871 td->td_lastcpu = td->td_oncpu;
1872 td->td_oncpu = NOCPU;
1873 td->td_flags &= ~TDF_NEEDRESCHED;
1874 td->td_owepreempt = 0;
1875 /*
1876 * The lock pointer in an idle thread should never change. Reset it
1877 * to CAN_RUN as well.
1878 */
1879 if (TD_IS_IDLETHREAD(td)) {
1880 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1881 TD_SET_CAN_RUN(td);
1882 } else if (TD_IS_RUNNING(td)) {
1883 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1884 tdq_load_rem(tdq, ts);
1885 srqflag = (flags & SW_PREEMPT) ?
1886 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1887 SRQ_OURSELF|SRQ_YIELDING;
1888 #ifdef SMP
1889 if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
1890 ts->ts_cpu = sched_pickcpu(td, 0);
1891 #endif
1892 if (ts->ts_cpu == cpuid)
1893 tdq_add(tdq, td, srqflag);
1894 else {
1895 KASSERT(THREAD_CAN_MIGRATE(td) ||
1896 (ts->ts_flags & TSF_BOUND) != 0,
1897 ("Thread %p shouldn't migrate", td));
1898 mtx = sched_switch_migrate(tdq, td, srqflag);
1899 }
1900 } else {
1901 /* This thread must be going to sleep. */
1902 TDQ_LOCK(tdq);
1903 mtx = thread_lock_block(td);
1904 tdq_load_rem(tdq, ts);
1905 }
1906 /*
1907 * We enter here with the thread blocked and assigned to the
1908 * appropriate cpu run-queue or sleep-queue and with the current
1909 * thread-queue locked.
1910 */
1911 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1912 /*
1913 * If KSE assigned a new thread just add it here and let choosethread
1914 * select the best one.
1915 */
1916 if (newtd != NULL)
1917 sched_switchin(tdq, newtd);
1918 newtd = choosethread();
1919 /*
1920 * Call the MD code to switch contexts if necessary.
1921 */
1922 if (td != newtd) {
1923 #ifdef HWPMC_HOOKS
1924 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1925 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1926 #endif
1927 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
1928
1929 #ifdef KDTRACE_HOOKS
1930 /*
1931 * If DTrace has set the active vtime enum to anything
1932 * other than INACTIVE (0), then it should have set the
1933 * function to call.
1934 */
1935 if (dtrace_vtime_active)
1936 (*dtrace_vtime_switch_func)(newtd);
1937 #endif
1938 cpu_switch(td, newtd, mtx);
1939 /*
1940 * We may return from cpu_switch on a different cpu. However,
1941 * we always return with td_lock pointing to the current cpu's
1942 * run queue lock.
1943 */
1944 cpuid = PCPU_GET(cpuid);
1945 tdq = TDQ_CPU(cpuid);
1946 #ifdef HWPMC_HOOKS
1947 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1948 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1949 #endif
1950 } else
1951 thread_unblock_switch(td, mtx);
1952 /*
1953 * Assert that all went well and return.
1954 */
1955 #ifdef SMP
1956 /* We should always get here with the lowest priority td possible */
1957 tdq->tdq_lowpri = td->td_priority;
1958 #endif
1959 TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1960 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1961 td->td_oncpu = cpuid;
1962 }
1963
1964 /*
1965 * Adjust thread priorities as a result of a nice request.
1966 */
1967 void
1968 sched_nice(struct proc *p, int nice)
1969 {
1970 struct thread *td;
1971
1972 PROC_LOCK_ASSERT(p, MA_OWNED);
1973 PROC_SLOCK_ASSERT(p, MA_OWNED);
1974
1975 p->p_nice = nice;
1976 FOREACH_THREAD_IN_PROC(p, td) {
1977 thread_lock(td);
1978 sched_priority(td);
1979 sched_prio(td, td->td_base_user_pri);
1980 thread_unlock(td);
1981 }
1982 }
1983
1984 /*
1985 * Record the sleep time for the interactivity scorer.
1986 */
1987 void
1988 sched_sleep(struct thread *td)
1989 {
1990
1991 THREAD_LOCK_ASSERT(td, MA_OWNED);
1992
1993 td->td_slptick = ticks;
1994 }
1995
1996 /*
1997 * Schedule a thread to resume execution and record how long it voluntarily
1998 * slept. We also update the pctcpu, interactivity, and priority.
1999 */
2000 void
2001 sched_wakeup(struct thread *td)
2002 {
2003 struct td_sched *ts;
2004 int slptick;
2005
2006 THREAD_LOCK_ASSERT(td, MA_OWNED);
2007 ts = td->td_sched;
2008 /*
2009 * If we slept for more than a tick, update our interactivity and
2010 * priority.
2011 */
2012 slptick = td->td_slptick;
2013 td->td_slptick = 0;
2014 if (slptick && slptick != ticks) {
2015 u_int hzticks;
2016
2017 hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
2018 ts->ts_slptime += hzticks;
2019 sched_interact_update(td);
2020 sched_pctcpu_update(ts);
2021 sched_priority(td);
2022 }
2023 /* Reset the slice value after we sleep. */
2024 ts->ts_slice = sched_slice;
2025 sched_add(td, SRQ_BORING);
2026 }
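
/*
 * A worked example of the sleep credit above, as a sketch assuming
 * SCHED_TICK_SHIFT is 10 and hz is 1000 (the real values come from the
 * defines earlier in this file and the kernel configuration): a thread
 * that slept for 50 hz ticks gets
 *
 *	hzticks = 50 << 10 = 51200
 *
 * added to ts_slptime.  sched_interact_update() then folds the larger
 * sleep time into the interactivity score and sched_priority() lowers
 * (improves) td_priority, so mostly-sleeping threads wake up favored.
 */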
2027
2028 /*
2029 * Penalize the parent for creating a new child and initialize the child's
2030 * priority.
2031 */
2032 void
2033 sched_fork(struct thread *td, struct thread *child)
2034 {
2035 THREAD_LOCK_ASSERT(td, MA_OWNED);
2036 sched_fork_thread(td, child);
2037 /*
2038 * Penalize the parent and child for forking.
2039 */
2040 sched_interact_fork(child);
2041 sched_priority(child);
2042 td->td_sched->ts_runtime += tickincr;
2043 sched_interact_update(td);
2044 sched_priority(td);
2045 }
2046
2047 /*
2048 * Fork a new thread; it may be within the same process.
2049 */
2050 void
2051 sched_fork_thread(struct thread *td, struct thread *child)
2052 {
2053 struct td_sched *ts;
2054 struct td_sched *ts2;
2055
2056 /*
2057 * Initialize child.
2058 */
2059 THREAD_LOCK_ASSERT(td, MA_OWNED);
2060 sched_newthread(child);
2061 child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
2062 child->td_cpuset = cpuset_ref(td->td_cpuset);
2063 ts = td->td_sched;
2064 ts2 = child->td_sched;
2065 ts2->ts_cpu = ts->ts_cpu;
2066 ts2->ts_runq = NULL;
2067 /*
2068 * Grab our parent's cpu estimation information and priority.
2069 */
2070 ts2->ts_ticks = ts->ts_ticks;
2071 ts2->ts_ltick = ts->ts_ltick;
2072 ts2->ts_incrtick = ts->ts_incrtick;
2073 ts2->ts_ftick = ts->ts_ftick;
2074 child->td_user_pri = td->td_user_pri;
2075 child->td_base_user_pri = td->td_base_user_pri;
2076 /*
2077 * And update interactivity score.
2078 */
2079 ts2->ts_slptime = ts->ts_slptime;
2080 ts2->ts_runtime = ts->ts_runtime;
2081 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
2082 }
2083
2084 /*
2085 * Adjust the priority class of a thread.
2086 */
2087 void
2088 sched_class(struct thread *td, int class)
2089 {
2090
2091 THREAD_LOCK_ASSERT(td, MA_OWNED);
2092 if (td->td_pri_class == class)
2093 return;
2094
2095 #ifdef SMP
2096 /*
2097 * On SMP, if we're on the RUNQ we must adjust the transferable
2098 * count because we could be changing to or from an interrupt
2099 * class.
2100 */
2101 if (TD_ON_RUNQ(td)) {
2102 struct tdq *tdq;
2103
2104 tdq = TDQ_CPU(td->td_sched->ts_cpu);
2105 if (THREAD_CAN_MIGRATE(td)) {
2106 tdq->tdq_transferable--;
2107 tdq->tdq_group->tdg_transferable--;
2108 }
2109 td->td_pri_class = class;
2110 if (THREAD_CAN_MIGRATE(td)) {
2111 tdq->tdq_transferable++;
2112 tdq->tdq_group->tdg_transferable++;
2113 }
2114 }
2115 #endif
2116 td->td_pri_class = class;
2117 }
2118
2119 /*
2120 * Return some of the child's priority and interactivity to the parent.
2121 */
2122 void
2123 sched_exit(struct proc *p, struct thread *child)
2124 {
2125 struct thread *td;
2126
2127 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2128 child, child->td_proc->p_comm, child->td_priority);
2129
2130 PROC_SLOCK_ASSERT(p, MA_OWNED);
2131 td = FIRST_THREAD_IN_PROC(p);
2132 sched_exit_thread(td, child);
2133 }
2134
2135 /*
2136 * Penalize another thread for the time spent on this one. This helps to
2137 * worsen the priority and interactivity of processes which schedule batch
2138 * jobs such as make. This has little effect on the make process itself but
2139 * causes new processes spawned by it to receive worse scores immediately.
2140 */
2141 void
2142 sched_exit_thread(struct thread *td, struct thread *child)
2143 {
2144
2145 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2146 child, child->td_proc->p_comm, child->td_priority);
2147
2148 #ifdef KSE
2149 /*
2150 * KSE forks and exits so often that this penalty causes short-lived
2151 * threads to always be non-interactive. This causes mozilla to
2152 * crawl under load.
2153 */
2154 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2155 return;
2156 #endif
2157 /*
2158 * Give the child's runtime to the parent without returning the
2159 * sleep time as a penalty to the parent. This causes shells that
2160 * launch expensive things to mark their children as expensive.
2161 */
2162 thread_lock(td);
2163 td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2164 sched_interact_update(td);
2165 sched_priority(td);
2166 thread_unlock(td);
2167 }
2168
2169 /*
2170 * Fix priorities on return to user-space. Priorities may be elevated due
2171 * to static priorities in msleep() or similar.
2172 */
2173 void
2174 sched_userret(struct thread *td)
2175 {
2176 /*
2177 * XXX we cheat slightly on the locking here to avoid locking in
2178 * the usual case. Setting td_priority here is essentially an
2179 * incomplete workaround for not setting it properly elsewhere.
2180 * Now that some interrupt handlers are threads, not setting it
2181 * properly elsewhere can clobber it in the window between setting
2182 * it here and returning to user mode, so don't waste time setting
2183 * it perfectly here.
2184 */
2185 KASSERT((td->td_flags & TDF_BORROWING) == 0,
2186 ("thread with borrowed priority returning to userland"));
2187 if (td->td_priority != td->td_user_pri) {
2188 thread_lock(td);
2189 td->td_priority = td->td_user_pri;
2190 td->td_base_pri = td->td_user_pri;
2191 thread_unlock(td);
2192 }
2193 }
2194
2195 /*
2196 * Handle a stathz tick. This is really only relevant for timeshare
2197 * threads.
2198 */
2199 void
2200 sched_clock(struct thread *td)
2201 {
2202 struct tdq *tdq;
2203 struct td_sched *ts;
2204
2205 THREAD_LOCK_ASSERT(td, MA_OWNED);
2206 tdq = TDQ_SELF();
2207 #ifdef SMP
2208 /*
2209 * We run the long-term load balancer infrequently on the first cpu.
2210 */
2211 if (balance_tdq == tdq) {
2212 if (balance_ticks && --balance_ticks == 0)
2213 sched_balance();
2214 if (balance_group_ticks && --balance_group_ticks == 0)
2215 sched_balance_groups();
2216 }
2217 #endif
2218 /*
2219 * Advance the insert index once for each tick to ensure that all
2220 * threads get a chance to run.
2221 */
2222 if (tdq->tdq_idx == tdq->tdq_ridx) {
2223 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2224 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2225 tdq->tdq_ridx = tdq->tdq_idx;
2226 }
2227 ts = td->td_sched;
2228 if (td->td_pri_class & PRI_FIFO_BIT)
2229 return;
2230 if (td->td_pri_class == PRI_TIMESHARE) {
2231 /*
2232 * We used a tick; charge it to the thread so
2233 * that we can compute our interactivity.
2234 */
2235 td->td_sched->ts_runtime += tickincr;
2236 sched_interact_update(td);
2237 }
2238 /*
2239 * We used up one time slice.
2240 */
2241 if (--ts->ts_slice > 0)
2242 return;
2243 /*
2244 * We're out of time, recompute priorities and requeue.
2245 */
2246 sched_priority(td);
2247 td->td_flags |= TDF_NEEDRESCHED;
2248 }
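
/*
 * The index rotation above is what makes the timeshare queue a calendar
 * queue.  Assuming the generic RQ_NQS of 64 run-queue buckets from
 * <sys/runq.h>, tdq_idx (where new timeshare threads are inserted)
 * advances one bucket per stathz tick modulo RQ_NQS, while tdq_ridx
 * (where tdq_choose() removes threads) only follows once its bucket is
 * empty.  Every queued thread is therefore reached within one full
 * rotation and cannot be starved by higher priority timeshare threads.
 */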
2249
2250 /*
2251 * Called once per hz tick. Used for cpu utilization information. This
2252 * is easier than trying to scale based on stathz.
2253 */
2254 void
2255 sched_tick(void)
2256 {
2257 struct td_sched *ts;
2258
2259 ts = curthread->td_sched;
2260 /*
2261 * Ticks is updated asynchronously on a single cpu. Check here to
2262 * avoid incrementing ts_ticks multiple times in a single tick.
2263 */
2264 if (ts->ts_incrtick == ticks)
2265 return;
2266 /* Adjust ticks for pctcpu */
2267 ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2268 ts->ts_incrtick = ticks;
2269 ts->ts_ltick = ticks;
2270 /*
2271 * Update if we've exceeded our desired tick threshold by over one
2272 * second.
2273 */
2274 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2275 sched_pctcpu_update(ts);
2276 }
2277
2278 /*
2279 * Return whether the current CPU has runnable tasks. Used for in-kernel
2280 * cooperative idle threads.
2281 */
2282 int
2283 sched_runnable(void)
2284 {
2285 struct tdq *tdq;
2286 int load;
2287
2288 load = 1;
2289
2290 tdq = TDQ_SELF();
2291 if ((curthread->td_flags & TDF_IDLETD) != 0) {
2292 if (tdq->tdq_load > 0)
2293 goto out;
2294 } else
2295 if (tdq->tdq_load - 1 > 0)
2296 goto out;
2297 load = 0;
2298 out:
2299 return (load);
2300 }
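
/*
 * Note on the load test above: a running non-idle thread is itself
 * counted in tdq_load, so it subtracts one and reports runnable only
 * when some other thread is queued.  The idle thread adds no load and
 * checks tdq_load directly.
 */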
2301
2302 /*
2303 * Choose the highest priority thread to run. The thread is removed from
2304 * the run-queue while running; however, the load remains. For SMP we set
2305 * the tdq in the global idle bitmask if it idles here.
2306 */
2307 struct thread *
2308 sched_choose(void)
2309 {
2310 #ifdef SMP
2311 struct tdq_group *tdg;
2312 #endif
2313 struct td_sched *ts;
2314 struct tdq *tdq;
2315
2316 tdq = TDQ_SELF();
2317 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2318 ts = tdq_choose(tdq);
2319 if (ts) {
2320 tdq_runq_rem(tdq, ts);
2321 return (ts->ts_thread);
2322 }
2323 #ifdef SMP
2324 /*
2325 * We only set the idled bit when all of the cpus in the group are
2326 * idle. Otherwise we could get into a situation where a thread bounces
2327 * back and forth between two idle cores on separate physical CPUs.
2328 */
2329 tdg = tdq->tdq_group;
2330 tdg->tdg_idlemask |= PCPU_GET(cpumask);
2331 if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2332 atomic_set_int(&tdq_idle, tdg->tdg_mask);
2333 tdq->tdq_lowpri = PRI_MAX_IDLE;
2334 #endif
2335 return (PCPU_GET(idlethread));
2336 }
2337
2338 /*
2339 * Set owepreempt if necessary. Preemption never happens directly in ULE;
2340 * we always request it once we exit a critical section.
2341 */
2342 static inline void
2343 sched_setpreempt(struct thread *td)
2344 {
2345 struct thread *ctd;
2346 int cpri;
2347 int pri;
2348
2349 ctd = curthread;
2350 pri = td->td_priority;
2351 cpri = ctd->td_priority;
2352 if (td->td_priority < ctd->td_priority)
2353 curthread->td_flags |= TDF_NEEDRESCHED;
2354 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2355 return;
2356 /*
2357 * Always preempt IDLE threads. Otherwise only if the preempting
2358 * thread is an ithread.
2359 */
2360 if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2361 return;
2362 ctd->td_owepreempt = 1;
2363 return;
2364 }
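
/*
 * Reading the final test above: priorities are numerically inverted
 * (lower is better), so td_owepreempt is set only when the incoming
 * thread's priority is at or below preempt_thresh, or when the currently
 * running thread is in the idle class (cpri >= PRI_MIN_IDLE).  When
 * neither holds, the function returns and relies on the TDF_NEEDRESCHED
 * flag (set above when the new thread has better priority) at the next
 * natural switch point.
 */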
2365
2366 /*
2367 * Add a thread to a thread queue. Initialize the priority, slice, and runq,
2368 * then add it to the appropriate queue. This is the internal function called
2369 * when the tdq is predetermined.
2370 */
2371 void
2372 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2373 {
2374 struct td_sched *ts;
2375 int class;
2376 #ifdef SMP
2377 int cpumask;
2378 #endif
2379
2380 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2381 KASSERT((td->td_inhibitors == 0),
2382 ("sched_add: trying to run inhibited thread"));
2383 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2384 ("sched_add: bad thread state"));
2385 KASSERT(td->td_flags & TDF_INMEM,
2386 ("sched_add: thread swapped out"));
2387
2388 ts = td->td_sched;
2389 class = PRI_BASE(td->td_pri_class);
2390 TD_SET_RUNQ(td);
2391 if (ts->ts_slice == 0)
2392 ts->ts_slice = sched_slice;
2393 /*
2394 * Pick the run queue based on priority.
2395 */
2396 if (td->td_priority <= PRI_MAX_REALTIME)
2397 ts->ts_runq = &tdq->tdq_realtime;
2398 else if (td->td_priority <= PRI_MAX_TIMESHARE)
2399 ts->ts_runq = &tdq->tdq_timeshare;
2400 else
2401 ts->ts_runq = &tdq->tdq_idle;
2402 #ifdef SMP
2403 cpumask = 1 << ts->ts_cpu;
2404 /*
2405 * If we had been idle, clear our bit in the group and potentially
2406 * the global bitmap.
2407 */
2408 if ((class != PRI_IDLE && class != PRI_ITHD) &&
2409 (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2410 /*
2411 * Check to see if our group is unidling, and if so, remove it
2412 * from the global idle mask.
2413 */
2414 if (tdq->tdq_group->tdg_idlemask ==
2415 tdq->tdq_group->tdg_cpumask)
2416 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2417 /*
2418 * Now remove ourselves from the group specific idle mask.
2419 */
2420 tdq->tdq_group->tdg_idlemask &= ~cpumask;
2421 }
2422 if (td->td_priority < tdq->tdq_lowpri)
2423 tdq->tdq_lowpri = td->td_priority;
2424 #endif
2425 tdq_runq_add(tdq, ts, flags);
2426 tdq_load_add(tdq, ts);
2427 }
2428
2429 /*
2430 * Select the target thread queue and add a thread to it. Request
2431 * preemption or IPI a remote processor if required.
2432 */
2433 void
2434 sched_add(struct thread *td, int flags)
2435 {
2436 struct td_sched *ts;
2437 struct tdq *tdq;
2438 #ifdef SMP
2439 int cpuid;
2440 int cpu;
2441 #endif
2442 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2443 td, td->td_proc->p_comm, td->td_priority, curthread,
2444 curthread->td_proc->p_comm);
2445 THREAD_LOCK_ASSERT(td, MA_OWNED);
2446 ts = td->td_sched;
2447 /*
2448 * Recalculate the priority before we select the target cpu or
2449 * run-queue.
2450 */
2451 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2452 sched_priority(td);
2453 #ifdef SMP
2454 cpuid = PCPU_GET(cpuid);
2455 /*
2456 * Pick the destination cpu, and if it isn't ours, transfer to the
2457 * target cpu.
2458 */
2459 if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td) &&
2460 curthread->td_intr_nesting_level)
2461 ts->ts_cpu = cpuid;
2462 if (!THREAD_CAN_MIGRATE(td))
2463 cpu = ts->ts_cpu;
2464 else
2465 cpu = sched_pickcpu(td, flags);
2466 tdq = sched_setcpu(ts, cpu, flags);
2467 tdq_add(tdq, td, flags);
2468 if (cpu != cpuid) {
2469 tdq_notify(ts);
2470 return;
2471 }
2472 #else
2473 tdq = TDQ_SELF();
2474 TDQ_LOCK(tdq);
2475 /*
2476 * Now that the thread is moving to the run-queue, set the lock
2477 * to the scheduler's lock.
2478 */
2479 thread_lock_set(td, TDQ_LOCKPTR(tdq));
2480 tdq_add(tdq, td, flags);
2481 #endif
2482 if (!(flags & SRQ_YIELDING))
2483 sched_setpreempt(td);
2484 }
2485
2486 /*
2487 * Remove a thread from a run-queue without running it. This is used
2488 * when we're stealing a thread from a remote queue. Otherwise all threads
2489 * exit by calling sched_exit_thread() and sched_throw() themselves.
2490 */
2491 void
2492 sched_rem(struct thread *td)
2493 {
2494 struct tdq *tdq;
2495 struct td_sched *ts;
2496
2497 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2498 td, td->td_proc->p_comm, td->td_priority, curthread,
2499 curthread->td_proc->p_comm);
2500 ts = td->td_sched;
2501 tdq = TDQ_CPU(ts->ts_cpu);
2502 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2503 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2504 KASSERT(TD_ON_RUNQ(td),
2505 ("sched_rem: thread not on run queue"));
2506 tdq_runq_rem(tdq, ts);
2507 tdq_load_rem(tdq, ts);
2508 TD_SET_CAN_RUN(td);
2509 }
2510
2511 /*
2512 * Fetch cpu utilization information. Updates on demand.
2513 */
2514 fixpt_t
2515 sched_pctcpu(struct thread *td)
2516 {
2517 fixpt_t pctcpu;
2518 struct td_sched *ts;
2519
2520 pctcpu = 0;
2521 ts = td->td_sched;
2522 if (ts == NULL)
2523 return (0);
2524
2525 THREAD_LOCK_ASSERT(td, MA_OWNED);
2526 if (ts->ts_ticks) {
2527 int rtick;
2528
2529 sched_pctcpu_update(ts);
2530 /* How many rticks per second? */
2531 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2532 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2533 }
2534
2535 return (pctcpu);
2536 }
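
/*
 * A worked example of the fixed point math above, as a sketch assuming
 * hz = 1000 and the stock FSHIFT of 11 (FSCALE = 2048): a thread whose
 * averaging window works out to rtick = 500 run ticks per second gives
 *
 *	pctcpu = (2048 * ((2048 * 500) / 1000)) >> 11 = 1024
 *
 * which consumers such as ps(1) read as 1024 / FSCALE = 50% cpu.
 */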
2537
2538 /*
2539 * Enforce affinity settings for a thread. Called after adjustments to
2540 * cpumask.
2541 */
2542 void
2543 sched_affinity(struct thread *td)
2544 {
2545 #ifdef SMP
2546 struct td_sched *ts;
2547
2548 THREAD_LOCK_ASSERT(td, MA_OWNED);
2549 ts = td->td_sched;
2550 if (THREAD_CAN_SCHED(td, ts->ts_cpu))
2551 return;
2552 if (TD_ON_RUNQ(td)) {
2553 sched_rem(td);
2554 sched_add(td, SRQ_BORING);
2555 return;
2556 }
2557 if (!TD_IS_RUNNING(td))
2558 return;
2559 /*
2560 * Force a switch before returning to userspace. If the
2561 * target thread is not running locally, send an ipi to force
2562 * the issue.
2563 */
2564 td->td_flags |= TDF_NEEDRESCHED;
2565 if (td != curthread)
2566 ipi_selected(1 << ts->ts_cpu, IPI_PREEMPT);
2567 #endif
2568 }
2569
2570 /*
2571 * Bind a thread to a target cpu.
2572 */
2573 void
2574 sched_bind(struct thread *td, int cpu)
2575 {
2576 struct td_sched *ts;
2577
2578 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2579 KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
2580 ts = td->td_sched;
2581 if (ts->ts_flags & TSF_BOUND)
2582 sched_unbind(td);
2583 KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2584 ts->ts_flags |= TSF_BOUND;
2585 #ifdef SMP
2586 sched_pin();
2587 if (PCPU_GET(cpuid) == cpu)
2588 return;
2589 ts->ts_cpu = cpu;
2590 /* When we return from mi_switch we'll be on the correct cpu. */
2591 mi_switch(SW_VOL, NULL);
2592 #endif
2593 }
2594
2595 /*
2596 * Release a bound thread.
2597 */
2598 void
2599 sched_unbind(struct thread *td)
2600 {
2601 struct td_sched *ts;
2602
2603 THREAD_LOCK_ASSERT(td, MA_OWNED);
2604 KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
2605 ts = td->td_sched;
2606 if ((ts->ts_flags & TSF_BOUND) == 0)
2607 return;
2608 ts->ts_flags &= ~TSF_BOUND;
2609 #ifdef SMP
2610 sched_unpin();
2611 #endif
2612 }
2613
2614 int
2615 sched_is_bound(struct thread *td)
2616 {
2617 THREAD_LOCK_ASSERT(td, MA_OWNED);
2618 return (td->td_sched->ts_flags & TSF_BOUND);
2619 }
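
/*
 * A minimal usage sketch of the binding interface above (illustrative
 * only; do_percpu_work() and 'cpu' are hypothetical).  Binding applies
 * only to curthread and must be done under the thread lock, which is
 * dropped around the bound work because it is a spin lock:
 *
 *	thread_lock(curthread);
 *	sched_bind(curthread, cpu);	(may context switch to 'cpu')
 *	thread_unlock(curthread);
 *	do_percpu_work();		(guaranteed to run on 'cpu')
 *	thread_lock(curthread);
 *	sched_unbind(curthread);
 *	thread_unlock(curthread);
 */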
2620
2621 /*
2622 * Basic yield call.
2623 */
2624 void
2625 sched_relinquish(struct thread *td)
2626 {
2627 thread_lock(td);
2628 SCHED_STAT_INC(switch_relinquish);
2629 mi_switch(SW_VOL, NULL);
2630 thread_unlock(td);
2631 }
2632
2633 /*
2634 * Return the total system load.
2635 */
2636 int
2637 sched_load(void)
2638 {
2639 #ifdef SMP
2640 int total;
2641 int i;
2642
2643 total = 0;
2644 for (i = 0; i <= tdg_maxid; i++)
2645 total += TDQ_GROUP(i)->tdg_load;
2646 return (total);
2647 #else
2648 return (TDQ_SELF()->tdq_sysload);
2649 #endif
2650 }
2651
2652 int
2653 sched_sizeof_proc(void)
2654 {
2655 return (sizeof(struct proc));
2656 }
2657
2658 int
2659 sched_sizeof_thread(void)
2660 {
2661 return (sizeof(struct thread) + sizeof(struct td_sched));
2662 }
2663
2664 /*
2665 * The actual idle process.
2666 */
2667 void
2668 sched_idletd(void *dummy)
2669 {
2670 struct thread *td;
2671 struct tdq *tdq;
2672
2673 td = curthread;
2674 tdq = TDQ_SELF();
2675 mtx_assert(&Giant, MA_NOTOWNED);
2676 /* ULE relies on preemption for idle interruption. */
2677 for (;;) {
2678 #ifdef SMP
2679 if (tdq_idled(tdq))
2680 cpu_idle();
2681 #else
2682 cpu_idle();
2683 #endif
2684 }
2685 }
2686
2687 /*
2688 * A CPU is entering for the first time or a thread is exiting.
2689 */
2690 void
2691 sched_throw(struct thread *td)
2692 {
2693 struct thread *newtd;
2694 struct tdq *tdq;
2695
2696 tdq = TDQ_SELF();
2697 if (td == NULL) {
2698 /* Correct spinlock nesting and acquire the correct lock. */
2699 TDQ_LOCK(tdq);
2700 spinlock_exit();
2701 } else {
2702 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2703 tdq_load_rem(tdq, td->td_sched);
2704 }
2705 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2706 newtd = choosethread();
2707 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2708 PCPU_SET(switchtime, cpu_ticks());
2709 PCPU_SET(switchticks, ticks);
2710 cpu_throw(td, newtd); /* doesn't return */
2711 }
2712
2713 /*
2714 * This is called from fork_exit(). Just acquire the correct locks and
2715 * let fork do the rest of the work.
2716 */
2717 void
2718 sched_fork_exit(struct thread *td)
2719 {
2720 struct td_sched *ts;
2721 struct tdq *tdq;
2722 int cpuid;
2723
2724 /*
2725 * Finish setting up thread glue so that it begins execution in a
2726 * non-nested critical section with the scheduler lock held.
2727 */
2728 cpuid = PCPU_GET(cpuid);
2729 tdq = TDQ_CPU(cpuid);
2730 ts = td->td_sched;
2731 if (TD_IS_IDLETHREAD(td))
2732 td->td_lock = TDQ_LOCKPTR(tdq);
2733 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2734 td->td_oncpu = cpuid;
2735 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2736 }
2737
2738 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2739 "Scheduler");
2740 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2741 "Scheduler name");
2742 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2743 "Slice size for timeshare threads");
2744 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2745 "Interactivity score threshold");
2746 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2747 0,"Min priority for preemption, lower priorities have greater precedence");
2748 #ifdef SMP
2749 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2750 "Pick the target cpu based on priority rather than load.");
2751 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2752 "Number of hz ticks to keep thread affinity for");
2753 SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2754 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2755 "Enables the long-term load balancer");
2756 SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
2757 &balance_interval, 0,
2758 "Average frequency in stathz ticks to run the long-term balancer");
2759 SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2760 "Steals work from another hyper-threaded core on idle");
2761 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2762 "Attempts to steal work from other cores before idling");
2763 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2764 "Minimum load on remote cpu before we'll steal");
2765 SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2766 "True when a topology has been specified by the MD code.");
2767 #endif
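
/*
 * The knobs above live under the kern.sched sysctl node, so an
 * illustrative (not recommended-default) tuning session looks like:
 *
 *	sysctl kern.sched.name			reports "ULE"
 *	sysctl kern.sched.preempt_thresh=64	example value only
 *
 * The integer tunables are read where they are used rather than cached,
 * so changes take effect on subsequent scheduling decisions.
 */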
2768
2769 /* ps compat. All cpu percentages from ULE are weighted. */
2770 static int ccpu = 0;
2771 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2772
2773
2774 #define KERN_SWITCH_INCLUDE 1
2775 #include "kern/kern_switch.c"