FreeBSD/Linux Kernel Cross Reference
sys/kern/sched_ule.c
1 /*-
2 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice unmodified, this list of conditions, and the following
10 * disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
28 * This file implements the ULE scheduler. ULE supports independent CPU
29 * run queues and fine-grained locking. It has superior interactive
30 * performance under load even on uni-processor systems.
31 *
32 * etymology:
33 * ULE is the last three letters in schedule. It owes its name to a
34 * generic user created for a scheduling system by Paul Mikesell at
35 * Isilon Systems and a general lack of creativity on the part of the author.
36 */
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40
41 #include "opt_hwpmc_hooks.h"
42 #include "opt_sched.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/kdb.h>
47 #include <sys/kernel.h>
48 #include <sys/ktr.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/proc.h>
52 #include <sys/resource.h>
53 #include <sys/resourcevar.h>
54 #include <sys/sched.h>
55 #include <sys/smp.h>
56 #include <sys/sx.h>
57 #include <sys/sysctl.h>
58 #include <sys/sysproto.h>
59 #include <sys/turnstile.h>
60 #include <sys/umtx.h>
61 #include <sys/vmmeter.h>
62 #ifdef KTRACE
63 #include <sys/uio.h>
64 #include <sys/ktrace.h>
65 #endif
66
67 #ifdef HWPMC_HOOKS
68 #include <sys/pmckern.h>
69 #endif
70
71 #include <machine/cpu.h>
72 #include <machine/smp.h>
73
74 #if !defined(__i386__) && !defined(__amd64__) && !defined(__arm__)
75 #error "This architecture is not currently compatible with ULE"
76 #endif
77
78 #define KTR_ULE 0
79
80 /*
81 * Thread scheduler specific section. All fields are protected
82 * by the thread lock.
83 */
84 struct td_sched {
85 TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */
86 struct thread *ts_thread; /* Active associated thread. */
87 struct runq *ts_runq; /* Run-queue we're queued on. */
88 short ts_flags; /* TSF_* flags. */
89 u_char ts_rqindex; /* Run queue index. */
90 u_char ts_cpu; /* CPU that we have affinity for. */
91 int ts_slice; /* Ticks of slice remaining. */
92 u_int ts_slptime; /* Number of ticks we vol. slept */
93 u_int ts_runtime; /* Number of ticks we were running */
94 /* The following variables are only used for pctcpu calculation */
95 int ts_ltick; /* Last tick that we were running on */
96 int ts_ftick; /* First tick that we were running on */
97 int ts_ticks; /* Tick count */
98 #ifdef SMP
99 int ts_rltick; /* Real last tick, for affinity. */
100 #endif
101 };
102 /* flags kept in ts_flags */
103 #define TSF_BOUND 0x0001 /* Thread can not migrate. */
104 #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */
105
106 static struct td_sched td_sched0;
107
108 /*
109 * Cpu percentage computation macros and defines.
110 *
111 * SCHED_TICK_SECS: Number of seconds to average the cpu usage across.
112 * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across.
113 * SCHED_TICK_MAX: Maximum number of ticks before scaling back.
114 * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results.
115 * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count.
116 * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks.
117 */
118 #define SCHED_TICK_SECS 10
119 #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS)
120 #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz)
121 #define SCHED_TICK_SHIFT 10
122 #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT)
123 #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz))
124
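/*
 * Worked example of the tick accounting above (for illustration only,
 * assuming hz = 1000): SCHED_TICK_TARG is 10000 hz ticks (10 seconds) and
 * SCHED_TICK_MAX is 11000.  ts_ticks accumulates hz ticks shifted left by
 * SCHED_TICK_SHIFT, so SCHED_TICK_HZ() recovers plain hz ticks while
 * SCHED_TICK_TOTAL() yields the window length, never less than a second.
 * A thread on cpu for the whole window therefore has SCHED_TICK_HZ()
 * roughly equal to SCHED_TICK_TOTAL().
 */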
125 /*
126 * These macros determine priorities for non-interactive threads. They are
127 * assigned a priority based on their recent cpu utilization as expressed
128 * by the ratio of ticks to the tick total. NHALF priorities at the start
129 * and end of the MIN to MAX timeshare range are only reachable with negative
130 * or positive nice respectively.
131 *
132 * PRI_RANGE: Priority range for utilization dependent priorities.
133 * PRI_NRESV: Number of nice values.
134 * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total.
135 * PRI_NICE: Determines the part of the priority inherited from nice.
136 */
137 #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN)
138 #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2)
139 #define SCHED_PRI_MIN (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
140 #define SCHED_PRI_MAX (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
141 #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN)
142 #define SCHED_PRI_TICKS(ts) \
143 (SCHED_TICK_HZ((ts)) / \
144 (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
145 #define SCHED_PRI_NICE(nice) (nice)
146
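/*
 * For illustration (exact numbers depend on the priority ranges in
 * <sys/priority.h>): a thread that monopolized the cpu for the whole
 * averaging window has SCHED_TICK_HZ() ~= SCHED_TICK_TOTAL(), so
 * SCHED_PRI_TICKS() comes out near SCHED_PRI_RANGE and the thread lands
 * close to SCHED_PRI_MAX before nice is applied; a thread with almost no
 * recent cpu time stays near SCHED_PRI_MIN.  SCHED_PRI_NICE() then shifts
 * the result by one priority step per nice level.
 */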
147 /*
148 * These determine the interactivity of a process. Interactivity differs from
149 * cpu utilization in that it expresses the voluntary time slept vs. time run,
150 * while cpu utilization counts all time not running. This more accurately
151 * models the intent of the thread.
152 *
153 * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate
154 * before throttling back.
155 * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
156 * INTERACT_MAX: Maximum interactivity value. Smaller is better.
157 * INTERACT_THRESH: Threshold for placement on the current runq.
158 */
159 #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT)
160 #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT)
161 #define SCHED_INTERACT_MAX (100)
162 #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
163 #define SCHED_INTERACT_THRESH (30)
164
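/*
 * To give a sense of scale (assuming hz = 1000): SCHED_SLP_RUN_MAX caps
 * the combined sleep + run history at about five seconds, stored shifted
 * by SCHED_TICK_SHIFT, and any thread whose interactivity score falls
 * below SCHED_INTERACT_THRESH (30) is treated as interactive.
 */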
165 /*
166 * tickincr: Converts a stathz tick into a hz domain scaled by
167 * the shift factor. Without the shift the error rate
168 * due to rounding would be unacceptably high.
169 * realstathz: stathz is sometimes 0; in that case we run off of hz.
170 * sched_slice: Runtime of each thread before rescheduling.
171 * preempt_thresh: Priority threshold for preemption and remote IPIs.
172 */
173 static int sched_interact = SCHED_INTERACT_THRESH;
174 static int realstathz;
175 static int tickincr;
176 static int sched_slice;
177 #ifdef PREEMPTION
178 #ifdef FULL_PREEMPTION
179 static int preempt_thresh = PRI_MAX_IDLE;
180 #else
181 static int preempt_thresh = PRI_MIN_KERN;
182 #endif
183 #else
184 static int preempt_thresh = 0;
185 #endif
186
187 /*
188 * tdq - per processor runqs and statistics. All fields are protected by the
189 * tdq_lock. The load and lowpri may be accessed without the lock to avoid
190 * excess locking in sched_pickcpu().
191 */
192 struct tdq {
193 struct mtx *tdq_lock; /* Pointer to group lock. */
194 struct runq tdq_realtime; /* real-time run queue. */
195 struct runq tdq_timeshare; /* timeshare run queue. */
196 struct runq tdq_idle; /* Queue of IDLE threads. */
197 int tdq_load; /* Aggregate load. */
198 u_char tdq_idx; /* Current insert index. */
199 u_char tdq_ridx; /* Current removal index. */
200 #ifdef SMP
201 u_char tdq_lowpri; /* Lowest priority thread. */
202 int tdq_transferable; /* Transferable thread count. */
203 LIST_ENTRY(tdq) tdq_siblings; /* Next in tdq group. */
204 struct tdq_group *tdq_group; /* Our processor group. */
205 #else
206 int tdq_sysload; /* For loadavg, !ITHD load. */
207 #endif
208 } __aligned(64);
209
210
211 #ifdef SMP
212 /*
213 * tdq groups are groups of processors which can cheaply share threads. When
214 * one processor in the group goes idle it will check the runqs of the other
215 * processors in its group prior to halting and waiting for an interrupt.
216 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not NUMA.
217 * In a NUMA environment we'd want an idle bitmap per group and a two tiered
218 * load balancer.
219 */
220 struct tdq_group {
221 struct mtx tdg_lock; /* Protects all fields below. */
222 int tdg_cpus; /* Count of CPUs in this tdq group. */
223 cpumask_t tdg_cpumask; /* Mask of cpus in this group. */
224 cpumask_t tdg_idlemask; /* Idle cpus in this group. */
225 cpumask_t tdg_mask; /* Bit mask for first cpu. */
226 int tdg_load; /* Total load of this group. */
227 int tdg_transferable; /* Transferable load of this group. */
228 LIST_HEAD(, tdq) tdg_members; /* Linked list of all members. */
229 char tdg_name[16]; /* lock name. */
230 } __aligned(64);
231
232 #define SCHED_AFFINITY_DEFAULT (max(1, hz / 300))
233 #define SCHED_AFFINITY(ts) ((ts)->ts_rltick > ticks - affinity)
234
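/*
 * For illustration, with hz = 1000 the default affinity window is
 * max(1, 1000 / 300) = 3 ticks, so SCHED_AFFINITY() reports true when the
 * thread last ran within roughly the past 3ms and is presumed to still be
 * cache warm on ts_cpu.
 */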
235 /*
236 * Run-time tunables.
237 */
238 static int rebalance = 1;
239 static int balance_interval = 128; /* Default set in sched_initticks(). */
240 static int pick_pri = 1;
241 static int affinity;
242 static int tryself = 1;
243 static int steal_htt = 1;
244 static int steal_idle = 1;
245 static int steal_thresh = 2;
246 static int topology = 0;
247
248 /*
249 * One thread queue per processor.
250 */
251 static volatile cpumask_t tdq_idle;
252 static int tdg_maxid;
253 static struct tdq tdq_cpu[MAXCPU];
254 static struct tdq_group tdq_groups[MAXCPU];
255 static struct tdq *balance_tdq;
256 static int balance_group_ticks;
257 static int balance_ticks;
258
259 #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)])
260 #define TDQ_CPU(x) (&tdq_cpu[(x)])
261 #define TDQ_ID(x) ((int)((x) - tdq_cpu))
262 #define TDQ_GROUP(x) (&tdq_groups[(x)])
263 #define TDG_ID(x) ((int)((x) - tdq_groups))
264 #else /* !SMP */
265 static struct tdq tdq_cpu;
266 static struct mtx tdq_lock;
267
268 #define TDQ_ID(x) (0)
269 #define TDQ_SELF() (&tdq_cpu)
270 #define TDQ_CPU(x) (&tdq_cpu)
271 #endif
272
273 #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type))
274 #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t)))
275 #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
276 #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
277 #define TDQ_LOCKPTR(t) ((t)->tdq_lock)
278
279 static void sched_priority(struct thread *);
280 static void sched_thread_priority(struct thread *, u_char);
281 static int sched_interact_score(struct thread *);
282 static void sched_interact_update(struct thread *);
283 static void sched_interact_fork(struct thread *);
284 static void sched_pctcpu_update(struct td_sched *);
285
286 /* Operations on per processor queues */
287 static struct td_sched * tdq_choose(struct tdq *);
288 static void tdq_setup(struct tdq *);
289 static void tdq_load_add(struct tdq *, struct td_sched *);
290 static void tdq_load_rem(struct tdq *, struct td_sched *);
291 static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
292 static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
293 void tdq_print(int cpu);
294 static void runq_print(struct runq *rq);
295 static void tdq_add(struct tdq *, struct thread *, int);
296 #ifdef SMP
297 static void tdq_move(struct tdq *, struct tdq *);
298 static int tdq_idled(struct tdq *);
299 static void tdq_notify(struct td_sched *);
300 static struct td_sched *tdq_steal(struct tdq *);
301 static struct td_sched *runq_steal(struct runq *);
302 static int sched_pickcpu(struct td_sched *, int);
303 static void sched_balance(void);
304 static void sched_balance_groups(void);
305 static void sched_balance_group(struct tdq_group *);
306 static void sched_balance_pair(struct tdq *, struct tdq *);
307 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
308 static inline struct mtx *thread_block_switch(struct thread *);
309 static inline void thread_unblock_switch(struct thread *, struct mtx *);
310 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
311
312 #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
313 #endif
314
315 static void sched_setup(void *dummy);
316 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)
317
318 static void sched_initticks(void *dummy);
319 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)
320
321 /*
322 * Print the threads waiting on a run-queue.
323 */
324 static void
325 runq_print(struct runq *rq)
326 {
327 struct rqhead *rqh;
328 struct td_sched *ts;
329 int pri;
330 int j;
331 int i;
332
333 for (i = 0; i < RQB_LEN; i++) {
334 printf("\t\trunq bits %d 0x%zx\n",
335 i, rq->rq_status.rqb_bits[i]);
336 for (j = 0; j < RQB_BPW; j++)
337 if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
338 pri = j + (i << RQB_L2BPW);
339 rqh = &rq->rq_queues[pri];
340 TAILQ_FOREACH(ts, rqh, ts_procq) {
341 printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
342 ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
343 }
344 }
345 }
346 }
347
348 /*
349 * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
350 */
351 void
352 tdq_print(int cpu)
353 {
354 struct tdq *tdq;
355
356 tdq = TDQ_CPU(cpu);
357
358 printf("tdq %d:\n", TDQ_ID(tdq));
359 printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq));
360 printf("\tload: %d\n", tdq->tdq_load);
361 printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
362 printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
363 printf("\trealtime runq:\n");
364 runq_print(&tdq->tdq_realtime);
365 printf("\ttimeshare runq:\n");
366 runq_print(&tdq->tdq_timeshare);
367 printf("\tidle runq:\n");
368 runq_print(&tdq->tdq_idle);
369 #ifdef SMP
370 printf("\tload transferable: %d\n", tdq->tdq_transferable);
371 printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
372 printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group));
373 printf("\tLock name: %s\n", tdq->tdq_group->tdg_name);
374 #endif
375 }
376
377 #define TS_RQ_PPQ (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
378 /*
379 * Add a thread to the actual run-queue. Keeps transferable counts up to
380 * date with what is actually on the run-queue. Selects the correct
381 * queue position for timeshare threads.
382 */
383 static __inline void
384 tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
385 {
386 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
387 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
388 #ifdef SMP
389 if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
390 tdq->tdq_transferable++;
391 tdq->tdq_group->tdg_transferable++;
392 ts->ts_flags |= TSF_XFERABLE;
393 }
394 #endif
395 if (ts->ts_runq == &tdq->tdq_timeshare) {
396 u_char pri;
397
398 pri = ts->ts_thread->td_priority;
399 KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
400 ("Invalid priority %d on timeshare runq", pri));
401 /*
402 * This queue contains only priorities between MIN and MAX
403 * timeshare. Use the whole queue to represent these values.
404 */
405 if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
406 pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
407 pri = (pri + tdq->tdq_idx) % RQ_NQS;
408 /*
409 * This effectively shortens the queue by one so we
410 * can have a one slot difference between idx and
411 * ridx while we wait for threads to drain.
412 */
413 if (tdq->tdq_ridx != tdq->tdq_idx &&
414 pri == tdq->tdq_ridx)
415 pri = (unsigned char)(pri - 1) % RQ_NQS;
416 } else
417 pri = tdq->tdq_ridx;
418 runq_add_pri(ts->ts_runq, ts, pri, flags);
419 } else
420 runq_add(ts->ts_runq, ts, flags);
421 }
422
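/*
 * Worked example of the circular timeshare indexing above (values picked
 * purely for illustration): with RQ_NQS == 64, tdq_idx == 10 and
 * tdq_ridx == 5, a scaled priority of 60 is queued at (60 + 10) % 64 == 6.
 * Had the result landed on the removal index 5, it would have been bumped
 * back to slot 4 so that newly added threads never share the bucket that
 * is currently draining.
 */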
423 /*
424 * Remove a thread from a run-queue. This typically happens when a thread
425 * is selected to run. Running threads are not on the queue and the
426 * transferable count does not reflect them.
427 */
428 static __inline void
429 tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
430 {
431 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
432 KASSERT(ts->ts_runq != NULL,
433 ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
434 #ifdef SMP
435 if (ts->ts_flags & TSF_XFERABLE) {
436 tdq->tdq_transferable--;
437 tdq->tdq_group->tdg_transferable--;
438 ts->ts_flags &= ~TSF_XFERABLE;
439 }
440 #endif
441 if (ts->ts_runq == &tdq->tdq_timeshare) {
442 if (tdq->tdq_idx != tdq->tdq_ridx)
443 runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
444 else
445 runq_remove_idx(ts->ts_runq, ts, NULL);
446 /*
447 * For timeshare threads we update the priority here so
448 * the priority reflects the time we've been sleeping.
449 */
450 ts->ts_ltick = ticks;
451 sched_pctcpu_update(ts);
452 sched_priority(ts->ts_thread);
453 } else
454 runq_remove(ts->ts_runq, ts);
455 }
456
457 /*
458 * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
459 * for this thread to the referenced thread queue.
460 */
461 static void
462 tdq_load_add(struct tdq *tdq, struct td_sched *ts)
463 {
464 int class;
465
466 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
467 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
468 class = PRI_BASE(ts->ts_thread->td_pri_class);
469 tdq->tdq_load++;
470 CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
471 if (class != PRI_ITHD &&
472 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
473 #ifdef SMP
474 tdq->tdq_group->tdg_load++;
475 #else
476 tdq->tdq_sysload++;
477 #endif
478 }
479
480 /*
481 * Remove the load from a thread that is transitioning to a sleep state or
482 * exiting.
483 */
484 static void
485 tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
486 {
487 int class;
488
489 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
490 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
491 class = PRI_BASE(ts->ts_thread->td_pri_class);
492 if (class != PRI_ITHD &&
493 (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
494 #ifdef SMP
495 tdq->tdq_group->tdg_load--;
496 #else
497 tdq->tdq_sysload--;
498 #endif
499 KASSERT(tdq->tdq_load != 0,
500 ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
501 tdq->tdq_load--;
502 CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
503 ts->ts_runq = NULL;
504 }
505
506 #ifdef SMP
507 /*
508 * sched_balance is a simple CPU load balancing algorithm. It operates by
509 * finding the least loaded and most loaded cpu and equalizing their load
510 * by migrating some processes.
511 *
512 * Dealing only with two CPUs at a time has two advantages. Firstly, most
513 * installations will only have 2 cpus. Secondly, load balancing too much at
514 * once can have an unpleasant effect on the system. The scheduler rarely has
515 * enough information to make perfect decisions. So this algorithm chooses
516 * simplicity and more gradual effects on load in larger systems.
517 *
518 */
519 static void
520 sched_balance()
521 {
522 struct tdq_group *high;
523 struct tdq_group *low;
524 struct tdq_group *tdg;
525 struct tdq *tdq;
526 int cnt;
527 int i;
528
529 /*
530 * Select a random time between .5 * balance_interval and
531 * 1.5 * balance_interval.
532 */
533 balance_ticks = max(balance_interval / 2, 1);
534 balance_ticks += random() % balance_interval;
535 if (smp_started == 0 || rebalance == 0)
536 return;
537 tdq = TDQ_SELF();
538 TDQ_UNLOCK(tdq);
539 low = high = NULL;
540 i = random() % (tdg_maxid + 1);
541 for (cnt = 0; cnt <= tdg_maxid; cnt++) {
542 tdg = TDQ_GROUP(i);
543 /*
544 * Find the CPU with the highest load that has some
545 * threads to transfer.
546 */
547 if ((high == NULL || tdg->tdg_load > high->tdg_load)
548 && tdg->tdg_transferable)
549 high = tdg;
550 if (low == NULL || tdg->tdg_load < low->tdg_load)
551 low = tdg;
552 if (++i > tdg_maxid)
553 i = 0;
554 }
555 if (low != NULL && high != NULL && high != low)
556 sched_balance_pair(LIST_FIRST(&high->tdg_members),
557 LIST_FIRST(&low->tdg_members));
558 TDQ_LOCK(tdq);
559 }
560
561 /*
562 * Balance load between CPUs in a group. Will only migrate within the group.
563 */
564 static void
565 sched_balance_groups()
566 {
567 struct tdq *tdq;
568 int i;
569
570 /*
571 * Select a random time between .5 * balance_interval and
572 * 1.5 * balance_interval.
573 */
574 balance_group_ticks = max(balance_interval / 2, 1);
575 balance_group_ticks += random() % balance_interval;
576 if (smp_started == 0 || rebalance == 0)
577 return;
578 tdq = TDQ_SELF();
579 TDQ_UNLOCK(tdq);
580 for (i = 0; i <= tdg_maxid; i++)
581 sched_balance_group(TDQ_GROUP(i));
582 TDQ_LOCK(tdq);
583 }
584
585 /*
586 * Finds the greatest imbalance between two tdqs in a group.
587 */
588 static void
589 sched_balance_group(struct tdq_group *tdg)
590 {
591 struct tdq *tdq;
592 struct tdq *high;
593 struct tdq *low;
594 int load;
595
596 if (tdg->tdg_transferable == 0)
597 return;
598 low = NULL;
599 high = NULL;
600 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
601 load = tdq->tdq_load;
602 if (high == NULL || load > high->tdq_load)
603 high = tdq;
604 if (low == NULL || load < low->tdq_load)
605 low = tdq;
606 }
607 if (high != NULL && low != NULL && high != low)
608 sched_balance_pair(high, low);
609 }
610
611 /*
612 * Lock two thread queues using their address to maintain lock order.
613 */
614 static void
615 tdq_lock_pair(struct tdq *one, struct tdq *two)
616 {
617 if (one < two) {
618 TDQ_LOCK(one);
619 TDQ_LOCK_FLAGS(two, MTX_DUPOK);
620 } else {
621 TDQ_LOCK(two);
622 TDQ_LOCK_FLAGS(one, MTX_DUPOK);
623 }
624 }
625
626 /*
627 * Unlock two thread queues. Order is not important here.
628 */
629 static void
630 tdq_unlock_pair(struct tdq *one, struct tdq *two)
631 {
632 TDQ_UNLOCK(one);
633 TDQ_UNLOCK(two);
634 }
635
636 /*
637 * Transfer load between two imbalanced thread queues.
638 */
639 static void
640 sched_balance_pair(struct tdq *high, struct tdq *low)
641 {
642 int transferable;
643 int high_load;
644 int low_load;
645 int move;
646 int diff;
647 int i;
648
649 tdq_lock_pair(high, low);
650 /*
651 * If we're transferring within a group we have to use this specific
652 * tdq's transferable count, otherwise we can steal from other members
653 * of the group.
654 */
655 if (high->tdq_group == low->tdq_group) {
656 transferable = high->tdq_transferable;
657 high_load = high->tdq_load;
658 low_load = low->tdq_load;
659 } else {
660 transferable = high->tdq_group->tdg_transferable;
661 high_load = high->tdq_group->tdg_load;
662 low_load = low->tdq_group->tdg_load;
663 }
664 /*
665 * Determine what the imbalance is and then adjust that to how many
666 * threads we actually have to give up (transferable).
667 */
668 if (transferable != 0) {
669 diff = high_load - low_load;
670 move = diff / 2;
671 if (diff & 0x1)
672 move++;
673 move = min(move, transferable);
674 for (i = 0; i < move; i++)
675 tdq_move(high, low);
676 /*
677 * IPI the target cpu to force it to reschedule with the new
678 * workload.
679 */
680 ipi_selected(1 << TDQ_ID(low), IPI_PREEMPT);
681 }
682 tdq_unlock_pair(high, low);
683 return;
684 }
685
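/*
 * For example, if the high queue reports a load of 5 and the low queue a
 * load of 2, diff is 3 and move rounds up to 2 threads, clipped by the
 * transferable count.  The IPI then makes the receiving cpu notice the
 * new work immediately rather than at its next clock interrupt.
 */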
686 /*
687 * Move a thread from one thread queue to another.
688 */
689 static void
690 tdq_move(struct tdq *from, struct tdq *to)
691 {
692 struct td_sched *ts;
693 struct thread *td;
694 struct tdq *tdq;
695 int cpu;
696
697 TDQ_LOCK_ASSERT(from, MA_OWNED);
698 TDQ_LOCK_ASSERT(to, MA_OWNED);
699
700 tdq = from;
701 cpu = TDQ_ID(to);
702 ts = tdq_steal(tdq);
703 if (ts == NULL) {
704 struct tdq_group *tdg;
705
706 tdg = tdq->tdq_group;
707 LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
708 if (tdq == from || tdq->tdq_transferable == 0)
709 continue;
710 ts = tdq_steal(tdq);
711 break;
712 }
713 if (ts == NULL)
714 return;
715 }
716 if (tdq == to)
717 return;
718 td = ts->ts_thread;
719 /*
720 * Although the run queue is locked the thread may be blocked. Lock
721 * it to clear this and acquire the run-queue lock.
722 */
723 thread_lock(td);
724 /* Drop recursive lock on from acquired via thread_lock(). */
725 TDQ_UNLOCK(from);
726 sched_rem(td);
727 ts->ts_cpu = cpu;
728 td->td_lock = TDQ_LOCKPTR(to);
729 tdq_add(to, td, SRQ_YIELDING);
730 }
731
732 /*
733 * This tdq has idled. Try to steal a thread from another cpu and switch
734 * to it.
735 */
736 static int
737 tdq_idled(struct tdq *tdq)
738 {
739 struct tdq_group *tdg;
740 struct tdq *steal;
741 int highload;
742 int highcpu;
743 int cpu;
744
745 if (smp_started == 0 || steal_idle == 0)
746 return (1);
747 /* We don't want to be preempted while we're iterating over tdqs */
748 spinlock_enter();
749 tdg = tdq->tdq_group;
750 /*
751 * If we're in a cpu group, try and steal threads from another cpu in
752 * the group before idling. In a HTT group all cpus share the same
753 * run-queue lock, however, we still need a recursive lock to
754 * call tdq_move().
755 */
756 if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
757 TDQ_LOCK(tdq);
758 LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
759 if (steal == tdq || steal->tdq_transferable == 0)
760 continue;
761 TDQ_LOCK(steal);
762 goto steal;
763 }
764 TDQ_UNLOCK(tdq);
765 }
766 /*
767 * Find the least loaded CPU with a transferable thread and attempt
768 * to steal it. We make a lockless pass and then verify that the
769 * thread is still available after locking.
770 */
771 for (;;) {
772 highcpu = 0;
773 highload = 0;
774 for (cpu = 0; cpu <= mp_maxid; cpu++) {
775 if (CPU_ABSENT(cpu))
776 continue;
777 steal = TDQ_CPU(cpu);
778 if (steal->tdq_transferable == 0)
779 continue;
780 if (steal->tdq_load < highload)
781 continue;
782 highload = steal->tdq_load;
783 highcpu = cpu;
784 }
785 if (highload < steal_thresh)
786 break;
787 steal = TDQ_CPU(highcpu);
788 if (steal == tdq)
789 break;
790 tdq_lock_pair(tdq, steal);
791 if (steal->tdq_load >= steal_thresh && steal->tdq_transferable)
792 goto steal;
793 tdq_unlock_pair(tdq, steal);
794 }
795 spinlock_exit();
796 return (1);
797 steal:
798 spinlock_exit();
799 tdq_move(steal, tdq);
800 TDQ_UNLOCK(steal);
801 mi_switch(SW_VOL, NULL);
802 thread_unlock(curthread);
803
804 return (0);
805 }
806
807 /*
808 * Notify a remote cpu of new work. Sends an IPI if criteria are met.
809 */
810 static void
811 tdq_notify(struct td_sched *ts)
812 {
813 struct thread *ctd;
814 struct pcpu *pcpu;
815 int cpri;
816 int pri;
817 int cpu;
818
819 cpu = ts->ts_cpu;
820 pri = ts->ts_thread->td_priority;
821 pcpu = pcpu_find(cpu);
822 ctd = pcpu->pc_curthread;
823 cpri = ctd->td_priority;
824
825 /*
826 * If our priority is not better than the current priority there is
827 * nothing to do.
828 */
829 if (pri > cpri)
830 return;
831 /*
832 * Always IPI idle.
833 */
834 if (cpri > PRI_MIN_IDLE)
835 goto sendipi;
836 /*
837 * If we're realtime or better and there is timeshare or worse running
838 * send an IPI.
839 */
840 if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
841 goto sendipi;
842 /*
843 * Otherwise only IPI if we exceed the threshold.
844 */
845 if (pri > preempt_thresh)
846 return;
847 sendipi:
848 ctd->td_flags |= TDF_NEEDRESCHED;
849 ipi_selected(1 << cpu, IPI_PREEMPT);
850 }
851
852 /*
853 * Steals load from a timeshare queue. Honors the rotating queue head
854 * index.
855 */
856 static struct td_sched *
857 runq_steal_from(struct runq *rq, u_char start)
858 {
859 struct td_sched *ts;
860 struct rqbits *rqb;
861 struct rqhead *rqh;
862 int first;
863 int bit;
864 int pri;
865 int i;
866
867 rqb = &rq->rq_status;
868 bit = start & (RQB_BPW -1);
869 pri = 0;
870 first = 0;
871 again:
872 for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
873 if (rqb->rqb_bits[i] == 0)
874 continue;
875 if (bit != 0) {
876 for (pri = bit; pri < RQB_BPW; pri++)
877 if (rqb->rqb_bits[i] & (1ul << pri))
878 break;
879 if (pri >= RQB_BPW)
880 continue;
881 } else
882 pri = RQB_FFS(rqb->rqb_bits[i]);
883 pri += (i << RQB_L2BPW);
884 rqh = &rq->rq_queues[pri];
885 TAILQ_FOREACH(ts, rqh, ts_procq) {
886 if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
887 return (ts);
888 first = 1;
889 }
890 }
891 if (start != 0) {
892 start = 0;
893 goto again;
894 }
895
896 return (NULL);
897 }
898
899 /*
900 * Steals load from a standard linear queue.
901 */
902 static struct td_sched *
903 runq_steal(struct runq *rq)
904 {
905 struct rqhead *rqh;
906 struct rqbits *rqb;
907 struct td_sched *ts;
908 int word;
909 int bit;
910
911 rqb = &rq->rq_status;
912 for (word = 0; word < RQB_LEN; word++) {
913 if (rqb->rqb_bits[word] == 0)
914 continue;
915 for (bit = 0; bit < RQB_BPW; bit++) {
916 if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
917 continue;
918 rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
919 TAILQ_FOREACH(ts, rqh, ts_procq)
920 if (THREAD_CAN_MIGRATE(ts->ts_thread))
921 return (ts);
922 }
923 }
924 return (NULL);
925 }
926
927 /*
928 * Attempt to steal a thread in priority order from a thread queue.
929 */
930 static struct td_sched *
931 tdq_steal(struct tdq *tdq)
932 {
933 struct td_sched *ts;
934
935 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
936 if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
937 return (ts);
938 if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
939 return (ts);
940 return (runq_steal(&tdq->tdq_idle));
941 }
942
943 /*
944 * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
945 * current lock and returns with the assigned queue locked.
946 */
947 static inline struct tdq *
948 sched_setcpu(struct td_sched *ts, int cpu, int flags)
949 {
950 struct thread *td;
951 struct tdq *tdq;
952
953 THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
954
955 tdq = TDQ_CPU(cpu);
956 td = ts->ts_thread;
957 ts->ts_cpu = cpu;
958
959 /* If the lock matches just return the queue. */
960 if (td->td_lock == TDQ_LOCKPTR(tdq))
961 return (tdq);
962 #ifdef notyet
963 /*
964 * If the thread isn't running its lockptr is a
965 * turnstile or a sleepqueue. We can just lock_set without
966 * blocking.
967 */
968 if (TD_CAN_RUN(td)) {
969 TDQ_LOCK(tdq);
970 thread_lock_set(td, TDQ_LOCKPTR(tdq));
971 return (tdq);
972 }
973 #endif
974 /*
975 * The hard case, migration, we need to block the thread first to
976 * prevent order reversals with other cpus locks.
977 */
978 thread_lock_block(td);
979 TDQ_LOCK(tdq);
980 thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
981 return (tdq);
982 }
983
984 /*
985 * Find the thread queue running the lowest priority thread.
986 */
987 static int
988 tdq_lowestpri(void)
989 {
990 struct tdq *tdq;
991 int lowpri;
992 int lowcpu;
993 int lowload;
994 int load;
995 int cpu;
996 int pri;
997
998 lowload = 0;
999 lowpri = lowcpu = 0;
1000 for (cpu = 0; cpu <= mp_maxid; cpu++) {
1001 if (CPU_ABSENT(cpu))
1002 continue;
1003 tdq = TDQ_CPU(cpu);
1004 pri = tdq->tdq_lowpri;
1005 load = TDQ_CPU(cpu)->tdq_load;
1006 CTR4(KTR_ULE,
1007 "cpu %d pri %d lowcpu %d lowpri %d",
1008 cpu, pri, lowcpu, lowpri);
1009 if (pri < lowpri)
1010 continue;
1011 if (lowpri && lowpri == pri && load > lowload)
1012 continue;
1013 lowpri = pri;
1014 lowcpu = cpu;
1015 lowload = load;
1016 }
1017
1018 return (lowcpu);
1019 }
1020
1021 /*
1022 * Find the thread queue with the least load.
1023 */
1024 static int
1025 tdq_lowestload(void)
1026 {
1027 struct tdq *tdq;
1028 int lowload;
1029 int lowpri;
1030 int lowcpu;
1031 int load;
1032 int cpu;
1033 int pri;
1034
1035 lowcpu = 0;
1036 lowload = TDQ_CPU(0)->tdq_load;
1037 lowpri = TDQ_CPU(0)->tdq_lowpri;
1038 for (cpu = 1; cpu <= mp_maxid; cpu++) {
1039 if (CPU_ABSENT(cpu))
1040 continue;
1041 tdq = TDQ_CPU(cpu);
1042 load = tdq->tdq_load;
1043 pri = tdq->tdq_lowpri;
1044 CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
1045 cpu, load, lowcpu, lowload);
1046 if (load > lowload)
1047 continue;
1048 if (load == lowload && pri < lowpri)
1049 continue;
1050 lowcpu = cpu;
1051 lowload = load;
1052 lowpri = pri;
1053 }
1054
1055 return (lowcpu);
1056 }
1057
1058 /*
1059 * Pick the destination cpu for sched_add(). Respects affinity and makes
1060 * a determination based on load or priority of available processors.
1061 */
1062 static int
1063 sched_pickcpu(struct td_sched *ts, int flags)
1064 {
1065 struct tdq *tdq;
1066 int self;
1067 int pri;
1068 int cpu;
1069
1070 cpu = self = PCPU_GET(cpuid);
1071 if (smp_started == 0)
1072 return (self);
1073 /*
1074 * Don't migrate a running thread from sched_switch().
1075 */
1076 if (flags & SRQ_OURSELF) {
1077 CTR1(KTR_ULE, "YIELDING %d",
1078 curthread->td_priority);
1079 return (self);
1080 }
1081 pri = ts->ts_thread->td_priority;
1082 cpu = ts->ts_cpu;
1083 /*
1084 * Regardless of affinity, if the last cpu is idle send it there.
1085 */
1086 tdq = TDQ_CPU(cpu);
1087 if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
1088 CTR5(KTR_ULE,
1089 "ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
1090 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1091 tdq->tdq_lowpri);
1092 return (ts->ts_cpu);
1093 }
1094 /*
1095 * If we have affinity, try to place it on the cpu we last ran on.
1096 */
1097 if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
1098 CTR5(KTR_ULE,
1099 "affinity for %d, ltick %d ticks %d pri %d curthread %d",
1100 ts->ts_cpu, ts->ts_rltick, ticks, pri,
1101 tdq->tdq_lowpri);
1102 return (ts->ts_cpu);
1103 }
1104 /*
1105 * Look for an idle group.
1106 */
1107 CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
1108 cpu = ffs(tdq_idle);
1109 if (cpu)
1110 return (--cpu);
1111 /*
1112 * If there are no idle cores see if we can run the thread locally.
1113 * This may improve locality among sleepers and wakers when there
1114 * is shared data.
1115 */
1116 if (tryself && pri < curthread->td_priority) {
1117 CTR1(KTR_ULE, "tryself %d",
1118 curthread->td_priority);
1119 return (self);
1120 }
1121 /*
1122 * Now search for the cpu running the lowest priority thread with
1123 * the least load.
1124 */
1125 if (pick_pri)
1126 cpu = tdq_lowestpri();
1127 else
1128 cpu = tdq_lowestload();
1129 return (cpu);
1130 }
1131
1132 #endif /* SMP */
1133
1134 /*
1135 * Pick the highest priority task we have and return it.
1136 */
1137 static struct td_sched *
1138 tdq_choose(struct tdq *tdq)
1139 {
1140 struct td_sched *ts;
1141
1142 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1143 ts = runq_choose(&tdq->tdq_realtime);
1144 if (ts != NULL)
1145 return (ts);
1146 ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1147 if (ts != NULL) {
1148 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
1149 ("tdq_choose: Invalid priority on timeshare queue %d",
1150 ts->ts_thread->td_priority));
1151 return (ts);
1152 }
1153
1154 ts = runq_choose(&tdq->tdq_idle);
1155 if (ts != NULL) {
1156 KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
1157 ("tdq_choose: Invalid priority on idle queue %d",
1158 ts->ts_thread->td_priority));
1159 return (ts);
1160 }
1161
1162 return (NULL);
1163 }
1164
1165 /*
1166 * Initialize a thread queue.
1167 */
1168 static void
1169 tdq_setup(struct tdq *tdq)
1170 {
1171
1172 if (bootverbose)
1173 printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1174 runq_init(&tdq->tdq_realtime);
1175 runq_init(&tdq->tdq_timeshare);
1176 runq_init(&tdq->tdq_idle);
1177 tdq->tdq_load = 0;
1178 }
1179
1180 #ifdef SMP
1181 static void
1182 tdg_setup(struct tdq_group *tdg)
1183 {
1184 if (bootverbose)
1185 printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
1186 snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
1187 "sched lock %d", (int)TDG_ID(tdg));
1188 mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
1189 MTX_SPIN | MTX_RECURSE);
1190 LIST_INIT(&tdg->tdg_members);
1191 tdg->tdg_load = 0;
1192 tdg->tdg_transferable = 0;
1193 tdg->tdg_cpus = 0;
1194 tdg->tdg_mask = 0;
1195 tdg->tdg_cpumask = 0;
1196 tdg->tdg_idlemask = 0;
1197 }
1198
1199 static void
1200 tdg_add(struct tdq_group *tdg, struct tdq *tdq)
1201 {
1202 if (tdg->tdg_mask == 0)
1203 tdg->tdg_mask |= 1 << TDQ_ID(tdq);
1204 tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
1205 tdg->tdg_cpus++;
1206 tdq->tdq_group = tdg;
1207 tdq->tdq_lock = &tdg->tdg_lock;
1208 LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
1209 if (bootverbose)
1210 printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
1211 TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
1212 }
1213
1214 static void
1215 sched_setup_topology(void)
1216 {
1217 struct tdq_group *tdg;
1218 struct cpu_group *cg;
1219 int balance_groups;
1220 struct tdq *tdq;
1221 int i;
1222 int j;
1223
1224 topology = 1;
1225 balance_groups = 0;
1226 for (i = 0; i < smp_topology->ct_count; i++) {
1227 cg = &smp_topology->ct_group[i];
1228 tdg = &tdq_groups[i];
1229 /*
1230 * Initialize the group.
1231 */
1232 tdg_setup(tdg);
1233 /*
1234 * Find all of the group members and add them.
1235 */
1236 for (j = 0; j < MAXCPU; j++) {
1237 if ((cg->cg_mask & (1 << j)) != 0) {
1238 tdq = TDQ_CPU(j);
1239 tdq_setup(tdq);
1240 tdg_add(tdg, tdq);
1241 }
1242 }
1243 if (tdg->tdg_cpus > 1)
1244 balance_groups = 1;
1245 }
1246 tdg_maxid = smp_topology->ct_count - 1;
1247 if (balance_groups)
1248 sched_balance_groups();
1249 }
1250
1251 static void
1252 sched_setup_smp(void)
1253 {
1254 struct tdq_group *tdg;
1255 struct tdq *tdq;
1256 int cpus;
1257 int i;
1258
1259 for (cpus = 0, i = 0; i < MAXCPU; i++) {
1260 if (CPU_ABSENT(i))
1261 continue;
1262 tdq = &tdq_cpu[i];
1263 tdg = &tdq_groups[i];
1264 /*
1265 * Setup a tdq group with one member.
1266 */
1267 tdg_setup(tdg);
1268 tdq_setup(tdq);
1269 tdg_add(tdg, tdq);
1270 cpus++;
1271 }
1272 tdg_maxid = cpus - 1;
1273 }
1274
1275 /*
1276 * Fake a topology with one group containing all CPUs.
1277 */
1278 static void
1279 sched_fake_topo(void)
1280 {
1281 #ifdef SCHED_FAKE_TOPOLOGY
1282 static struct cpu_top top;
1283 static struct cpu_group group;
1284
1285 top.ct_count = 1;
1286 top.ct_group = &group;
1287 group.cg_mask = all_cpus;
1288 group.cg_count = mp_ncpus;
1289 group.cg_children = 0;
1290 smp_topology = ⊤
1291 #endif
1292 }
1293 #endif
1294
1295 /*
1296 * Setup the thread queues and initialize the topology based on MD
1297 * information.
1298 */
1299 static void
1300 sched_setup(void *dummy)
1301 {
1302 struct tdq *tdq;
1303
1304 tdq = TDQ_SELF();
1305 #ifdef SMP
1306 sched_fake_topo();
1307 /*
1308 * Setup tdqs based on a topology configuration or vanilla SMP based
1309 * on mp_maxid.
1310 */
1311 if (smp_topology == NULL)
1312 sched_setup_smp();
1313 else
1314 sched_setup_topology();
1315 balance_tdq = tdq;
1316 sched_balance();
1317 #else
1318 tdq_setup(tdq);
1319 mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
1320 tdq->tdq_lock = &tdq_lock;
1321 #endif
1322 /*
1323 * To avoid divide-by-zero, we set realstathz to a dummy value
1324 * in case sched_clock() is called before sched_initticks().
1325 */
1326 realstathz = hz;
1327 sched_slice = (realstathz/10); /* ~100ms */
1328 tickincr = 1 << SCHED_TICK_SHIFT;
1329
1330 /* Add thread0's load since it's running. */
1331 TDQ_LOCK(tdq);
1332 thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1333 tdq_load_add(tdq, &td_sched0);
1334 TDQ_UNLOCK(tdq);
1335 }
1336
1337 /*
1338 * This routine determines the tickincr after stathz and hz are setup.
1339 */
1340 /* ARGSUSED */
1341 static void
1342 sched_initticks(void *dummy)
1343 {
1344 int incr;
1345
1346 realstathz = stathz ? stathz : hz;
1347 sched_slice = (realstathz/10); /* ~100ms */
1348
1349 /*
1350 * tickincr is shifted out by 10 to avoid rounding errors due to
1351 * hz not being evenly divisible by stathz on all platforms.
1352 */
1353 incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1354 /*
1355 * This does not work for values of stathz that are more than
1356 * (1 << SCHED_TICK_SHIFT) * hz. In practice this does not happen.
1357 */
1358 if (incr == 0)
1359 incr = 1;
1360 tickincr = incr;
1361 #ifdef SMP
1362 /*
1363 * Set the default balance interval now that we know
1364 * what realstathz is.
1365 */
1366 balance_interval = realstathz;
1367 /*
1368 * Set steal thresh to log2(mp_ncpus) but no greater than 4. This
1369 * prevents excess thrashing on large machines and excess idle on
1370 * smaller machines.
1371 */
1372 steal_thresh = min(ffs(mp_ncpus) - 1, 4);
1373 affinity = SCHED_AFFINITY_DEFAULT;
1374 #endif
1375 }
1376
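/*
 * Example of the resulting values (assuming hz = 1000 and stathz = 127):
 * realstathz = 127, sched_slice = 12 stathz ticks (~95ms), and
 * tickincr = (1000 << 10) / 127 = 8062, i.e. roughly 7.87 hz ticks per
 * stathz tick once the SCHED_TICK_SHIFT scaling is stripped.
 */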
1377
1378 /*
1379 * This is the core of the interactivity algorithm. Determines a score based
1380 * on past behavior. It is the ratio of sleep time to run time scaled to
1381 * a [0, 100] integer. This is the voluntary sleep time of a process, which
1382 * differs from the cpu usage because it does not account for time spent
1383 * waiting on a run-queue. Would be prettier if we had floating point.
1384 */
1385 static int
1386 sched_interact_score(struct thread *td)
1387 {
1388 struct td_sched *ts;
1389 int div;
1390
1391 ts = td->td_sched;
1392 /*
1393 * The score is only needed if this is likely to be an interactive
1394 * task. Don't go through the expense of computing it if there's
1395 * no chance.
1396 */
1397 if (sched_interact <= SCHED_INTERACT_HALF &&
1398 ts->ts_runtime >= ts->ts_slptime)
1399 return (SCHED_INTERACT_HALF);
1400
1401 if (ts->ts_runtime > ts->ts_slptime) {
1402 div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1403 return (SCHED_INTERACT_HALF +
1404 (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1405 }
1406 if (ts->ts_slptime > ts->ts_runtime) {
1407 div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1408 return (ts->ts_runtime / div);
1409 }
1410 /* runtime == slptime */
1411 if (ts->ts_runtime)
1412 return (SCHED_INTERACT_HALF);
1413
1414 /*
1415 * This can happen if slptime and runtime are 0.
1416 */
1417 return (0);
1418
1419 }
1420
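/*
 * Two worked examples with arbitrary tick counts: a thread with
 * ts_slptime = 4000 and ts_runtime = 1000 gets div = 4000 / 50 = 80 and a
 * score of 1000 / 80 = 12, well under the default threshold of 30, so it
 * is considered interactive.  A thread whose run time meets or exceeds
 * its sleep time takes the shortcut at the top (the default threshold is
 * below SCHED_INTERACT_HALF) and reports SCHED_INTERACT_HALF, already
 * firmly non-interactive.
 */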
1421 /*
1422 * Scale the scheduling priority according to the "interactivity" of this
1423 * process.
1424 */
1425 static void
1426 sched_priority(struct thread *td)
1427 {
1428 int score;
1429 int pri;
1430
1431 if (td->td_pri_class != PRI_TIMESHARE)
1432 return;
1433 /*
1434 * If the score is interactive we place the thread in the realtime
1435 * queue with a priority that is less than kernel and interrupt
1436 * priorities. These threads are not subject to nice restrictions.
1437 *
1438 * Scores greater than this are placed on the normal timeshare queue
1439 * where the priority is partially decided by the most recent cpu
1440 * utilization and the rest is decided by nice value.
1441 *
1442 * The nice value of the process has a linear effect on the calculated
1443 * score. Negative nice values make it easier for a thread to be
1444 * considered interactive.
1445 */
1446 score = imax(0, sched_interact_score(td) - td->td_proc->p_nice);
1447 if (score < sched_interact) {
1448 pri = PRI_MIN_REALTIME;
1449 pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
1450 * score;
1451 KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
1452 ("sched_priority: invalid interactive priority %d score %d",
1453 pri, score));
1454 } else {
1455 pri = SCHED_PRI_MIN;
1456 if (td->td_sched->ts_ticks)
1457 pri += SCHED_PRI_TICKS(td->td_sched);
1458 pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1459 KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
1460 ("sched_priority: invalid priority %d: nice %d, "
1461 "ticks %d ftick %d ltick %d tick pri %d",
1462 pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
1463 td->td_sched->ts_ftick, td->td_sched->ts_ltick,
1464 SCHED_PRI_TICKS(td->td_sched)));
1465 }
1466 sched_user_prio(td, pri);
1467
1468 return;
1469 }
1470
1471 /*
1472 * This routine enforces a maximum limit on the amount of scheduling history
1473 * kept. It is called after either the slptime or runtime is adjusted. This
1474 * function is ugly due to integer math.
1475 */
1476 static void
1477 sched_interact_update(struct thread *td)
1478 {
1479 struct td_sched *ts;
1480 u_int sum;
1481
1482 ts = td->td_sched;
1483 sum = ts->ts_runtime + ts->ts_slptime;
1484 if (sum < SCHED_SLP_RUN_MAX)
1485 return;
1486 /*
1487 * This only happens from two places:
1488 * 1) We have added an unusual amount of run time from fork_exit.
1489 * 2) We have added an unusual amount of sleep time from sched_sleep().
1490 */
1491 if (sum > SCHED_SLP_RUN_MAX * 2) {
1492 if (ts->ts_runtime > ts->ts_slptime) {
1493 ts->ts_runtime = SCHED_SLP_RUN_MAX;
1494 ts->ts_slptime = 1;
1495 } else {
1496 ts->ts_slptime = SCHED_SLP_RUN_MAX;
1497 ts->ts_runtime = 1;
1498 }
1499 return;
1500 }
1501 /*
1502 * If we have exceeded by more than 1/5th then the algorithm below
1503 * will not bring us back into range. Dividing by two here forces
1504 * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1505 */
1506 if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1507 ts->ts_runtime /= 2;
1508 ts->ts_slptime /= 2;
1509 return;
1510 }
1511 ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1512 ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1513 }
1514
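/*
 * Example with hz = 1000: SCHED_SLP_RUN_MAX is 5000 << SCHED_TICK_SHIFT.
 * A sum just over that limit (but under 6/5 of it) is scaled by 4/5,
 * while a sum beyond 6/5 of the limit is halved; either way the
 * runtime/slptime ratio, and therefore the interactivity score, is
 * preserved while the history shrinks back within the cap.
 */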
1515 /*
1516 * Scale back the interactivity history when a child thread is created. The
1517 * history is inherited from the parent but the thread may behave totally
1518 * differently. For example, a shell spawning a compiler process. We want
1519 * to learn that the compiler is behaving badly very quickly.
1520 */
1521 static void
1522 sched_interact_fork(struct thread *td)
1523 {
1524 int ratio;
1525 int sum;
1526
1527 sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
1528 if (sum > SCHED_SLP_RUN_FORK) {
1529 ratio = sum / SCHED_SLP_RUN_FORK;
1530 td->td_sched->ts_runtime /= ratio;
1531 td->td_sched->ts_slptime /= ratio;
1532 }
1533 }
1534
1535 /*
1536 * Called from proc0_init() to setup the scheduler fields.
1537 */
1538 void
1539 schedinit(void)
1540 {
1541
1542 /*
1543 * Set up the scheduler specific parts of proc0.
1544 */
1545 proc0.p_sched = NULL; /* XXX */
1546 thread0.td_sched = &td_sched0;
1547 td_sched0.ts_ltick = ticks;
1548 td_sched0.ts_ftick = ticks;
1549 td_sched0.ts_thread = &thread0;
1550 }
1551
1552 /*
1553 * This is only somewhat accurate since given many processes of the same
1554 * priority they will switch when their slices run out, which will be
1555 * at most sched_slice stathz ticks.
1556 */
1557 int
1558 sched_rr_interval(void)
1559 {
1560
1561 /* Convert sched_slice to hz */
1562 return (hz/(realstathz/sched_slice));
1563 }
1564
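/*
 * Example, continuing the hz = 1000, stathz = 127 case from above:
 * sched_slice is 12 stathz ticks, realstathz / sched_slice is 10, and the
 * reported round-robin interval is hz / 10 = 100 hz ticks, roughly 100ms.
 */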
1565 /*
1566 * Update the percent cpu tracking information when it is requested or
1567 * the total history exceeds the maximum. We keep a sliding history of
1568 * tick counts that slowly decays. This is less precise than the 4BSD
1569 * mechanism since it happens with less regular and frequent events.
1570 */
1571 static void
1572 sched_pctcpu_update(struct td_sched *ts)
1573 {
1574
1575 if (ts->ts_ticks == 0)
1576 return;
1577 if (ticks - (hz / 10) < ts->ts_ltick &&
1578 SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
1579 return;
1580 /*
1581 * Adjust counters and watermark for pctcpu calc.
1582 */
1583 if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
1584 ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
1585 SCHED_TICK_TARG;
1586 else
1587 ts->ts_ticks = 0;
1588 ts->ts_ltick = ticks;
1589 ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
1590 }
1591
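/*
 * For illustration, assuming hz = 1000: if a thread's window has grown to
 * 12 seconds (ticks - ts_ftick == 12000), ts_ticks is rescaled to its
 * per-tick average times SCHED_TICK_TARG, i.e. the same run rate spread
 * over a 10 second window, and ts_ftick is then pulled forward so the
 * window is exactly SCHED_TICK_TARG ticks long again.
 */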
1592 /*
1593 * Adjust the priority of a thread. Move it to the appropriate run-queue
1594 * if necessary. This is the back-end for several priority related
1595 * functions.
1596 */
1597 static void
1598 sched_thread_priority(struct thread *td, u_char prio)
1599 {
1600 struct td_sched *ts;
1601
1602 CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
1603 td, td->td_proc->p_comm, td->td_priority, prio, curthread,
1604 curthread->td_proc->p_comm);
1605 ts = td->td_sched;
1606 THREAD_LOCK_ASSERT(td, MA_OWNED);
1607 if (td->td_priority == prio)
1608 return;
1609
1610 if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1611 /*
1612 * If the priority has been elevated due to priority
1613 * propagation, we may have to move ourselves to a new
1614 * queue. This could be optimized to not re-add in some
1615 * cases.
1616 */
1617 sched_rem(td);
1618 td->td_priority = prio;
1619 sched_add(td, SRQ_BORROWING);
1620 } else {
1621 #ifdef SMP
1622 struct tdq *tdq;
1623
1624 tdq = TDQ_CPU(ts->ts_cpu);
1625 if (prio < tdq->tdq_lowpri)
1626 tdq->tdq_lowpri = prio;
1627 #endif
1628 td->td_priority = prio;
1629 }
1630 }
1631
1632 /*
1633 * Update a thread's priority when it is lent another thread's
1634 * priority.
1635 */
1636 void
1637 sched_lend_prio(struct thread *td, u_char prio)
1638 {
1639
1640 td->td_flags |= TDF_BORROWING;
1641 sched_thread_priority(td, prio);
1642 }
1643
1644 /*
1645 * Restore a thread's priority when priority propagation is
1646 * over. The prio argument is the minimum priority the thread
1647 * needs to have to satisfy other possible priority lending
1648 * requests. If the thread's regular priority is less
1649 * important than prio, the thread will keep a priority boost
1650 * of prio.
1651 */
1652 void
1653 sched_unlend_prio(struct thread *td, u_char prio)
1654 {
1655 u_char base_pri;
1656
1657 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1658 td->td_base_pri <= PRI_MAX_TIMESHARE)
1659 base_pri = td->td_user_pri;
1660 else
1661 base_pri = td->td_base_pri;
1662 if (prio >= base_pri) {
1663 td->td_flags &= ~TDF_BORROWING;
1664 sched_thread_priority(td, base_pri);
1665 } else
1666 sched_lend_prio(td, prio);
1667 }
1668
1669 /*
1670 * Standard entry for setting the priority to an absolute value.
1671 */
1672 void
1673 sched_prio(struct thread *td, u_char prio)
1674 {
1675 u_char oldprio;
1676
1677 /* First, update the base priority. */
1678 td->td_base_pri = prio;
1679
1680 /*
1681 * If the thread is borrowing another thread's priority, don't
1682 * ever lower the priority.
1683 */
1684 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1685 return;
1686
1687 /* Change the real priority. */
1688 oldprio = td->td_priority;
1689 sched_thread_priority(td, prio);
1690
1691 /*
1692 * If the thread is on a turnstile, then let the turnstile update
1693 * its state.
1694 */
1695 if (TD_ON_LOCK(td) && oldprio != prio)
1696 turnstile_adjust(td, oldprio);
1697 }
1698
1699 /*
1700 * Set the base user priority; does not affect the current running priority.
1701 */
1702 void
1703 sched_user_prio(struct thread *td, u_char prio)
1704 {
1705 u_char oldprio;
1706
1707 td->td_base_user_pri = prio;
1708 if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
1709 return;
1710 oldprio = td->td_user_pri;
1711 td->td_user_pri = prio;
1712 }
1713
1714 void
1715 sched_lend_user_prio(struct thread *td, u_char prio)
1716 {
1717 u_char oldprio;
1718
1719 THREAD_LOCK_ASSERT(td, MA_OWNED);
1720 td->td_flags |= TDF_UBORROWING;
1721 oldprio = td->td_user_pri;
1722 td->td_user_pri = prio;
1723 }
1724
1725 void
1726 sched_unlend_user_prio(struct thread *td, u_char prio)
1727 {
1728 u_char base_pri;
1729
1730 THREAD_LOCK_ASSERT(td, MA_OWNED);
1731 base_pri = td->td_base_user_pri;
1732 if (prio >= base_pri) {
1733 td->td_flags &= ~TDF_UBORROWING;
1734 sched_user_prio(td, base_pri);
1735 } else {
1736 sched_lend_user_prio(td, prio);
1737 }
1738 }
1739
1740 /*
1741 * Add the thread passed as 'newtd' to the run queue before selecting
1742 * the next thread to run. This is only used for KSE.
1743 */
1744 static void
1745 sched_switchin(struct tdq *tdq, struct thread *td)
1746 {
1747 #ifdef SMP
1748 spinlock_enter();
1749 TDQ_UNLOCK(tdq);
1750 thread_lock(td);
1751 spinlock_exit();
1752 sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
1753 #else
1754 td->td_lock = TDQ_LOCKPTR(tdq);
1755 #endif
1756 tdq_add(tdq, td, SRQ_YIELDING);
1757 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1758 }
1759
1760 /*
1761 * Handle migration from sched_switch(). This happens only for
1762 * cpu binding.
1763 */
1764 static struct mtx *
1765 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1766 {
1767 struct tdq *tdn;
1768
1769 tdn = TDQ_CPU(td->td_sched->ts_cpu);
1770 #ifdef SMP
1771 /*
1772 * Do the lock dance required to avoid LOR. We grab an extra
1773 * spinlock nesting to prevent preemption while we're
1774 * not holding either run-queue lock.
1775 */
1776 spinlock_enter();
1777 thread_block_switch(td); /* This releases the lock on tdq. */
1778 TDQ_LOCK(tdn);
1779 tdq_add(tdn, td, flags);
1780 tdq_notify(td->td_sched);
1781 /*
1782 * After we unlock tdn the new cpu still can't switch into this
1783 * thread until we've unblocked it in cpu_switch(). The lock
1784 * pointers may match in the case of HTT cores. Don't unlock here
1785 * or we can deadlock when the other CPU runs the IPI handler.
1786 */
1787 if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
1788 TDQ_UNLOCK(tdn);
1789 TDQ_LOCK(tdq);
1790 }
1791 spinlock_exit();
1792 #endif
1793 return (TDQ_LOCKPTR(tdn));
1794 }
1795
1796 /*
1797 * Block a thread for switching. Similar to thread_block() but does not
1798 * bump the spin count.
1799 */
1800 static inline struct mtx *
1801 thread_block_switch(struct thread *td)
1802 {
1803 struct mtx *lock;
1804
1805 THREAD_LOCK_ASSERT(td, MA_OWNED);
1806 lock = td->td_lock;
1807 td->td_lock = &blocked_lock;
1808 mtx_unlock_spin(lock);
1809
1810 return (lock);
1811 }
1812
1813 /*
1814 * Release a thread that was blocked with thread_block_switch().
1815 */
1816 static inline void
1817 thread_unblock_switch(struct thread *td, struct mtx *mtx)
1818 {
1819 atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1820 (uintptr_t)mtx);
1821 }
1822
1823 /*
1824 * Switch threads. This function has to handle threads coming in while
1825 * blocked for some reason, running, or idle. It also must deal with
1826 * migrating a thread from one queue to another as running threads may
1827 * be assigned elsewhere via binding.
1828 */
1829 void
1830 sched_switch(struct thread *td, struct thread *newtd, int flags)
1831 {
1832 struct tdq *tdq;
1833 struct td_sched *ts;
1834 struct mtx *mtx;
1835 int srqflag;
1836 int cpuid;
1837
1838 THREAD_LOCK_ASSERT(td, MA_OWNED);
1839
1840 cpuid = PCPU_GET(cpuid);
1841 tdq = TDQ_CPU(cpuid);
1842 ts = td->td_sched;
1843 mtx = td->td_lock;
1844 #ifdef SMP
1845 ts->ts_rltick = ticks;
1846 if (newtd && newtd->td_priority < tdq->tdq_lowpri)
1847 tdq->tdq_lowpri = newtd->td_priority;
1848 #endif
1849 td->td_lastcpu = td->td_oncpu;
1850 td->td_oncpu = NOCPU;
1851 td->td_flags &= ~TDF_NEEDRESCHED;
1852 td->td_owepreempt = 0;
1853 /*
1854 * The lock pointer in an idle thread should never change. Reset it
1855 * to CAN_RUN as well.
1856 */
1857 if (TD_IS_IDLETHREAD(td)) {
1858 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1859 TD_SET_CAN_RUN(td);
1860 } else if (TD_IS_RUNNING(td)) {
1861 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1862 tdq_load_rem(tdq, ts);
1863 srqflag = (flags & SW_PREEMPT) ?
1864 SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
1865 SRQ_OURSELF|SRQ_YIELDING;
1866 if (ts->ts_cpu == cpuid)
1867 tdq_add(tdq, td, srqflag);
1868 else
1869 mtx = sched_switch_migrate(tdq, td, srqflag);
1870 } else {
1871 /* This thread must be going to sleep. */
1872 TDQ_LOCK(tdq);
1873 mtx = thread_block_switch(td);
1874 tdq_load_rem(tdq, ts);
1875 }
1876 /*
1877 * We enter here with the thread blocked and assigned to the
1878 * appropriate cpu run-queue or sleep-queue and with the current
1879 * thread-queue locked.
1880 */
1881 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
1882 /*
1883 * If KSE assigned a new thread just add it here and let choosethread
1884 * select the best one.
1885 */
1886 if (newtd != NULL)
1887 sched_switchin(tdq, newtd);
1888 newtd = choosethread();
1889 /*
1890 * Call the MD code to switch contexts if necessary.
1891 */
1892 if (td != newtd) {
1893 #ifdef HWPMC_HOOKS
1894 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1895 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
1896 #endif
1897 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
1898 cpu_switch(td, newtd, mtx);
1899 /*
1900 * We may return from cpu_switch on a different cpu. However,
1901 * we always return with td_lock pointing to the current cpu's
1902 * run queue lock.
1903 */
1904 cpuid = PCPU_GET(cpuid);
1905 tdq = TDQ_CPU(cpuid);
1906 #ifdef HWPMC_HOOKS
1907 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1908 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
1909 #endif
1910 } else
1911 thread_unblock_switch(td, mtx);
1912 /*
1913 * Assert that all went well and return.
1914 */
1915 #ifdef SMP
1916 /* We should always get here with the lowest priority td possible */
1917 tdq->tdq_lowpri = td->td_priority;
1918 #endif
1919 TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
1920 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
1921 td->td_oncpu = cpuid;
1922 }
1923
1924 /*
1925 * Adjust thread priorities as a result of a nice request.
1926 */
1927 void
1928 sched_nice(struct proc *p, int nice)
1929 {
1930 struct thread *td;
1931
1932 PROC_LOCK_ASSERT(p, MA_OWNED);
1933 PROC_SLOCK_ASSERT(p, MA_OWNED);
1934
1935 p->p_nice = nice;
1936 FOREACH_THREAD_IN_PROC(p, td) {
1937 thread_lock(td);
1938 sched_priority(td);
1939 sched_prio(td, td->td_base_user_pri);
1940 thread_unlock(td);
1941 }
1942 }
1943
1944 /*
1945 * Record the sleep time for the interactivity scorer.
1946 */
1947 void
1948 sched_sleep(struct thread *td)
1949 {
1950
1951 THREAD_LOCK_ASSERT(td, MA_OWNED);
1952
1953 td->td_slptick = ticks;
1954 }
1955
1956 /*
1957 * Schedule a thread to resume execution and record how long it voluntarily
1958 * slept. We also update the pctcpu, interactivity, and priority.
1959 */
1960 void
1961 sched_wakeup(struct thread *td)
1962 {
1963 struct td_sched *ts;
1964 int slptick;
1965
1966 THREAD_LOCK_ASSERT(td, MA_OWNED);
1967 ts = td->td_sched;
1968 /*
1969 * If we slept for more than a tick update our interactivity and
1970 * priority.
1971 */
1972 slptick = td->td_slptick;
1973 td->td_slptick = 0;
1974 if (slptick && slptick != ticks) {
1975 u_int hzticks;
1976
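		/* Convert the hz ticks slept into fixed-point scheduler ticks. */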
1977 hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
1978 ts->ts_slptime += hzticks;
1979 sched_interact_update(td);
1980 sched_pctcpu_update(ts);
1981 sched_priority(td);
1982 }
1983 /* Reset the slice value after we sleep. */
1984 ts->ts_slice = sched_slice;
1985 sched_add(td, SRQ_BORING);
1986 }
1987
1988 /*
1989 * Penalize the parent for creating a new child and initialize the child's
1990 * priority.
1991 */
1992 void
1993 sched_fork(struct thread *td, struct thread *child)
1994 {
1995 THREAD_LOCK_ASSERT(td, MA_OWNED);
1996 sched_fork_thread(td, child);
1997 /*
1998 * Penalize the parent and child for forking.
1999 */
2000 sched_interact_fork(child);
2001 sched_priority(child);
2002 td->td_sched->ts_runtime += tickincr;
2003 sched_interact_update(td);
2004 sched_priority(td);
2005 }
2006
2007 /*
2008 	 * Fork a new thread, which may be within the same process.
2009 */
2010 void
2011 sched_fork_thread(struct thread *td, struct thread *child)
2012 {
2013 struct td_sched *ts;
2014 struct td_sched *ts2;
2015
2016 /*
2017 * Initialize child.
2018 */
2019 THREAD_LOCK_ASSERT(td, MA_OWNED);
2020 sched_newthread(child);
2021 child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
2022 ts = td->td_sched;
2023 ts2 = child->td_sched;
2024 ts2->ts_cpu = ts->ts_cpu;
2025 ts2->ts_runq = NULL;
2026 /*
2027 	 * Grab our parent's cpu estimation information and priority.
2028 */
2029 ts2->ts_ticks = ts->ts_ticks;
2030 ts2->ts_ltick = ts->ts_ltick;
2031 ts2->ts_ftick = ts->ts_ftick;
2032 child->td_user_pri = td->td_user_pri;
2033 child->td_base_user_pri = td->td_base_user_pri;
2034 /*
2035 * And update interactivity score.
2036 */
2037 ts2->ts_slptime = ts->ts_slptime;
2038 ts2->ts_runtime = ts->ts_runtime;
2039 ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
2040 }
2041
2042 /*
2043 * Adjust the priority class of a thread.
2044 */
2045 void
2046 sched_class(struct thread *td, int class)
2047 {
2048
2049 THREAD_LOCK_ASSERT(td, MA_OWNED);
2050 if (td->td_pri_class == class)
2051 return;
2052
2053 #ifdef SMP
2054 /*
2055 * On SMP if we're on the RUNQ we must adjust the transferable
2056 	 * count because we could be changing to or from an interrupt
2057 * class.
2058 */
2059 if (TD_ON_RUNQ(td)) {
2060 struct tdq *tdq;
2061
2062 tdq = TDQ_CPU(td->td_sched->ts_cpu);
2063 if (THREAD_CAN_MIGRATE(td)) {
2064 tdq->tdq_transferable--;
2065 tdq->tdq_group->tdg_transferable--;
2066 }
2067 td->td_pri_class = class;
2068 if (THREAD_CAN_MIGRATE(td)) {
2069 tdq->tdq_transferable++;
2070 tdq->tdq_group->tdg_transferable++;
2071 }
2072 }
2073 #endif
2074 td->td_pri_class = class;
2075 }
2076
2077 /*
2078 * Return some of the child's priority and interactivity to the parent.
2079 */
2080 void
2081 sched_exit(struct proc *p, struct thread *child)
2082 {
2083 struct thread *td;
2084
2085 CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
2086 child, child->td_proc->p_comm, child->td_priority);
2087
2088 PROC_SLOCK_ASSERT(p, MA_OWNED);
2089 td = FIRST_THREAD_IN_PROC(p);
2090 sched_exit_thread(td, child);
2091 }
2092
2093 /*
2094 * Penalize another thread for the time spent on this one. This helps to
2095 * worsen the priority and interactivity of processes which schedule batch
2096 * jobs such as make. This has little effect on the make process itself but
2097 * causes new processes spawned by it to receive worse scores immediately.
2098 */
2099 void
2100 sched_exit_thread(struct thread *td, struct thread *child)
2101 {
2102
2103 CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
2104 child, child->td_proc->p_comm, child->td_priority);
2105
2106 #ifdef KSE
2107 /*
2108 * KSE forks and exits so often that this penalty causes short-lived
2109 * threads to always be non-interactive. This causes mozilla to
2110 * crawl under load.
2111 */
2112 if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
2113 return;
2114 #endif
2115 /*
2116 * Give the child's runtime to the parent without returning the
2117 * sleep time as a penalty to the parent. This causes shells that
2118 * launch expensive things to mark their children as expensive.
2119 */
2120 thread_lock(td);
2121 td->td_sched->ts_runtime += child->td_sched->ts_runtime;
2122 sched_interact_update(td);
2123 sched_priority(td);
2124 thread_unlock(td);
2125 }
2126
2127 /*
2128 * Fix priorities on return to user-space. Priorities may be elevated due
2129 * to static priorities in msleep() or similar.
2130 */
2131 void
2132 sched_userret(struct thread *td)
2133 {
2134 /*
2135 * XXX we cheat slightly on the locking here to avoid locking in
2136 * the usual case. Setting td_priority here is essentially an
2137 * incomplete workaround for not setting it properly elsewhere.
2138 * Now that some interrupt handlers are threads, not setting it
2139 * properly elsewhere can clobber it in the window between setting
2140 * it here and returning to user mode, so don't waste time setting
2141 * it perfectly here.
2142 */
2143 KASSERT((td->td_flags & TDF_BORROWING) == 0,
2144 ("thread with borrowed priority returning to userland"));
2145 if (td->td_priority != td->td_user_pri) {
2146 thread_lock(td);
2147 td->td_priority = td->td_user_pri;
2148 td->td_base_pri = td->td_user_pri;
2149 thread_unlock(td);
2150 }
2151 }
2152
2153 /*
2154 * Handle a stathz tick. This is really only relevant for timeshare
2155 * threads.
2156 */
2157 void
2158 sched_clock(struct thread *td)
2159 {
2160 struct tdq *tdq;
2161 struct td_sched *ts;
2162
2163 THREAD_LOCK_ASSERT(td, MA_OWNED);
2164 tdq = TDQ_SELF();
2165 #ifdef SMP
2166 /*
2167 * We run the long term load balancer infrequently on the first cpu.
2168 */
2169 if (balance_tdq == tdq) {
2170 if (balance_ticks && --balance_ticks == 0)
2171 sched_balance();
2172 if (balance_group_ticks && --balance_group_ticks == 0)
2173 sched_balance_groups();
2174 }
2175 #endif
2176 /*
2177 * Advance the insert index once for each tick to ensure that all
2178 * threads get a chance to run.
2179 */
2180 if (tdq->tdq_idx == tdq->tdq_ridx) {
2181 tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2182 if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2183 tdq->tdq_ridx = tdq->tdq_idx;
2184 }
2185 ts = td->td_sched;
2186 /*
2187 * We only do slicing code for TIMESHARE threads.
2188 */
2189 if (td->td_pri_class != PRI_TIMESHARE)
2190 return;
2191 /*
2192 * We used a tick; charge it to the thread so that we can compute our
2193 * interactivity.
2194 */
2195 td->td_sched->ts_runtime += tickincr;
2196 sched_interact_update(td);
2197 /*
2198 * We used up one time slice.
2199 */
2200 if (--ts->ts_slice > 0)
2201 return;
2202 /*
2203 * We're out of time, recompute priorities and requeue.
2204 */
2205 sched_priority(td);
2206 td->td_flags |= TDF_NEEDRESCHED;
2207 }
2208
2209 /*
2210 * Called once per hz tick. Used for cpu utilization information. This
2211 * is easier than trying to scale based on stathz.
2212 */
2213 void
2214 sched_tick(void)
2215 {
2216 struct td_sched *ts;
2217
2218 ts = curthread->td_sched;
2219 /* Adjust ticks for pctcpu */
2220 ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
2221 ts->ts_ltick = ticks;
2222 /*
2223 	 * Update if we've exceeded our desired tick threshold by over one
2224 * second.
2225 */
2226 if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
2227 sched_pctcpu_update(ts);
2228 }
2229
2230 /*
2231 * Return whether the current CPU has runnable tasks. Used for in-kernel
2232 * cooperative idle threads.
2233 */
2234 int
2235 sched_runnable(void)
2236 {
2237 struct tdq *tdq;
2238 int load;
2239
2240 load = 1;
2241
2242 tdq = TDQ_SELF();
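	/*
	 * If called from the idle thread any load at all means there is work
	 * to do; otherwise discount the current thread from the load first.
	 */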
2243 if ((curthread->td_flags & TDF_IDLETD) != 0) {
2244 if (tdq->tdq_load > 0)
2245 goto out;
2246 } else
2247 if (tdq->tdq_load - 1 > 0)
2248 goto out;
2249 load = 0;
2250 out:
2251 return (load);
2252 }
2253
2254 /*
2255 	 * Choose the highest priority thread to run.  The thread is removed from
2256 	 * the run-queue while it runs, but its load contribution remains.  For SMP we set
2257 * the tdq in the global idle bitmask if it idles here.
2258 */
2259 struct thread *
2260 sched_choose(void)
2261 {
2262 #ifdef SMP
2263 struct tdq_group *tdg;
2264 #endif
2265 struct td_sched *ts;
2266 struct tdq *tdq;
2267
2268 tdq = TDQ_SELF();
2269 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2270 ts = tdq_choose(tdq);
2271 if (ts) {
2272 tdq_runq_rem(tdq, ts);
2273 return (ts->ts_thread);
2274 }
2275 #ifdef SMP
2276 /*
2277 * We only set the idled bit when all of the cpus in the group are
2278 * idle. Otherwise we could get into a situation where a thread bounces
2279 	 * back and forth between two idle cores on separate physical CPUs.
2280 */
2281 tdg = tdq->tdq_group;
2282 tdg->tdg_idlemask |= PCPU_GET(cpumask);
2283 if (tdg->tdg_idlemask == tdg->tdg_cpumask)
2284 atomic_set_int(&tdq_idle, tdg->tdg_mask);
2285 tdq->tdq_lowpri = PRI_MAX_IDLE;
2286 #endif
2287 return (PCPU_GET(idlethread));
2288 }
2289
2290 /*
2291 	 * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2292 	 * we always request it, and it takes effect once we exit a critical section.
2293 */
2294 static inline void
2295 sched_setpreempt(struct thread *td)
2296 {
2297 struct thread *ctd;
2298 int cpri;
2299 int pri;
2300
2301 ctd = curthread;
2302 pri = td->td_priority;
2303 cpri = ctd->td_priority;
2304 if (td->td_priority < ctd->td_priority)
2305 curthread->td_flags |= TDF_NEEDRESCHED;
2306 if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2307 return;
2308 /*
2309 	 * Always preempt a thread running at IDLE priority.  Otherwise preempt
2310 	 * only when the new thread's priority is at or below preempt_thresh
2311 	 * (by default this roughly limits preemption to interrupt threads).
2311 */
2312 if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
2313 return;
2314 ctd->td_owepreempt = 1;
2315 return;
2316 }
2317
2318 /*
2319 * Add a thread to a thread queue. Initializes priority, slice, runq, and
2320 	 * adds it to the appropriate queue.  This is the internal function called
2321 * when the tdq is predetermined.
2322 */
2323 void
2324 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2325 {
2326 struct td_sched *ts;
2327 int class;
2328 #ifdef SMP
2329 int cpumask;
2330 #endif
2331
2332 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2333 KASSERT((td->td_inhibitors == 0),
2334 ("sched_add: trying to run inhibited thread"));
2335 KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2336 ("sched_add: bad thread state"));
2337 KASSERT(td->td_flags & TDF_INMEM,
2338 ("sched_add: thread swapped out"));
2339
2340 ts = td->td_sched;
2341 class = PRI_BASE(td->td_pri_class);
2342 TD_SET_RUNQ(td);
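	/* Refresh the time slice if the previous one has been used up. */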
2343 if (ts->ts_slice == 0)
2344 ts->ts_slice = sched_slice;
2345 /*
2346 * Pick the run queue based on priority.
2347 */
2348 if (td->td_priority <= PRI_MAX_REALTIME)
2349 ts->ts_runq = &tdq->tdq_realtime;
2350 else if (td->td_priority <= PRI_MAX_TIMESHARE)
2351 ts->ts_runq = &tdq->tdq_timeshare;
2352 else
2353 ts->ts_runq = &tdq->tdq_idle;
2354 #ifdef SMP
2355 cpumask = 1 << ts->ts_cpu;
2356 /*
2357 * If we had been idle, clear our bit in the group and potentially
2358 * the global bitmap.
2359 */
2360 if ((class != PRI_IDLE && class != PRI_ITHD) &&
2361 (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
2362 /*
2363 * Check to see if our group is unidling, and if so, remove it
2364 * from the global idle mask.
2365 */
2366 if (tdq->tdq_group->tdg_idlemask ==
2367 tdq->tdq_group->tdg_cpumask)
2368 atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
2369 /*
2370 * Now remove ourselves from the group specific idle mask.
2371 */
2372 tdq->tdq_group->tdg_idlemask &= ~cpumask;
2373 }
2374 if (td->td_priority < tdq->tdq_lowpri)
2375 tdq->tdq_lowpri = td->td_priority;
2376 #endif
2377 tdq_runq_add(tdq, ts, flags);
2378 tdq_load_add(tdq, ts);
2379 }
2380
2381 /*
2382 * Select the target thread queue and add a thread to it. Request
2383 * preemption or IPI a remote processor if required.
2384 */
2385 void
2386 sched_add(struct thread *td, int flags)
2387 {
2388 struct td_sched *ts;
2389 struct tdq *tdq;
2390 #ifdef SMP
2391 int cpuid;
2392 int cpu;
2393 #endif
2394 CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
2395 td, td->td_proc->p_comm, td->td_priority, curthread,
2396 curthread->td_proc->p_comm);
2397 THREAD_LOCK_ASSERT(td, MA_OWNED);
2398 ts = td->td_sched;
2399 /*
2400 * Recalculate the priority before we select the target cpu or
2401 * run-queue.
2402 */
2403 if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2404 sched_priority(td);
2405 #ifdef SMP
2406 cpuid = PCPU_GET(cpuid);
2407 /*
2408 	 * Pick the destination cpu and, if it isn't ours, transfer the thread
2409 	 * to the target cpu's run queue.
2410 */
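	/*
	 * Interrupt-priority threads that may migrate run on the current cpu,
	 * non-migratable threads keep their assigned cpu, and everything else
	 * is placed by sched_pickcpu().
	 */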
2411 if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td))
2412 cpu = cpuid;
2413 else if (!THREAD_CAN_MIGRATE(td))
2414 cpu = ts->ts_cpu;
2415 else
2416 cpu = sched_pickcpu(ts, flags);
2417 tdq = sched_setcpu(ts, cpu, flags);
2418 tdq_add(tdq, td, flags);
2419 if (cpu != cpuid) {
2420 tdq_notify(ts);
2421 return;
2422 }
2423 #else
2424 tdq = TDQ_SELF();
2425 TDQ_LOCK(tdq);
2426 /*
2427 * Now that the thread is moving to the run-queue, set the lock
2428 * to the scheduler's lock.
2429 */
2430 thread_lock_set(td, TDQ_LOCKPTR(tdq));
2431 tdq_add(tdq, td, flags);
2432 #endif
2433 if (!(flags & SRQ_YIELDING))
2434 sched_setpreempt(td);
2435 }
2436
2437 /*
2438 * Remove a thread from a run-queue without running it. This is used
2439 * when we're stealing a thread from a remote queue. Otherwise all threads
2440 * exit by calling sched_exit_thread() and sched_throw() themselves.
2441 */
2442 void
2443 sched_rem(struct thread *td)
2444 {
2445 struct tdq *tdq;
2446 struct td_sched *ts;
2447
2448 CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
2449 td, td->td_proc->p_comm, td->td_priority, curthread,
2450 curthread->td_proc->p_comm);
2451 ts = td->td_sched;
2452 tdq = TDQ_CPU(ts->ts_cpu);
2453 TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2454 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2455 KASSERT(TD_ON_RUNQ(td),
2456 ("sched_rem: thread not on run queue"));
2457 tdq_runq_rem(tdq, ts);
2458 tdq_load_rem(tdq, ts);
2459 TD_SET_CAN_RUN(td);
2460 }
2461
2462 /*
2463 * Fetch cpu utilization information. Updates on demand.
2464 */
2465 fixpt_t
2466 sched_pctcpu(struct thread *td)
2467 {
2468 fixpt_t pctcpu;
2469 struct td_sched *ts;
2470
2471 pctcpu = 0;
2472 ts = td->td_sched;
2473 if (ts == NULL)
2474 return (0);
2475
2476 thread_lock(td);
2477 if (ts->ts_ticks) {
2478 int rtick;
2479
2480 sched_pctcpu_update(ts);
2481 		/* How many rticks per second? */
2482 rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
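		/* Scale into the fixed-point pctcpu format; FSCALE represents 1.0. */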
2483 pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2484 }
2485 thread_unlock(td);
2486
2487 return (pctcpu);
2488 }
2489
2490 /*
2491 * Bind a thread to a target cpu.
2492 */
2493 void
2494 sched_bind(struct thread *td, int cpu)
2495 {
2496 struct td_sched *ts;
2497
2498 THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2499 ts = td->td_sched;
2500 if (ts->ts_flags & TSF_BOUND)
2501 sched_unbind(td);
2502 ts->ts_flags |= TSF_BOUND;
2503 #ifdef SMP
2504 sched_pin();
2505 if (PCPU_GET(cpuid) == cpu)
2506 return;
2507 ts->ts_cpu = cpu;
2508 /* When we return from mi_switch we'll be on the correct cpu. */
2509 mi_switch(SW_VOL, NULL);
2510 #endif
2511 }
2512
2513 /*
2514 * Release a bound thread.
2515 */
2516 void
2517 sched_unbind(struct thread *td)
2518 {
2519 struct td_sched *ts;
2520
2521 THREAD_LOCK_ASSERT(td, MA_OWNED);
2522 ts = td->td_sched;
2523 if ((ts->ts_flags & TSF_BOUND) == 0)
2524 return;
2525 ts->ts_flags &= ~TSF_BOUND;
2526 #ifdef SMP
2527 sched_unpin();
2528 #endif
2529 }
2530
2531 int
2532 sched_is_bound(struct thread *td)
2533 {
2534 THREAD_LOCK_ASSERT(td, MA_OWNED);
2535 return (td->td_sched->ts_flags & TSF_BOUND);
2536 }
2537
2538 /*
2539 * Basic yield call.
2540 */
2541 void
2542 sched_relinquish(struct thread *td)
2543 {
2544 thread_lock(td);
2545 SCHED_STAT_INC(switch_relinquish);
2546 mi_switch(SW_VOL, NULL);
2547 thread_unlock(td);
2548 }
2549
2550 /*
2551 * Return the total system load.
2552 */
2553 int
2554 sched_load(void)
2555 {
2556 #ifdef SMP
2557 int total;
2558 int i;
2559
2560 total = 0;
2561 for (i = 0; i <= tdg_maxid; i++)
2562 total += TDQ_GROUP(i)->tdg_load;
2563 return (total);
2564 #else
2565 return (TDQ_SELF()->tdq_sysload);
2566 #endif
2567 }
2568
2569 int
2570 sched_sizeof_proc(void)
2571 {
2572 return (sizeof(struct proc));
2573 }
2574
2575 int
2576 sched_sizeof_thread(void)
2577 {
2578 return (sizeof(struct thread) + sizeof(struct td_sched));
2579 }
2580
2581 /*
2582 * The actual idle process.
2583 */
2584 void
2585 sched_idletd(void *dummy)
2586 {
2587 struct thread *td;
2588 struct tdq *tdq;
2589
2590 td = curthread;
2591 tdq = TDQ_SELF();
2592 mtx_assert(&Giant, MA_NOTOWNED);
2593 /* ULE relies on preemption for idle interruption. */
2594 for (;;) {
2595 #ifdef SMP
2596 if (tdq_idled(tdq))
2597 cpu_idle();
2598 #else
2599 cpu_idle();
2600 #endif
2601 }
2602 }
2603
2604 /*
2605 * A CPU is entering for the first time or a thread is exiting.
2606 */
2607 void
2608 sched_throw(struct thread *td)
2609 {
2610 struct thread *newtd;
2611 struct tdq *tdq;
2612
2613 tdq = TDQ_SELF();
2614 if (td == NULL) {
2615 /* Correct spinlock nesting and acquire the correct lock. */
2616 TDQ_LOCK(tdq);
2617 spinlock_exit();
2618 } else {
2619 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2620 tdq_load_rem(tdq, td->td_sched);
2621 }
2622 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2623 newtd = choosethread();
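	/* The chosen thread inherits ownership of this cpu's run-queue lock. */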
2624 TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2625 PCPU_SET(switchtime, cpu_ticks());
2626 PCPU_SET(switchticks, ticks);
2627 cpu_throw(td, newtd); /* doesn't return */
2628 }
2629
2630 /*
2631 * This is called from fork_exit(). Just acquire the correct locks and
2632 * let fork do the rest of the work.
2633 */
2634 void
2635 sched_fork_exit(struct thread *td)
2636 {
2637 struct td_sched *ts;
2638 struct tdq *tdq;
2639 int cpuid;
2640
2641 /*
2642 * Finish setting up thread glue so that it begins execution in a
2643 * non-nested critical section with the scheduler lock held.
2644 */
2645 cpuid = PCPU_GET(cpuid);
2646 tdq = TDQ_CPU(cpuid);
2647 ts = td->td_sched;
2648 if (TD_IS_IDLETHREAD(td))
2649 td->td_lock = TDQ_LOCKPTR(tdq);
2650 MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2651 td->td_oncpu = cpuid;
2652 TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2653 }
2654
2655 static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
2656 "Scheduler");
2657 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
2658 "Scheduler name");
2659 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
2660 "Slice size for timeshare threads");
2661 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
2662 "Interactivity score threshold");
2663 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
2664     0, "Min priority for preemption; lower priorities have greater precedence");
2665 #ifdef SMP
2666 SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
2667 "Pick the target cpu based on priority rather than load.");
2668 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
2669 "Number of hz ticks to keep thread affinity for");
2670 SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
2671 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
2672 "Enables the long-term load balancer");
2673 SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
2674 &balance_interval, 0,
2675 "Average frequency in stathz ticks to run the long-term balancer");
2676 SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
2677 "Steals work from another hyper-threaded core on idle");
2678 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
2679 "Attempts to steal work from other cores before idling");
2680 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
2681 "Minimum load on remote cpu before we'll steal");
2682 SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
2683 "True when a topology has been specified by the MD code.");
2684 #endif
2685
2686 /* ps compat. All cpu percentages from ULE are weighted. */
2687 static int ccpu = 0;
2688 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
2689
2690
2691 #define KERN_SWITCH_INCLUDE 1
2692 #include "kern/kern_switch.c"