FreeBSD/Linux Kernel Cross Reference
sys/sys/sched.h
1 /*-
2 * SPDX-License-Identifier: (BSD-4-Clause AND BSD-2-Clause-FreeBSD)
3 *
4 * Copyright (c) 1996, 1997
5 * HD Associates, Inc. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by HD Associates, Inc
18 * and Jukka Antero Ukkonen.
19 * 4. Neither the name of the author nor the names of any co-contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY HD ASSOCIATES AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL HD ASSOCIATES OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 /*-
37 * Copyright (c) 2002-2008, Jeffrey Roberson <jeff@freebsd.org>
38 * All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice unmodified, this list of conditions, and the following
45 * disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 * $FreeBSD$
62 */
63
64 #ifndef _SCHED_H_
65 #define _SCHED_H_
66
67 #ifdef _KERNEL
/*
 * General scheduling info.
 *
 * sched_load:
 *	Total runnable non-ithread threads in the system.
 *
 * sched_runnable:
 *	Runnable threads for this processor.
 */
int sched_load(void);
int sched_rr_interval(void);
int sched_runnable(void);

/*
 * Proc related scheduling hooks: fork/exit lifetime events plus
 * scheduling-class and nice-value changes that apply to a whole process.
 */
void sched_exit(struct proc *p, struct thread *childtd);
void sched_fork(struct thread *td, struct thread *childtd);
void sched_fork_exit(struct thread *td);
void sched_class(struct thread *td, int class);
void sched_nice(struct proc *p, int nice);

/*
 * Threads are switched in and out, block on resources, have temporary
 * priorities inherited from their procs, and use up cpu time.
 */
void sched_ap_entry(void);
void sched_exit_thread(struct thread *td, struct thread *child);
u_int sched_estcpu(struct thread *td);
void sched_fork_thread(struct thread *td, struct thread *child);
void sched_ithread_prio(struct thread *td, u_char prio);
void sched_lend_prio(struct thread *td, u_char prio);
void sched_lend_user_prio(struct thread *td, u_char pri);
void sched_lend_user_prio_cond(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
void sched_prio(struct thread *td, u_char prio);
void sched_sleep(struct thread *td, int prio);
void sched_switch(struct thread *td, int flags);
void sched_throw(struct thread *td);
void sched_unlend_prio(struct thread *td, u_char prio);
void sched_user_prio(struct thread *td, u_char prio);
void sched_userret_slowpath(struct thread *td);
/* Compiled only when both RACCT and the 4BSD scheduler are configured. */
#ifdef RACCT
#ifdef SCHED_4BSD
fixpt_t sched_pctcpu_delta(struct thread *td);
#endif
#endif
115
116 static inline void
117 sched_userret(struct thread *td)
118 {
119
120 /*
121 * XXX we cheat slightly on the locking here to avoid locking in
122 * the usual case. Setting td_priority here is essentially an
123 * incomplete workaround for not setting it properly elsewhere.
124 * Now that some interrupt handlers are threads, not setting it
125 * properly elsewhere can clobber it in the window between setting
126 * it here and returning to user mode, so don't waste time setting
127 * it perfectly here.
128 */
129 KASSERT((td->td_flags & TDF_BORROWING) == 0,
130 ("thread with borrowed priority returning to userland"));
131 if (__predict_false(td->td_priority != td->td_user_pri))
132 sched_userret_slowpath(td);
133 }
134
/*
 * Threads are moved on and off of run queues.
 */
void sched_add(struct thread *td, int flags);	/* flags: SRQ_* values below. */
struct thread *sched_choose(void);
void sched_clock(struct thread *td, int cnt);
void sched_idletd(void *);
void sched_preempt(struct thread *td);
void sched_relinquish(struct thread *td);
void sched_rem(struct thread *td);
void sched_wakeup(struct thread *td, int srqflags);

/*
 * Binding makes cpu affinity permanent while pinning is used to temporarily
 * hold a thread on a particular CPU.  sched_pin()/sched_unpin() are the
 * inline fast paths defined later in this header.
 */
void sched_bind(struct thread *td, int cpu);
static __inline void sched_pin(void);
void sched_unbind(struct thread *td);
static __inline void sched_unpin(void);
int sched_is_bound(struct thread *td);
void sched_affinity(struct thread *td);

/*
 * These procedures tell the process data structure allocation code how
 * many bytes to actually allocate.
 */
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);

/*
 * This routine provides a consistent thread name for use with KTR graphing
 * functions.
 */
char *sched_tdname(struct thread *td);
#ifdef KTR
void sched_clear_tdname(struct thread *td);
#endif
173
174 static __inline void
175 sched_pin(void)
176 {
177 curthread->td_pinned++;
178 atomic_interrupt_fence();
179 }
180
181 static __inline void
182 sched_unpin(void)
183 {
184 atomic_interrupt_fence();
185 curthread->td_pinned--;
186 }
187
/* sched_add() arguments (formerly setrunqueue). */
#define SRQ_BORING	0x0000		/* No special circumstances. */
#define SRQ_YIELDING	0x0001		/* We are yielding (from mi_switch). */
#define SRQ_OURSELF	0x0002		/* It is ourself (from mi_switch). */
#define SRQ_INTR	0x0004		/* It is probably urgent. */
#define SRQ_PREEMPTED	0x0008		/* Thread has been preempted; be kind. */
#define SRQ_BORROWING	0x0010		/* Priority updated due to prio_lend. */
#define SRQ_HOLD	0x0020		/* Return holding original td lock. */
#define SRQ_HOLDTD	0x0040		/* Return holding td lock. */
197
/*
 * Scheduler stats: per-CPU event counters exported under the
 * kern.sched.stats sysctl tree when the kernel is built with SCHED_STATS.
 */
#ifdef SCHED_STATS
DPCPU_DECLARE(long, sched_switch_stats[SWT_COUNT]);

/*
 * Register a read-only sysctl node for an existing per-CPU counter at
 * pointer 'ptr', named 'name' with description 'descr'.
 */
#define SCHED_STAT_DEFINE_VAR(name, ptr, descr)			\
static void name ## _add_proc(void *dummy __unused)			\
{									\
									\
	SYSCTL_ADD_PROC(NULL,						\
	    SYSCTL_STATIC_CHILDREN(_kern_sched_stats), OID_AUTO,	\
	    #name, CTLTYPE_LONG|CTLFLAG_RD|CTLFLAG_MPSAFE,		\
	    ptr, 0, sysctl_dpcpu_long, "LU", descr);			\
}									\
SYSINIT(name, SI_SUB_LAST, SI_ORDER_MIDDLE, name ## _add_proc, NULL);

/* Define the per-CPU counter itself and register its sysctl node. */
#define SCHED_STAT_DEFINE(name, descr)				\
    DPCPU_DEFINE(unsigned long, name);					\
    SCHED_STAT_DEFINE_VAR(name, &DPCPU_NAME(name), descr)
/*
 * Sched stats are always incremented in critical sections so no atomic
 * is necessary to increment them.
 */
#define SCHED_STAT_INC(var) DPCPU_GET(var)++;
#else
/*
 * No-op stubs when SCHED_STATS is not configured.  The parameter order
 * matches the real definitions above.
 */
#define SCHED_STAT_DEFINE_VAR(name, ptr, descr)
#define SCHED_STAT_DEFINE(name, descr)
#define SCHED_STAT_INC(var) (void)0
#endif
226
/*
 * Fixup scheduler state for proc0 and thread0 (boot CPU).
 */
void schedinit(void);

/*
 * Fixup scheduler state for secondary APs.
 */
void schedinit_ap(void);
236 #endif /* _KERNEL */
237
/* POSIX 1003.1b Process Scheduling */

/*
 * POSIX scheduling policies, as passed to sched_setscheduler() and
 * returned by sched_getscheduler().
 */
#define SCHED_FIFO 1
#define SCHED_OTHER 2
#define SCHED_RR 3

/* POSIX scheduling parameters; only sched_priority is standardized. */
struct sched_param {
	int sched_priority;
};
250
/*
 * POSIX scheduling declarations for userland (hidden from kernel builds).
 */
#ifndef _KERNEL
#include <sys/cdefs.h>
#include <sys/_timespec.h>
#include <sys/_types.h>

/* pid_t is required by the sched_*() prototypes below. */
#ifndef _PID_T_DECLARED
typedef __pid_t pid_t;
#define _PID_T_DECLARED
#endif

__BEGIN_DECLS
int sched_get_priority_max(int);
int sched_get_priority_min(int);
int sched_getparam(pid_t, struct sched_param *);
int sched_getscheduler(pid_t);
int sched_rr_get_interval(pid_t, struct timespec *);
int sched_setparam(pid_t, const struct sched_param *);
int sched_setscheduler(pid_t, int, const struct sched_param *);
int sched_yield(void);
__END_DECLS

#endif
276 #endif /* !_SCHED_H_ */
Cache object: cc29ace77813855ca7929df3b738fc92
|