1 /*
2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Sergey Glushchenko <deen@smz.com.ua>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * $DragonFly: src/sys/kern/kern_usched.c,v 1.9 2007/07/02 17:06:55 dillon Exp $
35 */
36
37 #include <sys/errno.h>
38 #include <sys/globaldata.h> /* curthread */
39 #include <sys/proc.h>
40 #include <sys/priv.h>
41 #include <sys/sysproto.h> /* struct usched_set_args */
42 #include <sys/systm.h> /* strcmp() */
43 #include <sys/usched.h>
44
45 #include <sys/mplock2.h>
46
47 #include <machine/smp.h>
48
/*
 * Registry of all userland schedulers known to the system.  Entries are
 * added and removed via usched_ctl(); statically initialized so it is
 * usable from early boot (usched_init) before malloc is available.
 */
static TAILQ_HEAD(, usched) usched_list = TAILQ_HEAD_INITIALIZER(usched_list);

/* All-ones cpu mask; presumably "every cpu available to usched" — TODO confirm */
cpumask_t usched_mastermask = -1;
52
53 /*
54 * Called from very low level boot code, i386/i386/machdep.c/init386().
55 * We cannot do anything fancy. no malloc's, no nothing other then
56 * static initialization.
57 */
58 struct usched *
59 usched_init(void)
60 {
61 const char *defsched;
62
63 defsched = kgetenv("kern.user_scheduler");
64
65 /*
66 * Add various userland schedulers to the system.
67 */
68 usched_ctl(&usched_bsd4, USCH_ADD);
69 usched_ctl(&usched_dfly, USCH_ADD);
70 usched_ctl(&usched_dummy, USCH_ADD);
71 if (defsched == NULL )
72 return(&usched_dfly);
73 if (strcmp(defsched, "bsd4") == 0)
74 return(&usched_bsd4);
75 if (strcmp(defsched, "dfly") == 0)
76 return(&usched_dfly);
77 kprintf("WARNING: Running dummy userland scheduler\n");
78 return(&usched_dummy);
79 }
80
81 /*
82 * USCHED_CTL
83 *
84 * SYNOPSIS:
85 * Add/remove usched to/from list.
86 *
87 * ARGUMENTS:
88 * usched - pointer to target scheduler
89 * action - addition or removal ?
90 *
91 * RETURN VALUES:
92 * 0 - success
93 * EINVAL - error
94 */
95 int
96 usched_ctl(struct usched *usched, int action)
97 {
98 struct usched *item; /* temporaly for TAILQ processing */
99 int error = 0;
100
101 switch(action) {
102 case USCH_ADD:
103 /*
104 * Make sure it isn't already on the list
105 */
106 #ifdef INVARIANTS
107 TAILQ_FOREACH(item, &usched_list, entry) {
108 KKASSERT(item != usched);
109 }
110 #endif
111 /*
112 * Optional callback to the scheduler before we officially
113 * add it to the list.
114 */
115 if (usched->usched_register)
116 usched->usched_register();
117 TAILQ_INSERT_TAIL(&usched_list, usched, entry);
118 break;
119 case USCH_REM:
120 /*
121 * Do not allow the default scheduler to be removed
122 */
123 if (strcmp(usched->name, "bsd4") == 0) {
124 error = EINVAL;
125 break;
126 }
127 TAILQ_FOREACH(item, &usched_list, entry) {
128 if (item == usched)
129 break;
130 }
131 if (item) {
132 if (item->usched_unregister)
133 item->usched_unregister();
134 TAILQ_REMOVE(&usched_list, item, entry);
135 } else {
136 error = EINVAL;
137 }
138 break;
139 default:
140 error = EINVAL;
141 break;
142 }
143 return (error);
144 }
145
146 /*
147 * Called from the scheduler clock on each cpu independently at the
148 * common scheduling rate. If th scheduler clock interrupted a running
149 * lwp the lp will be non-NULL.
150 */
151 void
152 usched_schedulerclock(struct lwp *lp, sysclock_t periodic, sysclock_t time)
153 {
154 struct usched *item;
155
156 TAILQ_FOREACH(item, &usched_list, entry) {
157 if (lp && lp->lwp_proc->p_usched == item)
158 item->schedulerclock(lp, periodic, time);
159 else
160 item->schedulerclock(NULL, periodic, time);
161 }
162 }
163
164 /*
165 * USCHED_SET(syscall)
166 *
167 * SYNOPSIS:
168 * Setting up a proc's usched.
169 *
170 * ARGUMENTS:
171 * pid -
172 * cmd -
173 * data -
174 * bytes -
175 * RETURN VALUES:
176 * 0 - success
177 * EINVAL - error
178 *
179 * MPALMOSTSAFE
180 */
181 int
182 sys_usched_set(struct usched_set_args *uap)
183 {
184 struct proc *p = curthread->td_proc;
185 struct usched *item; /* temporaly for TAILQ processing */
186 int error;
187 char buffer[NAME_LENGTH];
188 cpumask_t mask;
189 struct lwp *lp;
190 int cpuid;
191
192 if (uap->pid != 0 && uap->pid != curthread->td_proc->p_pid)
193 return (EINVAL);
194
195 lp = curthread->td_lwp;
196 get_mplock();
197
198 switch (uap->cmd) {
199 case USCHED_SET_SCHEDULER:
200 if ((error = priv_check(curthread, PRIV_SCHED_SET)) != 0)
201 break;
202 error = copyinstr(uap->data, buffer, sizeof(buffer), NULL);
203 if (error)
204 break;
205 TAILQ_FOREACH(item, &usched_list, entry) {
206 if ((strcmp(item->name, buffer) == 0))
207 break;
208 }
209
210 /*
211 * If the scheduler for a process is being changed, disassociate
212 * the old scheduler before switching to the new one.
213 *
214 * XXX we might have to add an additional ABI call to do a 'full
215 * disassociation' and another ABI call to do a 'full
216 * reassociation'
217 */
218 /* XXX lwp have to deal with multiple lwps here */
219 if (p->p_nthreads != 1) {
220 error = EINVAL;
221 break;
222 }
223 if (item && item != p->p_usched) {
224 /* XXX lwp */
225 p->p_usched->release_curproc(ONLY_LWP_IN_PROC(p));
226 p->p_usched->heuristic_exiting(ONLY_LWP_IN_PROC(p), p);
227 p->p_usched = item;
228 } else if (item == NULL) {
229 error = EINVAL;
230 }
231 break;
232 case USCHED_SET_CPU:
233 if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
234 break;
235 if (uap->bytes != sizeof(int)) {
236 error = EINVAL;
237 break;
238 }
239 error = copyin(uap->data, &cpuid, sizeof(int));
240 if (error)
241 break;
242 if (cpuid < 0 || cpuid >= ncpus) {
243 error = EFBIG;
244 break;
245 }
246 if ((smp_active_mask & CPUMASK(cpuid)) == 0) {
247 error = EINVAL;
248 break;
249 }
250 lp->lwp_cpumask = CPUMASK(cpuid);
251 if (cpuid != mycpu->gd_cpuid) {
252 lwkt_migratecpu(cpuid);
253 p->p_usched->changedcpu(lp);
254 }
255 break;
256 case USCHED_GET_CPU:
257 /* USCHED_GET_CPU doesn't require special privileges. */
258 if (uap->bytes != sizeof(int)) {
259 error = EINVAL;
260 break;
261 }
262 error = copyout(&(mycpu->gd_cpuid), uap->data, sizeof(int));
263 break;
264 case USCHED_ADD_CPU:
265 if ((error = priv_check(curthread, PRIV_SCHED_CPUSET)) != 0)
266 break;
267 if (uap->bytes != sizeof(int)) {
268 error = EINVAL;
269 break;
270 }
271 error = copyin(uap->data, &cpuid, sizeof(int));
272 if (error)
273 break;
274 if (cpuid < 0 || cpuid >= ncpus) {
275 error = EFBIG;
276 break;
277 }
278 if (!(smp_active_mask & CPUMASK(cpuid))) {
279 error = EINVAL;
280 break;
281 }
282 lp->lwp_cpumask |= CPUMASK(cpuid);
283 break;
284 case USCHED_DEL_CPU:
285 /* USCHED_DEL_CPU doesn't require special privileges. */
286 if (uap->bytes != sizeof(int)) {
287 error = EINVAL;
288 break;
289 }
290 error = copyin(uap->data, &cpuid, sizeof(int));
291 if (error)
292 break;
293 if (cpuid < 0 || cpuid >= ncpus) {
294 error = EFBIG;
295 break;
296 }
297 lp = curthread->td_lwp;
298 mask = lp->lwp_cpumask & smp_active_mask & ~CPUMASK(cpuid);
299 if (mask == 0)
300 error = EPERM;
301 else {
302 lp->lwp_cpumask &= ~CPUMASK(cpuid);
303 if ((lp->lwp_cpumask & mycpu->gd_cpumask) == 0) {
304 cpuid = BSFCPUMASK(lp->lwp_cpumask &
305 smp_active_mask);
306 lwkt_migratecpu(cpuid);
307 p->p_usched->changedcpu(lp);
308 }
309 }
310 break;
311 default:
312 error = EINVAL;
313 break;
314 }
315 rel_mplock();
316 return (error);
317 }
318