/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/7.3/sys/kern/kern_cpuset.c 196545 2009-08-25 15:58:07Z bz $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/jail.h>		/* Must come after sys/proc.h */

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set. This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask. In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not. This means that anonymous sets are immutable because they may be
 * shared. To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set. This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid. Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid but must still
 * exist within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process. This is to remove ambiguity when the setid is queried with
 * a pid argument. There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'. It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
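
/*
 * For example, the simple userland usage described above might look like
 * the following (an illustrative sketch against the documented
 * cpuset_getaffinity(2)/cpuset_setaffinity(2) wrappers; not part of this
 * file):
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpuset_t mask;
 *
 *	// Which cpus may this process use? A -1 id means the current pid.
 *	CPU_ZERO(&mask);
 *	if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 *	// Restrict only the current thread to cpu 0 (an anonymous mask).
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */
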
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root. Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'. Returns this set
 * referenced. May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free. This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release. Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
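
/*
 * The pair above is a defer/complete split: callers holding spin locks
 * queue dead sets on a private list and finish the frees once the locks
 * are dropped. A condensed sketch of the usage (illustrative; see
 * cpuset_setproc() below for the real caller):
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	... acquire thread/proc spin locks ...
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	... release the spin locks ...
 *	while ((set = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(set);
 */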

/*
 * Find a set based on an id. Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *rset, *jset;
		struct prison *pr;

		rset = cpuset_refroot(set);

		pr = td->td_ucred->cr_prison;
		mtx_lock(&pr->pr_mtx);
		cpuset_ref(pr->pr_cpuset);
		jset = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);

		if (jset->cs_id != rset->cs_id) {
			cpuset_rel(set);
			set = NULL;
		}
		cpuset_rel(jset);
		cpuset_rel(rset);
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref. May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}
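
/*
 * A worked example of the overlap rule above (illustrative): with a
 * parent mask of 0x3 (cpus 0-1), a requested mask of 0xc (cpus 2-3)
 * shares no cpu with the parent and fails with EDEADLK, while 0x6
 * (cpus 1-2) overlaps on cpu 1 and is accepted.
 */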

/*
 * Create a new non-anonymous set with the requested parent and mask. May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'. Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (!CPU_OVERLAP(&set->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(&set->cs_mask, &newmask);
	CPU_AND(&newmask, mask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask)) != 0)
			break;
	return (error);
}
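
/*
 * Example (illustrative): given a root set with mask {0-3} and a child
 * set with mask {0,1}, cpuset_testupdate(root, {2,3}) returns EDEADLK
 * because the child would be left without a cpu to run on, even though
 * the root itself still overlaps the new mask.
 */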

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided. Apply this new
 * mask to restrict all children in the tree. Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail we do not allow
	 * modifying the dedicated root cpuset of the jail, but we may
	 * still allow changes to child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask);
	if (error)
		goto out;
	cpuset_update(set, mask);
	CPU_COPY(mask, &set->cs_mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread. May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			PROC_SLOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			PROC_SUNLOCK(p);
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find(id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		if (jailed(curthread->td_ucred)) {
			if (curthread->td_ucred->cr_prison == pr) {
				cpuset_ref(pr->pr_cpuset);
				set = pr->pr_cpuset;
			}
		} else {
			cpuset_ref(pr->pr_cpuset);
			set = pr->pr_cpuset;
		}
		mtx_unlock(&pr->pr_mtx);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
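
/*
 * A typical call (illustrative), as used by the syscalls below to mean
 * "the current process":
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &td, &set);
 *
 * On success 'p' is returned locked and 'td' points at its first
 * thread; the caller must eventually PROC_UNLOCK(p).
 */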

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'. If the passed in set is anonymous we use its parent; otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null. This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null. This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them. Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		PROC_SLOCK(p);
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_SUNLOCK(p);
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes. The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on. It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release. We
	 * must do this because the PROC_SLOCK has to be held while traversing
	 * the thread list and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set. That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_SUNLOCK(p);
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}
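
/*
 * The loop at the top of cpuset_setproc() is the usual allocate-then-retry
 * pattern for building a preallocated pool without sleeping under
 * non-sleepable locks. A condensed sketch (illustrative only):
 *
 *	for (;;) {
 *		... take the proc lock and proc spin lock ...
 *		if (nfree >= p->p_numthreads)
 *			break;			(locks remain held)
 *		... drop both locks ...
 *		for (; nfree < threads; nfree++)
 *			... uma_zalloc(..., M_WAITOK) may sleep here ...
 *	}
 */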

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0. We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system. It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs. This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine. Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	set->cs_mask.__bits[0] = -1;
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator. 0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}
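
/*
 * Example (illustrative): an administrator wanting to dedicate cpu 3 to
 * high priority work could shrink the default set, which narrows every
 * process still in set 1:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	CPU_SET(2, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_CPUSET, 1,
 *	    sizeof(mask), &mask);
 */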

/*
 * Create a cpuset as cpuset_create() would, but mark the new 'set' as
 * root.
 *
 * We are not going to reparent the td to it. Use cpuset_setproc_update_set()
 * for that.
 *
 * On success, returns the set in *setp with a single reference.
 */
int
cpuset_create_root(struct thread *td, struct cpuset **setp)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	KASSERT(td != NULL, ("[%s:%d] invalid td", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);

	error = cpuset_create(setp, td->td_cpuset, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known. Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}
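
/*
 * Userland example (illustrative): create a new set and migrate the
 * calling process into it, per the syscall above:
 *
 *	#include <sys/cpuset.h>
 *	#include <err.h>
 *
 *	cpusetid_t setid;
 *
 *	if (cpuset(&setid) != 0)
 *		err(1, "cpuset");
 *	// The process now belongs to the freshly numbered set 'setid'.
 */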

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			PROC_SLOCK(p);
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			PROC_SUNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}
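
/*
 * Userland example (illustrative): compute the union of all thread masks
 * of a process, matching the CPU_LEVEL_WHICH/CPU_WHICH_PID case above;
 * 'pid' is a hypothetical process id:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, pid,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_getaffinity");
 */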

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}

	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
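
/*
 * Userland example (illustrative): bind an interrupt to cpu 0 via the
 * CPU_WHICH_IRQ case above; 'irq' is a hypothetical interrupt number:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_IRQ, irq,
 *	    sizeof(mask), &mask) != 0)
 *		err(1, "cpuset_setaffinity");
 */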

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */