/*-
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */
/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall api these are referred to
 * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall apis a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new mask.
 * Modifying a pid or tid's mask applies only to that tid, but the mask must
 * still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
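
/*
 * Illustrative sketch only (not used by the kernel code below): with the
 * userland wrappers declared in <sys/cpuset.h>, a simple program might
 * query the cpus in its base set and then pin the current thread to cpu 0
 * as described above.  Error handling is elided.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 */
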
static uma_zone_t cpuset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero;

cpuset_t *cpuset_root;

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}
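
/*
 * Sketch of the reference discipline (mirroring the pattern used throughout
 * this file): code that stores or hands out a cpuset pointer takes a ref
 * under the thread lock and pairs it with a cpuset_rel() when done.
 *
 *	struct cpuset *set;
 *
 *	thread_lock(td);
 *	set = cpuset_ref(td->td_cpuset);
 *	thread_unlock(td);
 *	...
 *	cpuset_rel(set);
 */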

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	for (; set->cs_parent != NULL; set = set->cs_parent)
		if (set->cs_flags & CPU_SET_ROOT)
			break;
	cpuset_ref(set);

	return (set);
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	cpuset_ref(set);

	return (set);
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release(&set->cs_ref) == 0)
		return;
	mtx_lock_spin(&cpuset_lock);
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
}
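
/*
 * Sketch of how the deferred release pair is meant to be used (this is
 * the shape of the loop in cpuset_setproc() below): refs are dropped with
 * cpuset_rel_defer() while thread locks are held, and the frees happen
 * afterwards via cpuset_rel_complete().
 *
 *	struct setlist droplist;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, td->td_cpuset);
 *	td->td_cpuset = nset;
 *	thread_unlock(td);
 *	while ((nset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(nset);
 */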

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Create a set in the space provided in 'set' with the provided parameters.
 * The set is returned with a single ref.  May return EDEADLK if the set
 * will have no valid cpu based on restrictions from the parent.
 */
static int
_cpuset_create(struct cpuset *set, struct cpuset *parent, const cpuset_t *mask,
    cpusetid_t id)
{

	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number cannot be
 * allocated.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	*setp = set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, parent, mask, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	uma_zfree(cpuset_zone, set);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int check_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (check_mask) {
		if (!CPU_OVERLAP(&set->cs_mask, mask))
			return (EDEADLK);
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail we do not allow
	 * modifying the dedicated root cpuset of the jail, but we may
	 * still allow child sets to be changed.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	root = set->cs_parent;
	if (root && !CPU_SUBSET(&root->cs_mask, mask))
		return (EINVAL);
	mtx_lock_spin(&cpuset_lock);
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
static int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, td)
				if (td->td_tid == id)
					break;
			if (td != NULL)
				break;
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (td == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
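
/*
 * Sketch of a typical cpuset_which() call for a tid, as done in
 * cpuset_setthread() below; on success the caller owns the proc lock and
 * must drop it.  Error handling beyond the lookup is elided.
 *
 *	struct cpuset *set;
 *	struct thread *td;
 *	struct proc *p;
 *	int error;
 *
 *	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
 *	if (error)
 *		return (error);
 *	...
 *	PROC_UNLOCK(p);
 */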

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'fset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset *fset, const cpuset_t *mask)
{
	struct cpuset *parent;

	if (set->cs_id == CPUSET_INVALID)
		parent = set->cs_parent;
	else
		parent = set;
	if (!CPU_SUBSET(&parent->cs_mask, mask))
		return (EDEADLK);
	return (_cpuset_create(fset, parent, mask, CPUSET_INVALID));
}

/*
 * Handle two cases for replacing the base set or mask of an entire process.
 *
 * 1) Set is non-null and mask is null.  This reparents all anonymous sets
 *    to the provided set and replaces all non-anonymous td_cpusets with the
 *    provided set.
 * 2) Mask is non-null and set is null.  This replaces or creates anonymous
 *    sets for every thread with the existing base as a parent.
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask)
{
	struct setlist freelist;
	struct setlist droplist;
	struct cpuset *tdset;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;
	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	LIST_INIT(&freelist);
	LIST_INIT(&droplist);
	nfree = 0;
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		for (; nfree < threads; nfree++) {
			nset = uma_zalloc(cpuset_zone, M_WAITOK);
			LIST_INSERT_HEAD(&freelist, nset, cs_link);
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		tdset = td->td_cpuset;
		/*
		 * Verify that a new mask doesn't specify cpus outside of
		 * the set the thread is a member of.
		 */
		if (mask) {
			if (tdset->cs_id == CPUSET_INVALID)
				tdset = tdset->cs_parent;
			if (!CPU_SUBSET(&tdset->cs_mask, mask))
				error = EDEADLK;
		/*
		 * Verify that a new set won't leave an existing thread
		 * mask without a cpu to run on.  It can, however, restrict
		 * the set.
		 */
		} else if (tdset->cs_id == CPUSET_INVALID) {
			if (!CPU_OVERLAP(&set->cs_mask, &tdset->cs_mask))
				error = EDEADLK;
		}
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		/*
		 * If we presently have an anonymous set or are applying a
		 * mask we must create an anonymous shadow set.  That is
		 * either parented to our existing base or the supplied set.
		 *
		 * If we have a base set with no anonymous shadow we simply
		 * replace it outright.
		 */
		tdset = td->td_cpuset;
		if (tdset->cs_id == CPUSET_INVALID || mask) {
			nset = LIST_FIRST(&freelist);
			LIST_REMOVE(nset, cs_link);
			if (mask)
				error = cpuset_shadow(tdset, nset, mask);
			else
				error = _cpuset_create(nset, set,
				    &tdset->cs_mask, CPUSET_INVALID);
			if (error) {
				LIST_INSERT_HEAD(&freelist, nset, cs_link);
				thread_unlock(td);
				break;
			}
		} else
			nset = cpuset_ref(set);
		cpuset_rel_defer(&droplist, tdset);
		td->td_cpuset = nset;
		sched_affinity(td);
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	while ((nset = LIST_FIRST(&freelist)) != NULL) {
		LIST_REMOVE(nset, cs_link);
		uma_zfree(cpuset_zone, nset);
	}
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	nset = uma_zalloc(cpuset_zone, M_WAITOK);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, nset, mask);
	if (error == 0) {
		set = td->td_cpuset;
		td->td_cpuset = nset;
		sched_affinity(td);
		nset = NULL;
	}
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	if (nset)
		uma_zfree(cpuset_zone, nset);
	return (error);
}

/*
 * Creates the cpuset for thread0.  We make two sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  It is initially created with a mask of all processors
 *     because we don't know what processors are valid until cpuset_init()
 *     runs.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int error;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);
	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
	/*
	 * Create the root system set for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_FILL(&set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	set->cs_ref = 1;
	set->cs_flags = CPU_SET_ROOT;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;
	/*
	 * Now derive a default, modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK);
	error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	/*
	 * Initialize the unit allocator.  0 and 1 are allocated above.
	 */
	cpuset_unr = new_unrhdr(2, INT_MAX, NULL);

	return (set);
}

/*
 * Create a cpuset as in cpuset_create(), but mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp with a reference held.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	error = cpuset_create(setp, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(*setp != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set = *setp;
	set->cs_flags |= CPU_SET_ROOT;

	return (0);
}
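
/*
 * Sketch of the intended pairing for jail creation (per the comment above;
 * the surrounding jail code is assumed): create the per-prison root set,
 * then move the creating process into it.
 *
 *	error = cpuset_create_root(pr, &set);
 *	if (error == 0)
 *		error = cpuset_setproc_update_set(p, set);
 */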

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL);
	if (error)
		return (error);
	cpuset_rel(set);
	return (0);
}

/*
 * This is called once the final set of system cpus is known.  Modifies
 * the root set and all children and marks the root read-only.
 */
static void
cpuset_init(void *arg)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
#ifdef SMP
	mask.__bits[0] = all_cpus;
#else
	mask.__bits[0] = 1;
#endif
	if (cpuset_modify(cpuset_zero, &mask))
		panic("Can't set initial cpuset mask.\n");
	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
}
SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t	*setid;
};
#endif
int
cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	setid;
};
#endif
int
cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (uap->which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(uap->setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(uap->id, set, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	cpusetid_t	*setid;
};
#endif
int
cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t id;
	int error;

	if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (uap->which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
		return (EINVAL);
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	id = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&id, uap->setid, sizeof(id));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	cpuset_t	*mask;
};
#endif
int
cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	size = uap->cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
			error = intr_getaffinity(uap->id, mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = copyout(mask, uap->mask, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		cpusetsize;
	const cpuset_t	*mask;
};
#endif
int
cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (uap->cpusetsize < sizeof(cpuset_t) ||
	    uap->cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	mask = malloc(uap->cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = copyin(uap->mask, mask, uap->cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (uap->cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += uap->cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	switch (uap->level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(uap->which, uap->id, &p, &ttd, &set);
		if (error)
			break;
		switch (uap->which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
			error = EINVAL;
			goto out;
		}
		if (uap->level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (uap->which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(uap->id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(uap->id, NULL, mask);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(uap->which, uap->id, &p,
			    &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
			error = intr_setaffinity(uap->id, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB
DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;
	int cpu, once;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf(" mask=");
		for (once = 0, cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (CPU_ISSET(cpu, &set->cs_mask)) {
				if (once == 0) {
					db_printf("%d", cpu);
					once = 1;
				} else
					db_printf(",%d", cpu);
			}
		}
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */