/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Copyright (c) 2008 Nokia Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/vmmeter.h>

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif /* DDB */

/*
 * cpusets provide a mechanism for creating and manipulating sets of
 * processors for the purpose of constraining the scheduling of threads to
 * specific processors.
 *
 * Each process belongs to an identified set; by default this is set 1.
 * Each thread may further restrict the cpus it may run on to a subset of
 * this named set.  This creates an anonymous set which other threads and
 * processes may not join by number.
 *
 * The named set is referred to herein as the 'base' set to avoid ambiguity.
 * This set is usually a child of a 'root' set while the anonymous set may
 * simply be referred to as a mask.  In the syscall API these are referred
 * to as the ROOT, CPUSET, and MASK levels, where CPUSET is called 'base'
 * here.
 *
 * Threads inherit their set from their creator whether it be anonymous or
 * not.  This means that anonymous sets are immutable because they may be
 * shared.  To modify an anonymous set a new set is created with the desired
 * mask and the same parent as the existing anonymous set.  This gives the
 * illusion of each thread having a private mask.
 *
 * Via the syscall APIs a user may ask to retrieve or modify the root, base,
 * or mask that is discovered via a pid, tid, or setid.  Modifying a set
 * modifies all numbered and anonymous child sets to comply with the new
 * mask.  Modifying a pid or tid's mask applies only to that tid, but the
 * new mask must still fall within the assigned parent set.
 *
 * A thread may not be assigned to a group separate from other threads in
 * the process.  This is to remove ambiguity when the setid is queried with
 * a pid argument.  There is no other technical limitation.
 *
 * This somewhat complex arrangement is intended to make it easy for
 * applications to query available processors and bind their threads to
 * specific processors while also allowing administrators to dynamically
 * reprovision by changing sets which apply to groups of processes.
 *
 * A simple application should not concern itself with sets at all and
 * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id
 * meaning 'curthread'.  It may query available cpus for that tid with a
 * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...).
 */
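
/*
 * As an illustrative userland sketch (not compiled here) of the simple
 * usage described above, with error handling elided:
 *
 *	#include <sys/param.h>
 *	#include <sys/cpuset.h>
 *
 *	cpuset_t mask;
 *
 *	// Query the cpus available to the current thread's base set.
 *	CPU_ZERO(&mask);
 *	cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask);
 *
 *	// Restrict the current thread to the lowest available cpu.
 *	for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *		if (CPU_ISSET(cpu, &mask)) {
 *			CPU_ZERO(&mask);
 *			CPU_SET(cpu, &mask);
 *			cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
 *			    -1, sizeof(mask), &mask);
 *			break;
 *		}
 */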

LIST_HEAD(domainlist, domainset);
struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
struct domainset __read_mostly domainset_roundrobin;

static uma_zone_t cpuset_zone;
static uma_zone_t domainset_zone;
static struct mtx cpuset_lock;
static struct setlist cpuset_ids;
static struct domainlist cpuset_domains;
static struct unrhdr *cpuset_unr;
static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
static struct domainset domainset0, domainset2;

/* Return the size of cpuset_t at the kernel level */
SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
    SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");

cpuset_t *cpuset_root;
cpuset_t cpuset_domain[MAXMEMDOM];

static int domainset_valid(const struct domainset *, const struct domainset *);

/*
 * Find the first non-anonymous set starting from 'set'.
 */
static struct cpuset *
cpuset_getbase(struct cpuset *set)
{

	if (set->cs_id == CPUSET_INVALID)
		set = set->cs_parent;
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.
 */
static struct cpuset *
cpuset_getroot(struct cpuset *set)
{

	while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
		set = set->cs_parent;
	return (set);
}

/*
 * Acquire a reference to a cpuset; all pointers must be tracked with refs.
 */
struct cpuset *
cpuset_ref(struct cpuset *set)
{

	refcount_acquire(&set->cs_ref);
	return (set);
}

/*
 * Walks up the tree from 'set' to find the root.  Returns the root
 * referenced.
 */
static struct cpuset *
cpuset_refroot(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getroot(set)));
}

/*
 * Find the first non-anonymous set starting from 'set'.  Returns this set
 * referenced.  May return the passed in set with an extra ref if it is
 * not anonymous.
 */
static struct cpuset *
cpuset_refbase(struct cpuset *set)
{

	return (cpuset_ref(cpuset_getbase(set)));
}

/*
 * Release a reference in a context where it is safe to allocate.
 */
void
cpuset_rel(struct cpuset *set)
{
	cpusetid_t id;

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	id = set->cs_id;
	if (id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}

/*
 * Deferred release must be used when in a context that is not safe to
 * allocate/free.  This places any unreferenced sets on the list 'head'.
 */
static void
cpuset_rel_defer(struct setlist *head, struct cpuset *set)
{

	if (refcount_release_if_not_last(&set->cs_ref))
		return;
	mtx_lock_spin(&cpuset_lock);
	if (!refcount_release(&set->cs_ref)) {
		mtx_unlock_spin(&cpuset_lock);
		return;
	}
	LIST_REMOVE(set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_REMOVE(set, cs_link);
	LIST_INSERT_HEAD(head, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);
}

/*
 * Complete a deferred release.  Removes the set from the list provided to
 * cpuset_rel_defer.
 */
static void
cpuset_rel_complete(struct cpuset *set)
{
	cpusetid_t id;

	id = set->cs_id;
	LIST_REMOVE(set, cs_link);
	cpuset_rel(set->cs_parent);
	uma_zfree(cpuset_zone, set);
	if (id != CPUSET_INVALID)
		free_unr(cpuset_unr, id);
}
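
/*
 * A minimal sketch of the defer/complete pattern, mirroring its use in
 * cpuset_setproc() below: releases are queued while a thread lock is
 * held and completed once it is safe to sleep and free ('newset' here
 * is a placeholder for the replacement set):
 *
 *	struct setlist droplist;
 *	struct cpuset *nset;
 *
 *	LIST_INIT(&droplist);
 *	thread_lock(td);
 *	cpuset_rel_defer(&droplist, cpuset_update_thread(td, newset));
 *	thread_unlock(td);
 *	while ((nset = LIST_FIRST(&droplist)) != NULL)
 *		cpuset_rel_complete(nset);
 */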

/*
 * Find a set based on an id.  Returns it with a ref.
 */
static struct cpuset *
cpuset_lookup(cpusetid_t setid, struct thread *td)
{
	struct cpuset *set;

	if (setid == CPUSET_INVALID)
		return (NULL);
	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(set, &cpuset_ids, cs_link)
		if (set->cs_id == setid)
			break;
	if (set)
		cpuset_ref(set);
	mtx_unlock_spin(&cpuset_lock);

	KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
	if (set != NULL && jailed(td->td_ucred)) {
		struct cpuset *jset, *tset;

		jset = td->td_ucred->cr_prison->pr_cpuset;
		for (tset = set; tset != NULL; tset = tset->cs_parent)
			if (tset == jset)
				break;
		if (tset == NULL) {
			cpuset_rel(set);
			set = NULL;
		}
	}

	return (set);
}

/*
 * Initialize a set in the space provided in 'set' with the provided
 * parameters.  The set is returned with a single ref.  May return EDEADLK
 * if the set will have no valid cpu based on restrictions from the parent.
 */
static int
cpuset_init(struct cpuset *set, struct cpuset *parent,
    const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
{

	if (domain == NULL)
		domain = parent->cs_domain;
	if (mask == NULL)
		mask = &parent->cs_mask;
	if (!CPU_OVERLAP(&parent->cs_mask, mask))
		return (EDEADLK);
	/* The domain must be prepared ahead of time. */
	if (!domainset_valid(parent->cs_domain, domain))
		return (EDEADLK);
	CPU_COPY(mask, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = 0;
	mtx_lock_spin(&cpuset_lock);
	set->cs_domain = domain;
	CPU_AND(&set->cs_mask, &parent->cs_mask);
	set->cs_id = id;
	set->cs_parent = cpuset_ref(parent);
	LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
	if (set->cs_id != CPUSET_INVALID)
		LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	mtx_unlock_spin(&cpuset_lock);

	return (0);
}

/*
 * Create a new non-anonymous set with the requested parent and mask.  May
 * return failures if the mask is invalid or a new number can not be
 * allocated.
 *
 * If *setp is not NULL, then it will be used as-is.  The caller must take
 * into account that *setp will be inserted at the head of cpuset_ids and
 * plan any potentially conflicting cs_link usage accordingly.
 */
static int
cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
{
	struct cpuset *set;
	cpusetid_t id;
	int error;
	bool dofree;

	id = alloc_unr(cpuset_unr);
	if (id == -1)
		return (ENFILE);
	dofree = (*setp == NULL);
	if (*setp != NULL)
		set = *setp;
	else
		*setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, parent, mask, NULL, id);
	if (error == 0)
		return (0);
	free_unr(cpuset_unr, id);
	if (dofree)
		uma_zfree(cpuset_zone, set);

	return (error);
}

static void
cpuset_freelist_add(struct setlist *list, int count)
{
	struct cpuset *set;
	int i;

	for (i = 0; i < count; i++) {
		set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
		LIST_INSERT_HEAD(list, set, cs_link);
	}
}

static void
cpuset_freelist_init(struct setlist *list, int count)
{

	LIST_INIT(list);
	cpuset_freelist_add(list, count);
}

static void
cpuset_freelist_free(struct setlist *list)
{
	struct cpuset *set;

	while ((set = LIST_FIRST(list)) != NULL) {
		LIST_REMOVE(set, cs_link);
		uma_zfree(cpuset_zone, set);
	}
}

static void
domainset_freelist_add(struct domainlist *list, int count)
{
	struct domainset *set;
	int i;

	for (i = 0; i < count; i++) {
		set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
		LIST_INSERT_HEAD(list, set, ds_link);
	}
}

static void
domainset_freelist_init(struct domainlist *list, int count)
{

	LIST_INIT(list);
	domainset_freelist_add(list, count);
}

static void
domainset_freelist_free(struct domainlist *list)
{
	struct domainset *set;

	while ((set = LIST_FIRST(list)) != NULL) {
		LIST_REMOVE(set, ds_link);
		uma_zfree(domainset_zone, set);
	}
}

/* Copy a domainset preserving mask and policy. */
static void
domainset_copy(const struct domainset *from, struct domainset *to)
{

	DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
	to->ds_policy = from->ds_policy;
	to->ds_prefer = from->ds_prefer;
}

/* Return 1 if mask and policy are equal, otherwise 0. */
static int
domainset_equal(const struct domainset *one, const struct domainset *two)
{

	return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
	    one->ds_policy == two->ds_policy &&
	    one->ds_prefer == two->ds_prefer);
}

/* Return 1 if child is a valid subset of parent. */
static int
domainset_valid(const struct domainset *parent, const struct domainset *child)
{
	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
		return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
}

static int
domainset_restrict(const struct domainset *parent,
    const struct domainset *child)
{
	if (child->ds_policy != DOMAINSET_POLICY_PREFER)
		return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
	return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
}

/*
 * Lookup or create a domainset.  The key is provided in ds_mask and
 * ds_policy.  If the domainset does not yet exist the storage in
 * 'domain' is used to insert.  Otherwise this storage is freed to the
 * domainset_zone and the existing domainset is returned.
 */
static struct domainset *
_domainset_create(struct domainset *domain, struct domainlist *freelist)
{
	struct domainset *ndomain;
	int i, j;

	KASSERT(domain->ds_cnt <= vm_ndomains,
	    ("invalid domain count in domainset %p", domain));
	KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
	    domain->ds_prefer < vm_ndomains,
	    ("invalid preferred domain in domains %p", domain));

	mtx_lock_spin(&cpuset_lock);
	LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
		if (domainset_equal(ndomain, domain))
			break;
	/*
	 * If the domain does not yet exist we insert it and initialize
	 * various iteration helpers which are not part of the key.
	 */
	if (ndomain == NULL) {
		LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
		domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
		for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
			if (DOMAINSET_ISSET(i, &domain->ds_mask))
				domain->ds_order[j++] = i;
	}
	mtx_unlock_spin(&cpuset_lock);
	if (ndomain == NULL)
		return (domain);
	if (freelist != NULL)
		LIST_INSERT_HEAD(freelist, domain, ds_link);
	else
		uma_zfree(domainset_zone, domain);
	return (ndomain);
}

/*
 * Are any of the domains in the mask empty?  If so, silently
 * remove them and update the domainset accordingly.  If only empty
 * domains are present, we must return failure.
 */
static bool
domainset_empty_vm(struct domainset *domain)
{
	domainset_t empty;
	int i, j;

	DOMAINSET_ZERO(&empty);
	for (i = 0; i < vm_ndomains; i++)
		if (VM_DOMAIN_EMPTY(i))
			DOMAINSET_SET(i, &empty);
	if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
		return (true);

	/* Remove empty domains from the set and recompute. */
	DOMAINSET_NAND(&domain->ds_mask, &empty);
	domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
	for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
		if (DOMAINSET_ISSET(i, &domain->ds_mask))
			domain->ds_order[j++] = i;

	/* Convert a PREFER policy referencing an empty domain to RR. */
	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
	    DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
		domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
		domain->ds_prefer = -1;
	}

	return (false);
}

/*
 * Create or lookup a domainset based on the key held in 'domain'.
 */
struct domainset *
domainset_create(const struct domainset *domain)
{
	struct domainset *ndomain;

	/*
	 * Validate the policy.  It must specify a usable policy number with
	 * only valid domains.  Preferred must include the preferred domain
	 * in the mask.
	 */
	if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
	    domain->ds_policy > DOMAINSET_POLICY_MAX)
		return (NULL);
	if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
	    !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
		return (NULL);
	if (!DOMAINSET_SUBSET(&domainset0.ds_mask, &domain->ds_mask))
		return (NULL);
	ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
	domainset_copy(domain, ndomain);
	return (_domainset_create(ndomain, NULL));
}
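
/*
 * For example (an illustrative sketch, not compiled here), a caller
 * wanting a policy that prefers domain 0 with a fallback to all domains
 * can build a key on the stack and let domainset_create() intern it, as
 * domainset_init() below does for the static prefer sets:
 *
 *	struct domainset key;
 *	struct domainset *dset;
 *
 *	bzero(&key, sizeof(key));
 *	DOMAINSET_COPY(&all_domains, &key.ds_mask);
 *	key.ds_policy = DOMAINSET_POLICY_PREFER;
 *	key.ds_prefer = 0;
 *	dset = domainset_create(&key);
 *
 * The returned pointer is either an existing equal set or newly interned
 * storage; NULL indicates an invalid key.
 */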

/*
 * Update thread domainset pointers.
 */
static void
domainset_notify(void)
{
	struct thread *td;
	struct proc *p;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_state == PRS_NEW) {
			PROC_UNLOCK(p);
			continue;
		}
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			td->td_domain.dr_policy = td->td_cpuset->cs_domain;
			thread_unlock(td);
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
}

/*
 * Create a new set that is a subset of a parent.
 */
static struct domainset *
domainset_shadow(const struct domainset *pdomain,
    const struct domainset *domain, struct domainlist *freelist)
{
	struct domainset *ndomain;

	ndomain = LIST_FIRST(freelist);
	LIST_REMOVE(ndomain, ds_link);

	/*
	 * Initialize the key from the request.
	 */
	domainset_copy(domain, ndomain);

	/*
	 * Restrict the key by the parent.
	 */
	DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);

	return (_domainset_create(ndomain, freelist));
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
{
	struct cpuset *nset;
	cpuset_t newmask;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	if (augment_mask) {
		CPU_COPY(&set->cs_mask, &newmask);
		CPU_AND(&newmask, mask);
	} else
		CPU_COPY(mask, &newmask);

	if (CPU_EMPTY(&newmask))
		return (EDEADLK);
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	CPU_AND(&set->cs_mask, mask);
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update(nset, &set->cs_mask);

	return;
}

/*
 * Modify the set 'set' to use a copy of the mask provided.  Apply this new
 * mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify(struct cpuset *set, cpuset_t *mask)
{
	struct cpuset *root;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail,
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail, but may still allow changing
	 * child sets, including the roots of subordinate
	 * jails.
	 */
	if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
	    jailed(curthread->td_ucred) &&
	    set == curthread->td_ucred->cr_prison->pr_cpuset)
		return (EPERM);
	/*
	 * Verify that we have access to this set of
	 * cpus.
	 */
	if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
		KASSERT(set->cs_parent != NULL,
		    ("jail.cpuset=%d is not a proper child of parent jail's root.",
		    set->cs_id));

		/*
		 * cpuset_getroot() cannot work here due to how top-level jail
		 * roots are constructed.  Top-level jails are parented to
		 * thread0's cpuset (i.e. cpuset 1) rather than the system root.
		 */
		root = set->cs_parent;
	} else {
		root = cpuset_getroot(set);
	}
	mtx_lock_spin(&cpuset_lock);
	if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
		error = EINVAL;
		goto out;
	}
	error = cpuset_testupdate(set, mask, 0);
	if (error)
		goto out;
	CPU_COPY(mask, &set->cs_mask);
	cpuset_update(set, mask);
out:
	mtx_unlock_spin(&cpuset_lock);

	return (error);
}

/*
 * Recursively check for errors that would occur from applying mask to
 * the tree of sets starting at 'set'.  Checks for sets that would become
 * empty as well as RDONLY flags.
 */
static int
cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
    struct domainset *orig, int *count, int augment_mask __unused)
{
	struct cpuset *nset;
	struct domainset *domain;
	struct domainset newset;
	int error;

	mtx_assert(&cpuset_lock, MA_OWNED);
	if (set->cs_flags & CPU_SET_RDONLY)
		return (EPERM);
	domain = set->cs_domain;
	domainset_copy(domain, &newset);
	if (!domainset_equal(domain, orig)) {
		if (!domainset_restrict(domain, dset))
			return (EDEADLK);
		DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
		/* Count the number of domains that are changing. */
		(*count)++;
	}
	error = 0;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		if ((error = cpuset_testupdate_domain(nset, &newset, domain,
		    count, 1)) != 0)
			break;
	return (error);
}

/*
 * Applies the mask 'mask' without checking for empty sets or permissions.
 */
static void
cpuset_update_domain(struct cpuset *set, struct domainset *domain,
    struct domainset *orig, struct domainlist *domains)
{
	struct cpuset *nset;

	mtx_assert(&cpuset_lock, MA_OWNED);
	/*
	 * If this domainset has changed from the parent we must calculate
	 * a new set.  Otherwise it simply inherits from the parent.  When
	 * we inherit from the parent we get a new mask and policy.  If the
	 * set is modified from the parent we keep the policy and only
	 * update the mask.
	 */
	if (set->cs_domain != orig) {
		orig = set->cs_domain;
		set->cs_domain = domainset_shadow(domain, orig, domains);
	} else
		set->cs_domain = domain;
	LIST_FOREACH(nset, &set->cs_children, cs_siblings)
		cpuset_update_domain(nset, set->cs_domain, orig, domains);

	return;
}

/*
 * Modify the set 'set' to use a copy of the domainset provided.  Apply this
 * new mask to restrict all children in the tree.  Checks for validity before
 * applying the changes.
 */
static int
cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
{
	struct domainlist domains;
	struct domainset temp;
	struct domainset *dset;
	struct cpuset *root;
	int ndomains, needed;
	int error;

	error = priv_check(curthread, PRIV_SCHED_CPUSET);
	if (error)
		return (error);
	/*
	 * In case we are called from within the jail
	 * we do not allow modifying the dedicated root
	 * cpuset of the jail, but may still allow changing
	 * child sets.
	 */
	if (jailed(curthread->td_ucred) &&
	    set->cs_flags & CPU_SET_ROOT)
		return (EPERM);
	domainset_freelist_init(&domains, 0);
	domain = domainset_create(domain);
	ndomains = 0;

	mtx_lock_spin(&cpuset_lock);
	for (;;) {
		root = cpuset_getroot(set);
		dset = root->cs_domain;
		/*
		 * Verify that we have access to this set of domains.
		 */
		if (!domainset_valid(dset, domain)) {
			error = EINVAL;
			goto out;
		}
		/*
		 * If applying prefer we keep the current set as the fallback.
		 */
		if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
			DOMAINSET_COPY(&set->cs_domain->ds_mask,
			    &domain->ds_mask);
		/*
		 * Determine whether we can apply this set of domains and
		 * how many new domain structures it will require.
		 */
		domainset_copy(domain, &temp);
		needed = 0;
		error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
		    &needed, 0);
		if (error)
			goto out;
		if (ndomains >= needed)
			break;

		/* Dropping the lock; we'll need to re-evaluate. */
		mtx_unlock_spin(&cpuset_lock);
		domainset_freelist_add(&domains, needed - ndomains);
		ndomains = needed;
		mtx_lock_spin(&cpuset_lock);
	}
	dset = set->cs_domain;
	cpuset_update_domain(set, domain, dset, &domains);
out:
	mtx_unlock_spin(&cpuset_lock);
	domainset_freelist_free(&domains);
	if (error == 0)
		domainset_notify();

	return (error);
}

/*
 * Resolve the 'which' parameter of several cpuset apis.
 *
 * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
 * checks for permission via p_cansched().
 *
 * For WHICH_SET returns a valid set with a new reference.
 *
 * -1 may be supplied for any argument to mean the current proc/thread or
 * the base set of the current thread.  May fail with ESRCH/EPERM.
 */
int
cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
    struct cpuset **setp)
{
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	*pp = p = NULL;
	*tdp = td = NULL;
	*setp = set = NULL;
	switch (which) {
	case CPU_WHICH_PID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			break;
		}
		if ((p = pfind(id)) == NULL)
			return (ESRCH);
		break;
	case CPU_WHICH_TID:
		if (id == -1) {
			PROC_LOCK(curproc);
			p = curproc;
			td = curthread;
			break;
		}
		td = tdfind(id, -1);
		if (td == NULL)
			return (ESRCH);
		p = td->td_proc;
		break;
	case CPU_WHICH_CPUSET:
		if (id == -1) {
			thread_lock(curthread);
			set = cpuset_refbase(curthread->td_cpuset);
			thread_unlock(curthread);
		} else
			set = cpuset_lookup(id, curthread);
		if (set) {
			*setp = set;
			return (0);
		}
		return (ESRCH);
	case CPU_WHICH_JAIL:
	{
		/* Find `set' for prison with given id. */
		struct prison *pr;

		sx_slock(&allprison_lock);
		pr = prison_find_child(curthread->td_ucred->cr_prison, id);
		sx_sunlock(&allprison_lock);
		if (pr == NULL)
			return (ESRCH);
		cpuset_ref(pr->pr_cpuset);
		*setp = pr->pr_cpuset;
		mtx_unlock(&pr->pr_mtx);
		return (0);
	}
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (0);
	default:
		return (EINVAL);
	}
	error = p_cansched(curthread, p);
	if (error) {
		PROC_UNLOCK(p);
		return (error);
	}
	if (td == NULL)
		td = FIRST_THREAD_IN_PROC(p);
	*pp = p;
	*tdp = td;
	return (0);
}
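
/*
 * A minimal sketch of the calling convention, mirroring the uses later
 * in this file: on success with a pid or tid, the proc is returned
 * locked and the caller must unlock it; with a setid, a referenced set
 * is returned and the caller must release it:
 *
 *	struct proc *p;
 *	struct thread *ttd;
 *	struct cpuset *set;
 *
 *	error = cpuset_which(CPU_WHICH_PID, -1, &p, &ttd, &set);
 *	if (error == 0) {
 *		... inspect p and ttd under the proc lock ...
 *		PROC_UNLOCK(p);
 *	}
 */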

static int
cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
    const struct domainset *domain)
{
	struct cpuset *parent;
	struct domainset *dset;

	parent = cpuset_getbase(set);
	/*
	 * If we are restricting a cpu mask it must be a subset of the
	 * parent or invalid CPUs have been specified.
	 */
	if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
		return (EINVAL);

	/*
	 * If we are restricting a domain mask it must be a subset of the
	 * parent or invalid domains have been specified.
	 */
	dset = parent->cs_domain;
	if (domain != NULL && !domainset_valid(dset, domain))
		return (EINVAL);

	return (0);
}

/*
 * Create an anonymous set with the provided mask in the space provided by
 * 'nset'.  If the passed in set is anonymous we use its parent otherwise
 * the new set is a child of 'set'.
 */
static int
cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
    const cpuset_t *mask, const struct domainset *domain,
    struct setlist *cpusets, struct domainlist *domains)
{
	struct cpuset *parent;
	struct cpuset *nset;
	struct domainset *dset;
	struct domainset *d;
	int error;

	error = cpuset_testshadow(set, mask, domain);
	if (error)
		return (error);

	parent = cpuset_getbase(set);
	dset = parent->cs_domain;
	if (mask == NULL)
		mask = &set->cs_mask;
	if (domain != NULL)
		d = domainset_shadow(dset, domain, domains);
	else
		d = set->cs_domain;
	nset = LIST_FIRST(cpusets);
	error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
	if (error == 0) {
		LIST_REMOVE(nset, cs_link);
		*nsetp = nset;
	}
	return (error);
}

static struct cpuset *
cpuset_update_thread(struct thread *td, struct cpuset *nset)
{
	struct cpuset *tdset;

	tdset = td->td_cpuset;
	td->td_cpuset = nset;
	td->td_domain.dr_policy = nset->cs_domain;
	sched_affinity(td);

	return (tdset);
}

static int
cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
    struct domainset *domain)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);
	if (mask == NULL)
		mask = &tdset->cs_mask;
	if (domain == NULL)
		domain = tdset->cs_domain;
	return (cpuset_testshadow(parent, mask, domain));
}

static int
cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
    struct domainset *domain, struct cpuset **nsetp,
    struct setlist *freelist, struct domainlist *domainlist)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);
	if (mask == NULL)
		mask = &tdset->cs_mask;
	if (domain == NULL)
		domain = tdset->cs_domain;
	return (cpuset_shadow(parent, nsetp, mask, domain, freelist,
	    domainlist));
}

static int
cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
    cpuset_t *mask, struct domainset *domain)
{
	struct cpuset *parent;

	parent = cpuset_getbase(tdset);

	/*
	 * If the thread restricted its mask then apply that same
	 * restriction to the new set, otherwise take it wholesale.
	 */
	if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
		CPU_COPY(&tdset->cs_mask, mask);
		CPU_AND(mask, &set->cs_mask);
	} else
		CPU_COPY(&set->cs_mask, mask);

	/*
	 * If the thread restricted the domain then we apply the
	 * restriction to the new set but retain the policy.
	 */
	if (tdset->cs_domain != parent->cs_domain) {
		domainset_copy(tdset->cs_domain, domain);
		DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
	} else
		domainset_copy(set->cs_domain, domain);

	if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
		return (EDEADLK);

	return (0);
}

static int
cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
{
	struct domainset domain;
	cpuset_t mask;

	if (tdset->cs_id != CPUSET_INVALID)
		return (0);
	return (cpuset_setproc_setthread_mask(tdset, set, &mask, &domain));
}

static int
cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
    struct cpuset **nsetp, struct setlist *freelist,
    struct domainlist *domainlist)
{
	struct domainset domain;
	cpuset_t mask;
	int error;

	/*
	 * If we're replacing on a thread that has not constrained the
	 * original set we can simply accept the new set.
	 */
	if (tdset->cs_id != CPUSET_INVALID) {
		*nsetp = cpuset_ref(set);
		return (0);
	}
	error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
	if (error)
		return (error);

	return (cpuset_shadow(set, nsetp, &mask, &domain, freelist,
	    domainlist));
}

/*
 * Handle three cases for updating an entire process.
 *
 * 1) Set is non-null.  This reparents all anonymous sets to the provided
 *    set and replaces all non-anonymous td_cpusets with the provided set.
 * 2) Mask is non-null.  This replaces or creates anonymous sets for every
 *    thread with the existing base as a parent.
 * 3) Domain is non-null.  This creates anonymous sets for every thread
 *    and replaces the domain set.
 *
 * (The call shape for each case is sketched after the function body.)
 *
 * This is overly complicated because we can't allocate while holding a
 * spinlock and spinlocks must be held while changing and examining thread
 * state.
 */
static int
cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
    struct domainset *domain)
{
	struct setlist freelist;
	struct setlist droplist;
	struct domainlist domainlist;
	struct cpuset *nset;
	struct thread *td;
	struct proc *p;
	int threads;
	int nfree;
	int error;

	/*
	 * The algorithm requires two passes due to locking considerations.
	 *
	 * 1) Lookup the process and acquire the locks in the required order.
	 * 2) If enough cpusets have not been allocated release the locks and
	 *    allocate them.  Loop.
	 */
	/* Each freelist is primed with one entry, hence nfree starts at 1. */
	cpuset_freelist_init(&freelist, 1);
	domainset_freelist_init(&domainlist, 1);
	nfree = 1;
	LIST_INIT(&droplist);
	for (;;) {
		error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
		if (error)
			goto out;
		if (nfree >= p->p_numthreads)
			break;
		threads = p->p_numthreads;
		PROC_UNLOCK(p);
		if (nfree < threads) {
			cpuset_freelist_add(&freelist, threads - nfree);
			domainset_freelist_add(&domainlist, threads - nfree);
			nfree = threads;
		}
	}
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Now that the appropriate locks are held and we have enough cpusets,
	 * make sure the operation will succeed before applying changes.  The
	 * proc lock prevents td_cpuset from changing between calls.
	 */
	error = 0;
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (set != NULL)
			error = cpuset_setproc_test_setthread(td->td_cpuset,
			    set);
		else
			error = cpuset_setproc_test_maskthread(td->td_cpuset,
			    mask, domain);
		thread_unlock(td);
		if (error)
			goto unlock_out;
	}
	/*
	 * Replace each thread's cpuset while using deferred release.  We
	 * must do this because the thread lock must be held while operating
	 * on the thread and this limits the type of operations allowed.
	 */
	FOREACH_THREAD_IN_PROC(p, td) {
		thread_lock(td);
		if (set != NULL)
			error = cpuset_setproc_setthread(td->td_cpuset, set,
			    &nset, &freelist, &domainlist);
		else
			error = cpuset_setproc_maskthread(td->td_cpuset, mask,
			    domain, &nset, &freelist, &domainlist);
		if (error) {
			thread_unlock(td);
			break;
		}
		cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
		thread_unlock(td);
	}
unlock_out:
	PROC_UNLOCK(p);
out:
	while ((nset = LIST_FIRST(&droplist)) != NULL)
		cpuset_rel_complete(nset);
	cpuset_freelist_free(&freelist);
	domainset_freelist_free(&domainlist);
	return (error);
}
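
/*
 * For reference, the three cases described before cpuset_setproc() map
 * onto calls of the following shape (see the syscall handlers later in
 * this file):
 *
 *	cpuset_setproc(pid, set, NULL, NULL);	   1) install a named set
 *	cpuset_setproc(pid, NULL, mask, NULL);	   2) apply an anonymous mask
 *	cpuset_setproc(pid, NULL, NULL, domain);   3) apply a domain policy
 */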

static int
bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
{
	size_t bytes;
	int i, once;
	char *p;

	once = 0;
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (once != 0) {
			if (bufsiz < 1)
				return (0);
			*p = ',';
			p++;
			bufsiz--;
		} else
			once = 1;
		if (bufsiz < sizeof(__STRING(ULONG_MAX)))
			return (0);
		bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
		p += bytes;
		bufsiz -= bytes;
	}
	return (p - buf);
}

static int
bitset_strscan(struct bitset *set, int setlen, const char *buf)
{
	int i, ret;
	const char *p;

	BIT_ZERO(setlen, set);
	p = buf;
	for (i = 0; i < __bitset_words(setlen); i++) {
		if (*p == ',') {
			p++;
			continue;
		}
		ret = sscanf(p, "%lx", &set->__bits[i]);
		if (ret == 0 || ret == -1)
			break;
		while (isxdigit(*p))
			p++;
	}
	return (p - buf);
}

/*
 * Return a string representing a valid layout for a cpuset_t object.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
char *
cpusetobj_strprint(char *buf, const cpuset_t *set)
{

	bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
	    CPU_SETSIZE);
	return (buf);
}

/*
 * Build a valid cpuset_t object from a string representation.
 * It expects an incoming buffer at least sized as CPUSETBUFSIZ.
 */
int
cpusetobj_strscan(cpuset_t *set, const char *buf)
{
	char p;

	if (strlen(buf) > CPUSETBUFSIZ - 1)
		return (-1);

	p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
	if (p != '\0')
		return (-1);

	return (0);
}
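
/*
 * For example (illustrative only), with a two-word cpuset_t a mask with
 * cpus 0 and 65 set prints as "1,2": one hex word per long, lowest word
 * first.  cpusetobj_strscan() accepts the same layout back.  Given a
 * populated 'mask':
 *
 *	char buf[CPUSETBUFSIZ];
 *
 *	printf("%s\n", cpusetobj_strprint(buf, &mask));
 *	if (cpusetobj_strscan(&mask, buf) != 0)
 *		... reject invalid input ...
 */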

/*
 * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer
 * to a domainset is in arg1.  If the user specifies a valid domainset the
 * pointer is updated.
 *
 * Format is:
 * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
 */
int
sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
{
	char buf[DOMAINSETBUFSIZ];
	struct domainset *dset;
	struct domainset key;
	int policy, prefer, error;
	char *p;

	dset = *(struct domainset **)arg1;
	error = 0;

	if (dset != NULL) {
		p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
		    (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
		sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
	} else
		sprintf(buf, "<NULL>");
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/*
	 * Read in and validate the string.
	 */
	memset(&key, 0, sizeof(key));
	p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
	    DOMAINSET_SETSIZE, buf)];
	if (p == buf)
		return (EINVAL);
	if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
		return (EINVAL);
	key.ds_policy = policy;
	key.ds_prefer = prefer;

	/* domainset_create() validates the policy. */
	dset = domainset_create(&key);
	if (dset == NULL)
		return (EINVAL);
	*(struct domainset **)arg1 = dset;

	return (error);
}

/*
 * Apply an anonymous mask or a domain to a single thread.
 */
static int
_cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
{
	struct setlist cpusets;
	struct domainlist domainlist;
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *td;
	struct proc *p;
	int error;

	cpuset_freelist_init(&cpusets, 1);
	domainset_freelist_init(&domainlist, domain != NULL);
	error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
	if (error)
		goto out;
	set = NULL;
	thread_lock(td);
	error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
	    &cpusets, &domainlist);
	if (error == 0)
		set = cpuset_update_thread(td, nset);
	thread_unlock(td);
	PROC_UNLOCK(p);
	if (set)
		cpuset_rel(set);
out:
	cpuset_freelist_free(&cpusets);
	domainset_freelist_free(&domainlist);
	return (error);
}

/*
 * Apply an anonymous mask to a single thread.
 */
int
cpuset_setthread(lwpid_t id, cpuset_t *mask)
{

	return (_cpuset_setthread(id, mask, NULL));
}

/*
 * Apply new cpumask to the ithread.
 */
int
cpuset_setithread(lwpid_t id, int cpu)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	if (cpu == NOCPU)
		CPU_COPY(cpuset_root, &mask);
	else
		CPU_SET(cpu, &mask);
	return (_cpuset_setthread(id, &mask, NULL));
}

/*
 * Initialize static domainsets after NUMA information is available.  This is
 * called before memory allocators are initialized.
 */
void
domainset_init(void)
{
	struct domainset *dset;
	int i;

	dset = &domainset_roundrobin;
	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
	dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
	dset->ds_prefer = -1;
	_domainset_create(dset, NULL);

	for (i = 0; i < vm_ndomains; i++) {
		dset = &domainset_fixed[i];
		DOMAINSET_ZERO(&dset->ds_mask);
		DOMAINSET_SET(i, &dset->ds_mask);
		dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
		_domainset_create(dset, NULL);

		dset = &domainset_prefer[i];
		DOMAINSET_COPY(&all_domains, &dset->ds_mask);
		dset->ds_policy = DOMAINSET_POLICY_PREFER;
		dset->ds_prefer = i;
		_domainset_create(dset, NULL);
	}
}

/*
 * Create the domainsets for cpusets 0, 1, and 2.
 */
void
domainset_zero(void)
{
	struct domainset *dset, *tmp;

	mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);

	dset = &domainset0;
	DOMAINSET_COPY(&all_domains, &dset->ds_mask);
	dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
	dset->ds_prefer = -1;
	curthread->td_domain.dr_policy = _domainset_create(dset, NULL);

	domainset_copy(dset, &domainset2);
	domainset2.ds_policy = DOMAINSET_POLICY_INTERLEAVE;
	kernel_object->domain.dr_policy = _domainset_create(&domainset2, NULL);

	/* Remove empty domains from the global policies. */
	LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
		if (domainset_empty_vm(dset))
			LIST_REMOVE(dset, ds_link);
}

/*
 * Creates system-wide cpusets and the cpuset for thread0 including three
 * sets:
 *
 * 0 - The root set which should represent all valid processors in the
 *     system.  This set is immutable.
 * 1 - The default set which all processes are a member of until changed.
 *     This allows an administrator to move all threads off of given cpus to
 *     dedicate them to high priority tasks or save power etc.
 * 2 - The kernel set which allows restriction and policy to be applied only
 *     to kernel threads and the kernel_object.
 */
struct cpuset *
cpuset_thread0(void)
{
	struct cpuset *set;
	int i;
	int error __unused;

	cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_CACHE, 0);
	domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);

	/*
	 * Create the root system set (0) for the whole machine.  Doesn't use
	 * cpuset_create() due to NULL parent.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	CPU_COPY(&all_cpus, &set->cs_mask);
	LIST_INIT(&set->cs_children);
	LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
	refcount_init(&set->cs_ref, 1);
	set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
	set->cs_domain = &domainset0;
	cpuset_zero = set;
	cpuset_root = &set->cs_mask;

	/*
	 * Now derive a default (1), modifiable set from that to give out.
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
	KASSERT(error == 0, ("Error creating default set: %d\n", error));
	cpuset_default = set;
	/*
	 * Create the kernel set (2).
	 */
	set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
	error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
	KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
	set->cs_domain = &domainset2;
	cpuset_kernel = set;

	/*
	 * Initialize the unit allocator.  0, 1, and 2 are allocated above.
	 */
	cpuset_unr = new_unrhdr(3, INT_MAX, NULL);

	/*
	 * If MD code has not initialized per-domain cpusets, place all
	 * CPUs in domain 0.
	 */
	for (i = 0; i < MAXMEMDOM; i++)
		if (!CPU_EMPTY(&cpuset_domain[i]))
			goto domains_set;
	CPU_COPY(&all_cpus, &cpuset_domain[0]);
domains_set:

	return (cpuset_default);
}
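
/*
 * A sketch of the hierarchy built above; anonymous per-thread masks are
 * parented to whichever base set a thread happens to use:
 *
 *	set 0 (root, immutable, all cpus)
 *	 |- set 1 (default, handed to thread0)
 *	 `- set 2 (kernel)
 */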

void
cpuset_kernthread(struct thread *td)
{
	struct cpuset *set;

	thread_lock(td);
	set = td->td_cpuset;
	td->td_cpuset = cpuset_ref(cpuset_kernel);
	thread_unlock(td);
	cpuset_rel(set);
}

/*
 * Create a cpuset as in cpuset_create(), but mark the new 'set' as root.
 *
 * We are not going to reparent the td to it.  Use cpuset_setproc_update_set()
 * for that.
 *
 * In case of no error, returns the set in *setp with a single reference.
 */
int
cpuset_create_root(struct prison *pr, struct cpuset **setp)
{
	struct cpuset *set;
	int error;

	KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
	KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));

	set = NULL;
	error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
	if (error)
		return (error);

	KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
	    __func__, __LINE__));

	/* Mark the set as root. */
	set->cs_flags |= CPU_SET_ROOT;
	*setp = set;

	return (0);
}

int
cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
{
	int error;

	KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
	KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));

	cpuset_ref(set);
	error = cpuset_setproc(p->p_pid, set, NULL, NULL);
	/* Drop our reference on both success and failure. */
	cpuset_rel(set);
	return (error);
}

/*
 * In Capability mode, the only accesses that are permitted are to the current
 * thread and process' CPU and domain sets.
 */
static int
cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id)
{
	if (IN_CAPABILITY_MODE(td)) {
		if (level != CPU_LEVEL_WHICH)
			return (ECAPMODE);
		if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
			return (ECAPMODE);
		if (id != -1 &&
		    !(which == CPU_WHICH_TID && id == td->td_tid) &&
		    !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
			return (ECAPMODE);
	}
	return (0);
}
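
/*
 * An illustrative userland sketch of the rule enforced above: inside a
 * capsicum sandbox only self-referential CPU_LEVEL_WHICH queries remain
 * valid ('some_pid' is a placeholder for another process' pid):
 *
 *	cap_enter();
 *	// Allowed: the current thread's own mask.
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
 *	    sizeof(mask), &mask);
 *	// Denied with ECAPMODE: another process' mask.
 *	cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, some_pid,
 *	    sizeof(mask), &mask);
 */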

static const struct cpuset_copy_cb copy_set = {
	.cpuset_copyin = copyin,
	.cpuset_copyout = copyout
};

#ifndef _SYS_SYSPROTO_H_
struct cpuset_args {
	cpusetid_t *setid;
};
#endif
int
sys_cpuset(struct thread *td, struct cpuset_args *uap)
{
	struct cpuset *root;
	struct cpuset *set;
	int error;

	thread_lock(td);
	root = cpuset_refroot(td->td_cpuset);
	thread_unlock(td);
	set = NULL;
	error = cpuset_create(&set, root, &root->cs_mask);
	cpuset_rel(root);
	if (error)
		return (error);
	error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
	if (error == 0)
		error = cpuset_setproc(-1, set, NULL, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setid_args {
	cpuwhich_t which;
	id_t id;
	cpusetid_t setid;
};
#endif
int
sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
{

	return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
}

int
kern_cpuset_setid(struct thread *td, cpuwhich_t which,
    id_t id, cpusetid_t setid)
{
	struct cpuset *set;
	int error;

	/*
	 * Presently we only support per-process sets.
	 */
	if (which != CPU_WHICH_PID)
		return (EINVAL);
	set = cpuset_lookup(setid, td);
	if (set == NULL)
		return (ESRCH);
	error = cpuset_setproc(id, set, NULL, NULL);
	cpuset_rel(set);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getid_args {
	cpulevel_t level;
	cpuwhich_t which;
	id_t id;
	cpusetid_t *setid;
};
#endif
int
sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
{

	return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
	    uap->setid));
}

int
kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, cpusetid_t *setid)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpusetid_t tmpid;
	int error;

	if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
		return (EINVAL);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		return (error);
	switch (which) {
	case CPU_WHICH_TID:
	case CPU_WHICH_PID:
		thread_lock(ttd);
		set = cpuset_refbase(ttd->td_cpuset);
		thread_unlock(ttd);
		PROC_UNLOCK(p);
		break;
	case CPU_WHICH_CPUSET:
	case CPU_WHICH_JAIL:
		break;
	case CPU_WHICH_IRQ:
	case CPU_WHICH_DOMAIN:
		return (EINVAL);
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
		nset = cpuset_refroot(set);
		cpuset_rel(set);
		set = nset;
		break;
	case CPU_LEVEL_CPUSET:
		break;
	case CPU_LEVEL_WHICH:
		break;
	}
	tmpid = set->cs_id;
	cpuset_rel(set);
	if (error == 0)
		error = copyout(&tmpid, setid, sizeof(tmpid));

	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_getaffinity_args {
	cpulevel_t level;
	cpuwhich_t which;
	id_t id;
	size_t cpusetsize;
	cpuset_t *mask;
};
#endif
int
sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
{

	return (kern_cpuset_getaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

int
kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
{
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct proc *p;
	cpuset_t *mask;
	int error;
	size_t size;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	size = cpusetsize;
	mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		CPU_COPY(&nset->cs_mask, mask);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				CPU_OR(mask, &ttd->td_cpuset->cs_mask);
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			CPU_COPY(&set->cs_mask, mask);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_getaffinity(id, which, mask);
			break;
		case CPU_WHICH_DOMAIN:
			if (id < 0 || id >= MAXMEMDOM)
				error = ESRCH;
			else
				CPU_COPY(&cpuset_domain[id], mask);
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	if (error == 0)
		error = cb->cpuset_copyout(mask, maskp, size);
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setaffinity_args {
	cpulevel_t level;
	cpuwhich_t which;
	id_t id;
	size_t cpusetsize;
	const cpuset_t *mask;
};
#endif
int
sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
{

	return (kern_cpuset_setaffinity(td, uap->level, uap->which,
	    uap->id, uap->cpusetsize, uap->mask, &copy_set));
}

int
kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t cpusetsize, const cpuset_t *maskp,
    const struct cpuset_copy_cb *cb)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	cpuset_t *mask;
	int error;

	if (cpusetsize < sizeof(cpuset_t) || cpusetsize > CPU_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(cpusetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, cpusetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (cpusetsize > sizeof(cpuset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += cpusetsize;
		cp += sizeof(cpuset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (CPU_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify(nset, mask);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = cpuset_setthread(id, mask);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, mask, NULL);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify(set, mask);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
			error = intr_setaffinity(id, which, mask);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}
2019
2020 #ifndef _SYS_SYSPROTO_H_
2021 struct cpuset_getdomain_args {
2022 cpulevel_t level;
2023 cpuwhich_t which;
2024 id_t id;
2025 size_t domainsetsize;
2026 domainset_t *mask;
2027 int *policy;
2028 };
2029 #endif
2030 int
2031 sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
2032 {
2033
2034 return (kern_cpuset_getdomain(td, uap->level, uap->which,
2035 uap->id, uap->domainsetsize, uap->mask, uap->policy, ©_set));
2036 }
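
/*
 * Example (illustrative only): retrieving the calling process's memory
 * domain mask and policy from userspace via cpuset_getdomain(2), with
 * error handling elided; the macros come from <sys/domainset.h>:
 *
 *	domainset_t mask;
 *	int policy;
 *
 *	(void)cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask, &policy);
 */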

int
kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp,
    const struct cpuset_copy_cb *cb)
{
	struct domainset outset;
	struct thread *ttd;
	struct cpuset *nset;
	struct cpuset *set;
	struct domainset *dset;
	struct proc *p;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	bzero(&outset, sizeof(outset));
	error = cpuset_which(which, id, &p, &ttd, &set);
	if (error)
		goto out;
	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		domainset_copy(nset->cs_domain, &outset);
		cpuset_rel(nset);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			thread_lock(ttd);
			domainset_copy(ttd->td_cpuset->cs_domain, &outset);
			thread_unlock(ttd);
			break;
		case CPU_WHICH_PID:
			FOREACH_THREAD_IN_PROC(p, ttd) {
				thread_lock(ttd);
				dset = ttd->td_cpuset->cs_domain;
				/* Show all domains in the proc. */
				DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
				/* Last policy wins. */
				outset.ds_policy = dset->ds_policy;
				outset.ds_prefer = dset->ds_prefer;
				thread_unlock(ttd);
			}
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			domainset_copy(set->cs_domain, &outset);
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	if (set)
		cpuset_rel(set);
	if (p)
		PROC_UNLOCK(p);
	/*
	 * Translate prefer into a set containing only the preferred domain,
	 * not the entire fallback set.
	 */
	if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
		DOMAINSET_ZERO(&outset.ds_mask);
		DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
	}
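	/*
	 * For example (illustrative): a set installed with PREFER on
	 * domain 1 stores ds_prefer == 1 and a ds_mask covering the full
	 * fallback set, so it is reported back here as a mask containing
	 * only domain 1, matching what the caller originally passed in.
	 */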
	DOMAINSET_COPY(&outset.ds_mask, mask);
	if (error == 0)
		error = cb->cpuset_copyout(mask, maskp, domainsetsize);
	if (error == 0)
		if (suword32(policyp, outset.ds_policy) != 0)
			error = EFAULT;
out:
	free(mask, M_TEMP);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct cpuset_setdomain_args {
	cpulevel_t	level;
	cpuwhich_t	which;
	id_t		id;
	size_t		domainsetsize;
	domainset_t	*mask;
	int		policy;
};
#endif
int
sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
{

	return (kern_cpuset_setdomain(td, uap->level, uap->which,
	    uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
}
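
/*
 * Example (illustrative only): from userspace, cpuset_setdomain(2) with
 * the PREFER policy names exactly one NUMA domain; error handling is
 * elided and the domain number is arbitrary:
 *
 *	domainset_t mask;
 *
 *	DOMAINSET_ZERO(&mask);
 *	DOMAINSET_SET(1, &mask);
 *	(void)cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
 *	    sizeof(mask), &mask, DOMAINSET_POLICY_PREFER);
 *
 * As enforced below, PREFER rejects masks with more than one domain set.
 */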

int
kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
    id_t id, size_t domainsetsize, const domainset_t *maskp, int policy,
    const struct cpuset_copy_cb *cb)
{
	struct cpuset *nset;
	struct cpuset *set;
	struct thread *ttd;
	struct proc *p;
	struct domainset domain;
	domainset_t *mask;
	int error;

	if (domainsetsize < sizeof(domainset_t) ||
	    domainsetsize > DOMAINSET_MAXSIZE / NBBY)
		return (ERANGE);
	if (policy <= DOMAINSET_POLICY_INVALID ||
	    policy > DOMAINSET_POLICY_MAX)
		return (EINVAL);
	error = cpuset_check_capabilities(td, level, which, id);
	if (error != 0)
		return (error);
	memset(&domain, 0, sizeof(domain));
	mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
	error = cb->cpuset_copyin(maskp, mask, domainsetsize);
	if (error)
		goto out;
	/*
	 * Verify that no high bits are set.
	 */
	if (domainsetsize > sizeof(domainset_t)) {
		char *end;
		char *cp;

		end = cp = (char *)&mask->__bits;
		end += domainsetsize;
		cp += sizeof(domainset_t);
		while (cp != end)
			if (*cp++ != 0) {
				error = EINVAL;
				goto out;
			}
	}
	if (DOMAINSET_EMPTY(mask)) {
		error = EDEADLK;
		goto out;
	}
	DOMAINSET_COPY(mask, &domain.ds_mask);
	domain.ds_policy = policy;

	/*
	 * Sanitize the provided mask.
	 */
	if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
		error = EINVAL;
		goto out;
	}

	/* Translate preferred policy into a mask and fallback. */
	if (policy == DOMAINSET_POLICY_PREFER) {
		/* Only support a single preferred domain. */
		if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
			error = EINVAL;
			goto out;
		}
		domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
		/* This will be constrained by domainset_shadow(). */
		DOMAINSET_COPY(&all_domains, &domain.ds_mask);
	}
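	/*
	 * For example (illustrative): a PREFER request naming only domain 1
	 * leaves ds_prefer == 1 and widens ds_mask to all_domains, so
	 * allocations may fall back to any domain once domain 1 is
	 * exhausted; kern_cpuset_getdomain() reverses this translation
	 * when reporting the mask back to userspace.
	 */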

	/*
	 * When given an impossible policy, fall back to interleaving
	 * across all domains.
	 */
	if (domainset_empty_vm(&domain))
		domainset_copy(&domainset2, &domain);

	switch (level) {
	case CPU_LEVEL_ROOT:
	case CPU_LEVEL_CPUSET:
		error = cpuset_which(which, id, &p, &ttd, &set);
		if (error)
			break;
		switch (which) {
		case CPU_WHICH_TID:
		case CPU_WHICH_PID:
			thread_lock(ttd);
			set = cpuset_ref(ttd->td_cpuset);
			thread_unlock(ttd);
			PROC_UNLOCK(p);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		case CPU_WHICH_DOMAIN:
			error = EINVAL;
			goto out;
		}
		if (level == CPU_LEVEL_ROOT)
			nset = cpuset_refroot(set);
		else
			nset = cpuset_refbase(set);
		error = cpuset_modify_domain(nset, &domain);
		cpuset_rel(nset);
		cpuset_rel(set);
		break;
	case CPU_LEVEL_WHICH:
		switch (which) {
		case CPU_WHICH_TID:
			error = _cpuset_setthread(id, NULL, &domain);
			break;
		case CPU_WHICH_PID:
			error = cpuset_setproc(id, NULL, NULL, &domain);
			break;
		case CPU_WHICH_CPUSET:
		case CPU_WHICH_JAIL:
			error = cpuset_which(which, id, &p, &ttd, &set);
			if (error == 0) {
				error = cpuset_modify_domain(set, &domain);
				cpuset_rel(set);
			}
			break;
		case CPU_WHICH_IRQ:
		case CPU_WHICH_INTRHANDLER:
		case CPU_WHICH_ITHREAD:
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	free(mask, M_TEMP);
	return (error);
}

#ifdef DDB

static void
ddb_display_bitset(const struct bitset *set, int size)
{
	int bit, once;

	for (once = 0, bit = 0; bit < size; bit++) {
		if (CPU_ISSET(bit, set)) {
			if (once == 0) {
				db_printf("%d", bit);
				once = 1;
			} else
				db_printf(",%d", bit);
		}
	}
	if (once == 0)
		db_printf("<none>");
}

void
ddb_display_cpuset(const cpuset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
}

static void
ddb_display_domainset(const domainset_t *set)
{
	ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
}

DB_SHOW_COMMAND(cpusets, db_show_cpusets)
{
	struct cpuset *set;

	LIST_FOREACH(set, &cpuset_ids, cs_link) {
		db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
		    set, set->cs_id, set->cs_ref, set->cs_flags,
		    (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
		db_printf("  cpu mask=");
		ddb_display_cpuset(&set->cs_mask);
		db_printf("\n");
		db_printf("  domain policy %d prefer %d mask=",
		    set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
		ddb_display_domainset(&set->cs_domain->ds_mask);
		db_printf("\n");
		if (db_pager_quit)
			break;
	}
}
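
/*
 * Sample output (illustrative; pointer and policy values are from a
 * hypothetical two-CPU, single-domain system):
 *
 *	db> show cpusets
 *	set=0xfffff80003000e88 id=1      ref=4      flags=0x0000 parent id=0
 *	  cpu mask=0,1
 *	  domain policy 1 prefer -1 mask=0
 */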

DB_SHOW_COMMAND(domainsets, db_show_domainsets)
{
	struct domainset *set;

	LIST_FOREACH(set, &cpuset_domains, ds_link) {
		db_printf("set=%p policy %d prefer %d cnt %d\n",
		    set, set->ds_policy, set->ds_prefer, set->ds_cnt);
		db_printf("  mask =");
		ddb_display_domainset(&set->ds_mask);
		db_printf("\n");
	}
}
#endif /* DDB */