FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_cpuset.c


    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2008,  Jeffrey Roberson <jeff@freebsd.org>
    5  * All rights reserved.
    6  * 
    7  * Copyright (c) 2008 Nokia Corporation
    8  * All rights reserved.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice unmodified, this list of conditions, and the following
   15  *    disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
   21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
   22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
   24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
   25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
   29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   30  *
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __FBSDID("$FreeBSD$");
   35 
   36 #include "opt_ddb.h"
   37 #include "opt_ktrace.h"
   38 
   39 #include <sys/param.h>
   40 #include <sys/systm.h>
   41 #include <sys/sysctl.h>
   42 #include <sys/ctype.h>
   43 #include <sys/sysproto.h>
   44 #include <sys/jail.h>
   45 #include <sys/kernel.h>
   46 #include <sys/lock.h>
   47 #include <sys/malloc.h>
   48 #include <sys/mutex.h>
   49 #include <sys/priv.h>
   50 #include <sys/proc.h>
   51 #include <sys/refcount.h>
   52 #include <sys/sched.h>
   53 #include <sys/smp.h>
   54 #include <sys/syscallsubr.h>
   55 #include <sys/sysent.h>
   56 #include <sys/capsicum.h>
   57 #include <sys/cpuset.h>
   58 #include <sys/domainset.h>
   59 #include <sys/sx.h>
   60 #include <sys/queue.h>
   61 #include <sys/libkern.h>
   62 #include <sys/limits.h>
   63 #include <sys/bus.h>
   64 #include <sys/interrupt.h>
   65 #include <sys/vmmeter.h>
   66 #include <sys/ktrace.h>
   67 
   68 #include <vm/uma.h>
   69 #include <vm/vm.h>
   70 #include <vm/vm_object.h>
   71 #include <vm/vm_page.h>
   72 #include <vm/vm_pageout.h>
   73 #include <vm/vm_extern.h>
   74 #include <vm/vm_param.h>
   75 #include <vm/vm_phys.h>
   76 #include <vm/vm_pagequeue.h>
   77 
   78 #ifdef DDB
   79 #include <ddb/ddb.h>
   80 #endif /* DDB */
   81 
   82 /*
   83  * cpusets provide a mechanism for creating and manipulating sets of
   84  * processors for the purpose of constraining the scheduling of threads to
   85  * specific processors.
   86  *
   87  * Each process belongs to an identified set; by default this is set 1.  Each
   88  * thread may further restrict the cpus it may run on to a subset of this
   89  * named set.  This creates an anonymous set which other threads and processes
   90  * may not join by number.
   91  *
   92  * The named set is referred to herein as the 'base' set to avoid ambiguity.
   93  * This set is usually a child of a 'root' set while the anonymous set may
   94  * simply be referred to as a mask.  In the syscall API these are referred to
   95  * as the ROOT, CPUSET, and MASK levels, where CPUSET is called 'base' here.
   96  *
   97  * Threads inherit their set from their creator whether it be anonymous or
   98  * not.  This means that anonymous sets are immutable because they may be
   99  * shared.  To modify an anonymous set a new set is created with the desired
  100  * mask and the same parent as the existing anonymous set.  This gives the
  101  * illusion of each thread having a private mask.
  102  *
  103  * Via the syscall APIs a user may ask to retrieve or modify the root, base,
  104  * or mask that is discovered via a pid, tid, or setid.  Modifying a set
  105  * modifies all numbered and anonymous child sets to comply with the new mask.
  106  * Modifying a pid or tid's mask applies only to that tid, but the new mask
  107  * must still fall within the assigned parent set.
  108  *
  109  * A thread may not be assigned to a group separate from other threads in
  110  * the process.  This is to remove ambiguity when the setid is queried with
  111  * a pid argument.  There is no other technical limitation.
  112  *
  113  * This somewhat complex arrangement is intended to make it easy for
  114  * applications to query available processors and bind their threads to
  115  * specific processors while also allowing administrators to dynamically
  116  * reprovision by changing sets which apply to groups of processes.
  117  *
  118  * A simple application should not concern itself with sets at all; rather,
  119  * it should apply masks to its own threads via CPU_WHICH_TID and a -1 id
  120  * meaning 'curthread'.  It may query the cpus available to that tid with
  121  * a getaffinity call (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...); see below.
  122  */
  123 
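To make the guidance in the comment above concrete: a minimal userland sketch of
the recommended pattern, using cpuset_setaffinity(2) and cpuset_getaffinity(2).
The helper name is hypothetical and error handling is abbreviated.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <err.h>

/* Pin the calling thread (CPU_WHICH_TID, id -1) to CPU 0, then query
 * the cpus available to the process's base set, as suggested above. */
static void
pin_current_thread(void)
{
        cpuset_t mask;

        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
            sizeof(mask), &mask) != 0)
                err(1, "cpuset_setaffinity");
        if (cpuset_getaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
            sizeof(mask), &mask) != 0)
                err(1, "cpuset_getaffinity");
}
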
  124 LIST_HEAD(domainlist, domainset);
  125 struct domainset __read_mostly domainset_firsttouch;
  126 struct domainset __read_mostly domainset_fixed[MAXMEMDOM];
  127 struct domainset __read_mostly domainset_interleave;
  128 struct domainset __read_mostly domainset_prefer[MAXMEMDOM];
  129 struct domainset __read_mostly domainset_roundrobin;
  130 
  131 static uma_zone_t cpuset_zone;
  132 static uma_zone_t domainset_zone;
  133 static struct mtx cpuset_lock;
  134 static struct setlist cpuset_ids;
  135 static struct domainlist cpuset_domains;
  136 static struct unrhdr *cpuset_unr;
  137 static struct cpuset *cpuset_zero, *cpuset_default, *cpuset_kernel;
  138 static struct domainset *domainset0, *domainset2;
  139 
  140 /* Return the size of cpuset_t at the kernel level */
  141 SYSCTL_INT(_kern_sched, OID_AUTO, cpusetsize, CTLFLAG_RD | CTLFLAG_CAPRD,
  142     SYSCTL_NULL_INT_PTR, sizeof(cpuset_t), "sizeof(cpuset_t)");
  143 
  144 cpuset_t *cpuset_root;
  145 cpuset_t cpuset_domain[MAXMEMDOM];
  146 
  147 static int domainset_valid(const struct domainset *, const struct domainset *);
  148 
  149 /*
  150  * Find the first non-anonymous set starting from 'set'.
  151  */
  152 static struct cpuset *
  153 cpuset_getbase(struct cpuset *set)
  154 {
  155 
  156         if (set->cs_id == CPUSET_INVALID)
  157                 set = set->cs_parent;
  158         return (set);
  159 }
  160 
  161 /*
  162  * Walks up the tree from 'set' to find the root.
  163  */
  164 static struct cpuset *
  165 cpuset_getroot(struct cpuset *set)
  166 {
  167 
  168         while ((set->cs_flags & CPU_SET_ROOT) == 0 && set->cs_parent != NULL)
  169                 set = set->cs_parent;
  170         return (set);
  171 }
  172 
  173 /*
  174  * Acquire a reference to a cpuset; all pointers must be tracked with refs.
  175  */
  176 struct cpuset *
  177 cpuset_ref(struct cpuset *set)
  178 {
  179 
  180         refcount_acquire(&set->cs_ref);
  181         return (set);
  182 }
  183 
  184 /*
  185  * Walks up the tree from 'set' to find the root.  Returns the root
  186  * referenced.
  187  */
  188 static struct cpuset *
  189 cpuset_refroot(struct cpuset *set)
  190 {
  191 
  192         return (cpuset_ref(cpuset_getroot(set)));
  193 }
  194 
  195 /*
  196  * Find the first non-anonymous set starting from 'set'.  Returns this set
  197  * referenced.  May return the passed-in set with an extra ref if it is
  198  * not anonymous.
  199  */
  200 static struct cpuset *
  201 cpuset_refbase(struct cpuset *set)
  202 {
  203 
  204         return (cpuset_ref(cpuset_getbase(set)));
  205 }
  206 
  207 /*
  208  * Release a reference in a context where it is safe to allocate.
  209  */
  210 void
  211 cpuset_rel(struct cpuset *set)
  212 {
  213         cpusetid_t id;
  214 
  215         if (refcount_release_if_not_last(&set->cs_ref))
  216                 return;
  217         mtx_lock_spin(&cpuset_lock);
  218         if (!refcount_release(&set->cs_ref)) {
  219                 mtx_unlock_spin(&cpuset_lock);
  220                 return;
  221         }
  222         LIST_REMOVE(set, cs_siblings);
  223         id = set->cs_id;
  224         if (id != CPUSET_INVALID)
  225                 LIST_REMOVE(set, cs_link);
  226         mtx_unlock_spin(&cpuset_lock);
  227         cpuset_rel(set->cs_parent);
  228         uma_zfree(cpuset_zone, set);
  229         if (id != CPUSET_INVALID)
  230                 free_unr(cpuset_unr, id);
  231 }
  232 
  233 /*
  234  * Deferred release must be used when in a context that is not safe to
  235  * allocate/free.  This places any unreferenced sets on the list 'head'.
  236  */
  237 static void
  238 cpuset_rel_defer(struct setlist *head, struct cpuset *set)
  239 {
  240 
  241         if (refcount_release_if_not_last(&set->cs_ref))
  242                 return;
  243         mtx_lock_spin(&cpuset_lock);
  244         if (!refcount_release(&set->cs_ref)) {
  245                 mtx_unlock_spin(&cpuset_lock);
  246                 return;
  247         }
  248         LIST_REMOVE(set, cs_siblings);
  249         if (set->cs_id != CPUSET_INVALID)
  250                 LIST_REMOVE(set, cs_link);
  251         LIST_INSERT_HEAD(head, set, cs_link);
  252         mtx_unlock_spin(&cpuset_lock);
  253 }
  254 
  255 /*
  256  * Complete a deferred release.  Removes the set from the list provided to
  257  * cpuset_rel_defer.
  258  */
  259 static void
  260 cpuset_rel_complete(struct cpuset *set)
  261 {
  262         cpusetid_t id;
  263 
  264         id = set->cs_id;
  265         LIST_REMOVE(set, cs_link);
  266         cpuset_rel(set->cs_parent);
  267         uma_zfree(cpuset_zone, set);
  268         if (id != CPUSET_INVALID)
  269                 free_unr(cpuset_unr, id);
  270 }
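
Taken together, cpuset_rel_defer() and cpuset_rel_complete() form a two-phase
release.  A sketch of the pattern, assuming a thread 'td' and a replacement set
'nset' (cpuset_update_thread() is defined later in this file; cpuset_setproc()
below uses exactly this sequence):

        struct setlist droplist;
        struct cpuset *tmp;

        LIST_INIT(&droplist);
        thread_lock(td);
        /* Defer the release; the thread lock is a spinlock, so it is
         * not safe to allocate or free here. */
        cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
        thread_unlock(td);
        /* Safe context again: complete the deferred releases. */
        while ((tmp = LIST_FIRST(&droplist)) != NULL)
                cpuset_rel_complete(tmp);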
  271 
  272 /*
  273  * Find a set based on an id.  Returns it with a ref.
  274  */
  275 static struct cpuset *
  276 cpuset_lookup(cpusetid_t setid, struct thread *td)
  277 {
  278         struct cpuset *set;
  279 
  280         if (setid == CPUSET_INVALID)
  281                 return (NULL);
  282         mtx_lock_spin(&cpuset_lock);
  283         LIST_FOREACH(set, &cpuset_ids, cs_link)
  284                 if (set->cs_id == setid)
  285                         break;
  286         if (set)
  287                 cpuset_ref(set);
  288         mtx_unlock_spin(&cpuset_lock);
  289 
  290         KASSERT(td != NULL, ("[%s:%d] td is NULL", __func__, __LINE__));
  291         if (set != NULL && jailed(td->td_ucred)) {
  292                 struct cpuset *jset, *tset;
  293 
  294                 jset = td->td_ucred->cr_prison->pr_cpuset;
  295                 for (tset = set; tset != NULL; tset = tset->cs_parent)
  296                         if (tset == jset)
  297                                 break;
  298                 if (tset == NULL) {
  299                         cpuset_rel(set);
  300                         set = NULL;
  301                 }
  302         }
  303 
  304         return (set);
  305 }
  306 
  307 /*
  308  * Initialize a set in the space provided in 'set' with the provided parameters.
  309  * The set is returned with a single ref.  May return EDEADLK if the set
  310  * will have no valid cpu based on restrictions from the parent.
  311  */
  312 static int
  313 cpuset_init(struct cpuset *set, struct cpuset *parent,
  314     const cpuset_t *mask, struct domainset *domain, cpusetid_t id)
  315 {
  316 
  317         if (domain == NULL)
  318                 domain = parent->cs_domain;
  319         if (mask == NULL)
  320                 mask = &parent->cs_mask;
  321         if (!CPU_OVERLAP(&parent->cs_mask, mask))
  322                 return (EDEADLK);
  323         /* The domain must be prepared ahead of time. */
  324         if (!domainset_valid(parent->cs_domain, domain))
  325                 return (EDEADLK);
  326         CPU_COPY(mask, &set->cs_mask);
  327         LIST_INIT(&set->cs_children);
  328         refcount_init(&set->cs_ref, 1);
  329         set->cs_flags = 0;
  330         mtx_lock_spin(&cpuset_lock);
  331         set->cs_domain = domain;
  332         CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
  333         set->cs_id = id;
  334         set->cs_parent = cpuset_ref(parent);
  335         LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
  336         if (set->cs_id != CPUSET_INVALID)
  337                 LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
  338         mtx_unlock_spin(&cpuset_lock);
  339 
  340         return (0);
  341 }
  342 
  343 /*
  344  * Create a new non-anonymous set with the requested parent and mask.  May
  345  * fail if the mask is invalid or a new set number cannot be
  346  * allocated.
  347  *
  348  * If *setp is not NULL, then it will be used as-is.  The caller must take
  349  * into account that *setp will be inserted at the head of cpuset_ids and
  350  * plan any potentially conflicting cs_link usage accordingly.
  351  */
  352 static int
  353 cpuset_create(struct cpuset **setp, struct cpuset *parent, const cpuset_t *mask)
  354 {
  355         struct cpuset *set;
  356         cpusetid_t id;
  357         int error;
  358         bool dofree;
  359 
  360         id = alloc_unr(cpuset_unr);
  361         if (id == -1)
  362                 return (ENFILE);
  363         dofree = (*setp == NULL);
  364         if (*setp != NULL)
  365                 set = *setp;
  366         else
  367                 *setp = set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
  368         error = cpuset_init(set, parent, mask, NULL, id);
  369         if (error == 0)
  370                 return (0);
  371         free_unr(cpuset_unr, id);
  372         if (dofree)
  373                 uma_zfree(cpuset_zone, set);
  374 
  375         return (error);
  376 }
  377 
  378 static void
  379 cpuset_freelist_add(struct setlist *list, int count)
  380 {
  381         struct cpuset *set;
  382         int i;
  383 
  384         for (i = 0; i < count; i++) {
  385                 set = uma_zalloc(cpuset_zone, M_ZERO | M_WAITOK);
  386                 LIST_INSERT_HEAD(list, set, cs_link);
  387         }
  388 }
  389 
  390 static void
  391 cpuset_freelist_init(struct setlist *list, int count)
  392 {
  393 
  394         LIST_INIT(list);
  395         cpuset_freelist_add(list, count);
  396 }
  397 
  398 static void
  399 cpuset_freelist_free(struct setlist *list)
  400 {
  401         struct cpuset *set;
  402 
  403         while ((set = LIST_FIRST(list)) != NULL) {
  404                 LIST_REMOVE(set, cs_link);
  405                 uma_zfree(cpuset_zone, set);
  406         }
  407 }
  408 
  409 static void
  410 domainset_freelist_add(struct domainlist *list, int count)
  411 {
  412         struct domainset *set;
  413         int i;
  414 
  415         for (i = 0; i < count; i++) {
  416                 set = uma_zalloc(domainset_zone, M_ZERO | M_WAITOK);
  417                 LIST_INSERT_HEAD(list, set, ds_link);
  418         }
  419 }
  420 
  421 static void
  422 domainset_freelist_init(struct domainlist *list, int count)
  423 {
  424 
  425         LIST_INIT(list);
  426         domainset_freelist_add(list, count);
  427 }
  428 
  429 static void
  430 domainset_freelist_free(struct domainlist *list)
  431 {
  432         struct domainset *set;
  433 
  434         while ((set = LIST_FIRST(list)) != NULL) {
  435                 LIST_REMOVE(set, ds_link);
  436                 uma_zfree(domainset_zone, set);
  437         }
  438 }
  439 
  440 /* Copy a domainset preserving mask and policy. */
  441 static void
  442 domainset_copy(const struct domainset *from, struct domainset *to)
  443 {
  444 
  445         DOMAINSET_COPY(&from->ds_mask, &to->ds_mask);
  446         to->ds_policy = from->ds_policy;
  447         to->ds_prefer = from->ds_prefer;
  448 }
  449 
  450 /* Return 1 if mask, policy, and preferred domain are equal, otherwise 0. */
  451 static int
  452 domainset_equal(const struct domainset *one, const struct domainset *two)
  453 {
  454 
  455         return (DOMAINSET_CMP(&one->ds_mask, &two->ds_mask) == 0 &&
  456             one->ds_policy == two->ds_policy &&
  457             one->ds_prefer == two->ds_prefer);
  458 }
  459 
  460 /* Return 1 if child is a valid subset of parent. */
  461 static int
  462 domainset_valid(const struct domainset *parent, const struct domainset *child)
  463 {
  464         if (child->ds_policy != DOMAINSET_POLICY_PREFER)
  465                 return (DOMAINSET_SUBSET(&parent->ds_mask, &child->ds_mask));
  466         return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
  467 }
  468 
  469 static int
  470 domainset_restrict(const struct domainset *parent,
  471     const struct domainset *child)
  472 {
  473         if (child->ds_policy != DOMAINSET_POLICY_PREFER)
  474                 return (DOMAINSET_OVERLAP(&parent->ds_mask, &child->ds_mask));
  475         return (DOMAINSET_ISSET(child->ds_prefer, &parent->ds_mask));
  476 }
  477 
  478 /*
  479  * Lookup or create a domainset.  The key is provided in ds_mask and
  480  * ds_policy.  If the domainset does not yet exist the storage in
  481  * 'domain' is used to insert.  Otherwise this storage is freed to the
  482  * domainset_zone and the existing domainset is returned.
  483  */
  484 static struct domainset *
  485 _domainset_create(struct domainset *domain, struct domainlist *freelist)
  486 {
  487         struct domainset *ndomain;
  488         int i, j;
  489 
  490         KASSERT(domain->ds_cnt <= vm_ndomains,
  491             ("invalid domain count in domainset %p", domain));
  492         KASSERT(domain->ds_policy != DOMAINSET_POLICY_PREFER ||
  493             domain->ds_prefer < vm_ndomains,
  494             ("invalid preferred domain in domains %p", domain));
  495 
  496         mtx_lock_spin(&cpuset_lock);
  497         LIST_FOREACH(ndomain, &cpuset_domains, ds_link)
  498                 if (domainset_equal(ndomain, domain))
  499                         break;
  500         /*
  501          * If the domain does not yet exist we insert it and initialize
  502          * various iteration helpers which are not part of the key.
  503          */
  504         if (ndomain == NULL) {
  505                 LIST_INSERT_HEAD(&cpuset_domains, domain, ds_link);
  506                 domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
  507                 for (i = 0, j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
  508                         if (DOMAINSET_ISSET(i, &domain->ds_mask))
  509                                 domain->ds_order[j++] = i;
  510         }
  511         mtx_unlock_spin(&cpuset_lock);
  512         if (ndomain == NULL)
  513                 return (domain);
  514         if (freelist != NULL)
  515                 LIST_INSERT_HEAD(freelist, domain, ds_link);
  516         else
  517                 uma_zfree(domainset_zone, domain);
  518         return (ndomain);
  519 
  520 }
  521 
  522 /*
  523  * Are any of the domains in the mask empty?  If so, silently
  524  * remove them and update the domainset accordingly.  If only empty
  525  * domains are present, we must return failure.
  526  */
  527 static bool
  528 domainset_empty_vm(struct domainset *domain)
  529 {
  530         domainset_t empty;
  531         int i, j;
  532 
  533         DOMAINSET_ZERO(&empty);
  534         for (i = 0; i < vm_ndomains; i++)
  535                 if (VM_DOMAIN_EMPTY(i))
  536                         DOMAINSET_SET(i, &empty);
  537         if (DOMAINSET_SUBSET(&empty, &domain->ds_mask))
  538                 return (true);
  539 
  540         /* Remove empty domains from the set and recompute. */
  541         DOMAINSET_ANDNOT(&domain->ds_mask, &empty);
  542         domain->ds_cnt = DOMAINSET_COUNT(&domain->ds_mask);
  543         for (i = j = 0; i < DOMAINSET_FLS(&domain->ds_mask); i++)
  544                 if (DOMAINSET_ISSET(i, &domain->ds_mask))
  545                         domain->ds_order[j++] = i;
  546 
  547         /* Convert a PREFER policy referencing an empty domain to RR. */
  548         if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
  549             DOMAINSET_ISSET(domain->ds_prefer, &empty)) {
  550                 domain->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
  551                 domain->ds_prefer = -1;
  552         }
  553 
  554         return (false);
  555 }
  556 
  557 /*
  558  * Create or lookup a domainset based on the key held in 'domain'.
  559  */
  560 struct domainset *
  561 domainset_create(const struct domainset *domain)
  562 {
  563         struct domainset *ndomain;
  564 
  565         /*
  566          * Validate the policy.  It must specify a usable policy number with
  567          * only valid domains.  A PREFER policy must include the preferred
  568          * domain in the mask.
  569          */
  570         if (domain->ds_policy <= DOMAINSET_POLICY_INVALID ||
  571             domain->ds_policy > DOMAINSET_POLICY_MAX)
  572                 return (NULL);
  573         if (domain->ds_policy == DOMAINSET_POLICY_PREFER &&
  574             !DOMAINSET_ISSET(domain->ds_prefer, &domain->ds_mask))
  575                 return (NULL);
  576         if (!DOMAINSET_SUBSET(&domainset0->ds_mask, &domain->ds_mask))
  577                 return (NULL);
  578         ndomain = uma_zalloc(domainset_zone, M_WAITOK | M_ZERO);
  579         domainset_copy(domain, ndomain);
  580         return _domainset_create(ndomain, NULL);
  581 }
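
A hedged sketch of constructing a lookup key for domainset_create(); the mask
and policy chosen here are illustrative only:

        struct domainset key, *dset;

        /* Illustrative key: prefer domain 0, with domain 1 usable as a
         * fallback per the PREFER semantics validated above. */
        memset(&key, 0, sizeof(key));
        DOMAINSET_SET(0, &key.ds_mask);
        DOMAINSET_SET(1, &key.ds_mask);
        key.ds_policy = DOMAINSET_POLICY_PREFER;
        key.ds_prefer = 0;
        dset = domainset_create(&key);          /* NULL if the key is invalid */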
  582 
  583 /*
  584  * Update thread domainset pointers.
  585  */
  586 static void
  587 domainset_notify(void)
  588 {
  589         struct thread *td;
  590         struct proc *p;
  591 
  592         sx_slock(&allproc_lock);
  593         FOREACH_PROC_IN_SYSTEM(p) {
  594                 PROC_LOCK(p);
  595                 if (p->p_state == PRS_NEW) {
  596                         PROC_UNLOCK(p);
  597                         continue;
  598                 }
  599                 FOREACH_THREAD_IN_PROC(p, td) {
  600                         thread_lock(td);
  601                         td->td_domain.dr_policy = td->td_cpuset->cs_domain;
  602                         thread_unlock(td);
  603                 }
  604                 PROC_UNLOCK(p);
  605         }
  606         sx_sunlock(&allproc_lock);
  607         kernel_object->domain.dr_policy = cpuset_kernel->cs_domain;
  608 }
  609 
  610 /*
  611  * Create a new domainset that is a restricted subset of a parent.
  612  */
  613 static struct domainset *
  614 domainset_shadow(const struct domainset *pdomain,
  615     const struct domainset *domain, struct domainlist *freelist)
  616 {
  617         struct domainset *ndomain;
  618 
  619         ndomain = LIST_FIRST(freelist);
  620         LIST_REMOVE(ndomain, ds_link);
  621 
  622         /*
  623          * Initialize the key from the request.
  624          */
  625         domainset_copy(domain, ndomain);
  626 
  627         /*
  628          * Restrict the key by the parent.
  629          */
  630         DOMAINSET_AND(&ndomain->ds_mask, &pdomain->ds_mask);
  631 
  632         return _domainset_create(ndomain, freelist);
  633 }
  634 
  635 /*
  636  * Recursively check for errors that would occur from applying mask to
  637  * the tree of sets starting at 'set'.  Checks for sets that would become
  638  * empty as well as RDONLY flags.
  639  */
  640 static int
  641 cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
  642 {
  643         struct cpuset *nset;
  644         cpuset_t newmask;
  645         int error;
  646 
  647         mtx_assert(&cpuset_lock, MA_OWNED);
  648         if (set->cs_flags & CPU_SET_RDONLY)
  649                 return (EPERM);
  650         if (augment_mask) {
  651                 CPU_AND(&newmask, &set->cs_mask, mask);
  652         } else
  653                 CPU_COPY(mask, &newmask);
  654 
  655         if (CPU_EMPTY(&newmask))
  656                 return (EDEADLK);
  657         error = 0;
  658         LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
  659                 if ((error = cpuset_testupdate(nset, &newmask, 1)) != 0)
  660                         break;
  661         return (error);
  662 }
  663 
  664 /*
  665  * Applies the mask 'mask' without checking for empty sets or permissions.
  666  */
  667 static void
  668 cpuset_update(struct cpuset *set, cpuset_t *mask)
  669 {
  670         struct cpuset *nset;
  671 
  672         mtx_assert(&cpuset_lock, MA_OWNED);
  673         CPU_AND(&set->cs_mask, &set->cs_mask, mask);
  674         LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
  675                 cpuset_update(nset, &set->cs_mask);
  676 
  677         return;
  678 }
  679 
  680 /*
  681  * Modify the set 'set' to use a copy of the mask provided.  Apply this new
  682  * mask to restrict all children in the tree.  Checks for validity before
  683  * applying the changes.
  684  */
  685 static int
  686 cpuset_modify(struct cpuset *set, cpuset_t *mask)
  687 {
  688         struct cpuset *root;
  689         int error;
  690 
  691         error = priv_check(curthread, PRIV_SCHED_CPUSET);
  692         if (error)
  693                 return (error);
  694         /*
  695          * In case we are called from within the jail,
  696          * we do not allow modifying the dedicated root
  697          * cpuset of the jail but may still allow to
  698          * change child sets, including subordinate jails'
  699          * roots.
  700          */
  701         if ((set->cs_flags & CPU_SET_ROOT) != 0 &&
  702             jailed(curthread->td_ucred) &&
  703             set == curthread->td_ucred->cr_prison->pr_cpuset)
  704                 return (EPERM);
  705         /*
  706          * Verify that we have access to this set of
  707          * cpus.
  708          */
  709         if ((set->cs_flags & (CPU_SET_ROOT | CPU_SET_RDONLY)) == CPU_SET_ROOT) {
  710                 KASSERT(set->cs_parent != NULL,
  711                     ("jail.cpuset=%d is not a proper child of parent jail's root.",
  712                     set->cs_id));
  713 
  714                 /*
  715                  * cpuset_getroot() cannot work here due to how top-level jail
  716                  * roots are constructed.  Top-level jails are parented to
  717                  * thread0's cpuset (i.e. cpuset 1) rather than the system root.
  718                  */
  719                 root = set->cs_parent;
  720         } else {
  721                 root = cpuset_getroot(set);
  722         }
  723         mtx_lock_spin(&cpuset_lock);
  724         if (root && !CPU_SUBSET(&root->cs_mask, mask)) {
  725                 error = EINVAL;
  726                 goto out;
  727         }
  728         error = cpuset_testupdate(set, mask, 0);
  729         if (error)
  730                 goto out;
  731         CPU_COPY(mask, &set->cs_mask);
  732         cpuset_update(set, mask);
  733 out:
  734         mtx_unlock_spin(&cpuset_lock);
  735 
  736         return (error);
  737 }
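
From userland, this function is reached by cpuset_setaffinity(2) at
CPU_LEVEL_CPUSET.  A small sketch, assuming the caller has the required
privilege and CPUs 0-1 exist in its root set:

        cpuset_t mask;

        /* Restrict the current process's base (named) set to CPUs 0-1;
         * cpuset_modify() then trims every child set to comply. */
        CPU_ZERO(&mask);
        CPU_SET(0, &mask);
        CPU_SET(1, &mask);
        if (cpuset_setaffinity(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
            sizeof(mask), &mask) != 0)
                err(1, "cpuset_setaffinity");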
  738 
  739 /*
  740  * Recursively check for errors that would occur from applying the
  741  * domainset to the tree of sets starting at 'set'.  Checks for sets that
  742  * would become empty as well as RDONLY flags.
  743  */
  744 static int
  745 cpuset_testupdate_domain(struct cpuset *set, struct domainset *dset,
  746     struct domainset *orig, int *count, int augment_mask __unused)
  747 {
  748         struct cpuset *nset;
  749         struct domainset *domain;
  750         struct domainset newset;
  751         int error;
  752 
  753         mtx_assert(&cpuset_lock, MA_OWNED);
  754         if (set->cs_flags & CPU_SET_RDONLY)
  755                 return (EPERM);
  756         domain = set->cs_domain;
  757         domainset_copy(domain, &newset);
  758         if (!domainset_equal(domain, orig)) {
  759                 if (!domainset_restrict(domain, dset))
  760                         return (EDEADLK);
  761                 DOMAINSET_AND(&newset.ds_mask, &dset->ds_mask);
  762                 /* Count the number of domains that are changing. */
  763                 (*count)++;
  764         }
  765         error = 0;
  766         LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
  767                 if ((error = cpuset_testupdate_domain(nset, &newset, domain,
  768                     count, 1)) != 0)
  769                         break;
  770         return (error);
  771 }
  772 
  773 /*
  774  * Applies the domainset 'domain' without checking for empty sets or permissions.
  775  */
  776 static void
  777 cpuset_update_domain(struct cpuset *set, struct domainset *domain,
  778     struct domainset *orig, struct domainlist *domains)
  779 {
  780         struct cpuset *nset;
  781 
  782         mtx_assert(&cpuset_lock, MA_OWNED);
  783         /*
  784          * If this domainset has changed from the parent we must calculate
  785          * a new set.  Otherwise it simply inherits from the parent.  When
  786          * we inherit from the parent we get a new mask and policy.  If the
  787          * set is modified from the parent we keep the policy and only
  788          * update the mask.
  789          */
  790         if (set->cs_domain != orig) {
  791                 orig = set->cs_domain;
  792                 set->cs_domain = domainset_shadow(domain, orig, domains);
  793         } else
  794                 set->cs_domain = domain;
  795         LIST_FOREACH(nset, &set->cs_children, cs_siblings) 
  796                 cpuset_update_domain(nset, set->cs_domain, orig, domains);
  797 
  798         return;
  799 }
  800 
  801 /*
  802  * Modify the set 'set' to use a copy of the domainset provided.  Apply this
  803  * new policy to restrict all children in the tree.  Checks for validity
  804  * before applying the changes.
  805  */
  806 static int
  807 cpuset_modify_domain(struct cpuset *set, struct domainset *domain)
  808 {
  809         struct domainlist domains;
  810         struct domainset temp;
  811         struct domainset *dset;
  812         struct cpuset *root;
  813         int ndomains, needed;
  814         int error;
  815 
  816         error = priv_check(curthread, PRIV_SCHED_CPUSET);
  817         if (error)
  818                 return (error);
  819         /*
  820          * In case we are called from within the jail
  821          * we do not allow modifying the dedicated root
  822          * cpuset of the jail but may still allow to
  823          * change child sets.
  824          */
  825         if (jailed(curthread->td_ucred) &&
  826             set->cs_flags & CPU_SET_ROOT)
  827                 return (EPERM);
  828         domainset_freelist_init(&domains, 0);
  829         domain = domainset_create(domain);
  830         ndomains = 0;
  831 
  832         mtx_lock_spin(&cpuset_lock);
  833         for (;;) {
  834                 root = cpuset_getroot(set);
  835                 dset = root->cs_domain;
  836                 /*
  837                  * Verify that we have access to this set of domains.
  838                  */
  839                 if (!domainset_valid(dset, domain)) {
  840                         error = EINVAL;
  841                         goto out;
  842                 }
  843                 /*
  844                  * If applying prefer we keep the current set as the fallback.
  845                  */
  846                 if (domain->ds_policy == DOMAINSET_POLICY_PREFER)
  847                         DOMAINSET_COPY(&set->cs_domain->ds_mask,
  848                             &domain->ds_mask);
  849                 /*
  850                  * Determine whether we can apply this set of domains and
  851                  * how many new domain structures it will require.
  852                  */
  853                 domainset_copy(domain, &temp);
  854                 needed = 0;
  855                 error = cpuset_testupdate_domain(set, &temp, set->cs_domain,
  856                     &needed, 0);
  857                 if (error)
  858                         goto out;
  859                 if (ndomains >= needed)
  860                         break;
  861 
  862                 /* Dropping the lock; we'll need to re-evaluate. */
  863                 mtx_unlock_spin(&cpuset_lock);
  864                 domainset_freelist_add(&domains, needed - ndomains);
  865                 ndomains = needed;
  866                 mtx_lock_spin(&cpuset_lock);
  867         }
  868         dset = set->cs_domain;
  869         cpuset_update_domain(set, domain, dset, &domains);
  870 out:
  871         mtx_unlock_spin(&cpuset_lock);
  872         domainset_freelist_free(&domains);
  873         if (error == 0)
  874                 domainset_notify();
  875 
  876         return (error);
  877 }
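
The userland counterpart is cpuset_setdomain(2).  A hedged sketch, assuming
NUMA domain 0 exists and the caller is suitably privileged:

        domainset_t mask;

        /* Give the current process's base set a policy preferring
         * domain 0; this lands in cpuset_modify_domain() above. */
        DOMAINSET_ZERO(&mask);
        DOMAINSET_SET(0, &mask);
        if (cpuset_setdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1,
            sizeof(mask), &mask, DOMAINSET_POLICY_PREFER) != 0)
                err(1, "cpuset_setdomain");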
  878 
  879 /*
  880  * Resolve the 'which' parameter of several cpuset apis.
  881  *
  882  * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid.  Also
  883  * checks for permission via p_cansched().
  884  *
  885  * For WHICH_SET returns a valid set with a new reference.
  886  *
  887  * -1 may be supplied for any argument to mean the current proc/thread or
  888  * the base set of the current thread.  May fail with ESRCH/EPERM.
  889  */
  890 int
  891 cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp,
  892     struct cpuset **setp)
  893 {
  894         struct cpuset *set;
  895         struct thread *td;
  896         struct proc *p;
  897         int error;
  898 
  899         *pp = p = NULL;
  900         *tdp = td = NULL;
  901         *setp = set = NULL;
  902         switch (which) {
  903         case CPU_WHICH_PID:
  904                 if (id == -1) {
  905                         PROC_LOCK(curproc);
  906                         p = curproc;
  907                         break;
  908                 }
  909                 if ((p = pfind(id)) == NULL)
  910                         return (ESRCH);
  911                 break;
  912         case CPU_WHICH_TID:
  913                 if (id == -1) {
  914                         PROC_LOCK(curproc);
  915                         p = curproc;
  916                         td = curthread;
  917                         break;
  918                 }
  919                 td = tdfind(id, -1);
  920                 if (td == NULL)
  921                         return (ESRCH);
  922                 p = td->td_proc;
  923                 break;
  924         case CPU_WHICH_CPUSET:
  925                 if (id == -1) {
  926                         thread_lock(curthread);
  927                         set = cpuset_refbase(curthread->td_cpuset);
  928                         thread_unlock(curthread);
  929                 } else
  930                         set = cpuset_lookup(id, curthread);
  931                 if (set) {
  932                         *setp = set;
  933                         return (0);
  934                 }
  935                 return (ESRCH);
  936         case CPU_WHICH_JAIL:
  937         {
  938                 /* Find `set' for prison with given id. */
  939                 struct prison *pr;
  940 
  941                 sx_slock(&allprison_lock);
  942                 pr = prison_find_child(curthread->td_ucred->cr_prison, id);
  943                 sx_sunlock(&allprison_lock);
  944                 if (pr == NULL)
  945                         return (ESRCH);
  946                 cpuset_ref(pr->pr_cpuset);
  947                 *setp = pr->pr_cpuset;
  948                 mtx_unlock(&pr->pr_mtx);
  949                 return (0);
  950         }
  951         case CPU_WHICH_IRQ:
  952         case CPU_WHICH_DOMAIN:
  953                 return (0);
  954         default:
  955                 return (EINVAL);
  956         }
  957         error = p_cansched(curthread, p);
  958         if (error) {
  959                 PROC_UNLOCK(p);
  960                 return (error);
  961         }
  962         if (td == NULL)
  963                 td = FIRST_THREAD_IN_PROC(p);
  964         *pp = p;
  965         *tdp = td;
  966         return (0);
  967 }
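
The WHICH semantics above surface in userland through cpuset_getid(2), for
example:

        cpusetid_t setid;

        /* Resolve the id of the current process's base set, i.e. the
         * CPU_WHICH_PID / id -1 case handled above. */
        if (cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, &setid) != 0)
                err(1, "cpuset_getid");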
  968 
  969 static int
  970 cpuset_testshadow(struct cpuset *set, const cpuset_t *mask,
  971     const struct domainset *domain)
  972 {
  973         struct cpuset *parent;
  974         struct domainset *dset;
  975 
  976         parent = cpuset_getbase(set);
  977         /*
  978          * If we are restricting a cpu mask it must be a subset of the
  979          * parent or invalid CPUs have been specified.
  980          */
  981         if (mask != NULL && !CPU_SUBSET(&parent->cs_mask, mask))
  982                 return (EINVAL);
  983 
  984         /*
  985          * If we are restricting a domain mask it must be a subset of the
  986          * parent or invalid domains have been specified.
  987          */
  988         dset = parent->cs_domain;
  989         if (domain != NULL && !domainset_valid(dset, domain))
  990                 return (EINVAL);
  991 
  992         return (0);
  993 }
  994 
  995 /*
  996  * Create an anonymous set with the provided mask in the space provided by
  997  * 'nset'.  If the passed-in set is anonymous we use its parent; otherwise
  998  * the new set is a child of 'set'.
  999  */
 1000 static int
 1001 cpuset_shadow(struct cpuset *set, struct cpuset **nsetp,
 1002    const cpuset_t *mask, const struct domainset *domain,
 1003    struct setlist *cpusets, struct domainlist *domains)
 1004 {
 1005         struct cpuset *parent;
 1006         struct cpuset *nset;
 1007         struct domainset *dset;
 1008         struct domainset *d;
 1009         int error;
 1010 
 1011         error = cpuset_testshadow(set, mask, domain);
 1012         if (error)
 1013                 return (error);
 1014 
 1015         parent = cpuset_getbase(set);
 1016         dset = parent->cs_domain;
 1017         if (mask == NULL)
 1018                 mask = &set->cs_mask;
 1019         if (domain != NULL)
 1020                 d = domainset_shadow(dset, domain, domains);
 1021         else
 1022                 d = set->cs_domain;
 1023         nset = LIST_FIRST(cpusets);
 1024         error = cpuset_init(nset, parent, mask, d, CPUSET_INVALID);
 1025         if (error == 0) {
 1026                 LIST_REMOVE(nset, cs_link);
 1027                 *nsetp = nset;
 1028         }
 1029         return (error);
 1030 }
 1031 
 1032 static struct cpuset *
 1033 cpuset_update_thread(struct thread *td, struct cpuset *nset)
 1034 {
 1035         struct cpuset *tdset;
 1036 
 1037         tdset = td->td_cpuset;
 1038         td->td_cpuset = nset;
 1039         td->td_domain.dr_policy = nset->cs_domain;
 1040         sched_affinity(td);
 1041 
 1042         return (tdset);
 1043 }
 1044 
 1045 static int
 1046 cpuset_setproc_test_maskthread(struct cpuset *tdset, cpuset_t *mask,
 1047     struct domainset *domain)
 1048 {
 1049         struct cpuset *parent;
 1050 
 1051         parent = cpuset_getbase(tdset);
 1052         if (mask == NULL)
 1053                 mask = &tdset->cs_mask;
 1054         if (domain == NULL)
 1055                 domain = tdset->cs_domain;
 1056         return cpuset_testshadow(parent, mask, domain);
 1057 }
 1058 
 1059 static int
 1060 cpuset_setproc_maskthread(struct cpuset *tdset, cpuset_t *mask,
 1061     struct domainset *domain, struct cpuset **nsetp,
 1062     struct setlist *freelist, struct domainlist *domainlist)
 1063 {
 1064         struct cpuset *parent;
 1065 
 1066         parent = cpuset_getbase(tdset);
 1067         if (mask == NULL)
 1068                 mask = &tdset->cs_mask;
 1069         if (domain == NULL)
 1070                 domain = tdset->cs_domain;
 1071         return cpuset_shadow(parent, nsetp, mask, domain, freelist,
 1072             domainlist);
 1073 }
 1074 
 1075 static int
 1076 cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
 1077     cpuset_t *mask, struct domainset *domain)
 1078 {
 1079         struct cpuset *parent;
 1080 
 1081         parent = cpuset_getbase(tdset);
 1082 
 1083         /*
 1084          * If the thread restricted its mask then apply that same
 1085          * restriction to the new set, otherwise take it wholesale.
 1086          */
 1087         if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
 1088                 CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
 1089         } else
 1090                 CPU_COPY(&set->cs_mask, mask);
 1091 
 1092         /*
 1093          * If the thread restricted the domain then we apply the
 1094          * restriction to the new set but retain the policy.
 1095          */
 1096         if (tdset->cs_domain != parent->cs_domain) {
 1097                 domainset_copy(tdset->cs_domain, domain);
 1098                 DOMAINSET_AND(&domain->ds_mask, &set->cs_domain->ds_mask);
 1099         } else
 1100                 domainset_copy(set->cs_domain, domain);
 1101 
 1102         if (CPU_EMPTY(mask) || DOMAINSET_EMPTY(&domain->ds_mask))
 1103                 return (EDEADLK);
 1104 
 1105         return (0);
 1106 }
 1107 
 1108 static int
 1109 cpuset_setproc_test_setthread(struct cpuset *tdset, struct cpuset *set)
 1110 {
 1111         struct domainset domain;
 1112         cpuset_t mask;
 1113 
 1114         if (tdset->cs_id != CPUSET_INVALID)
 1115                 return (0);
 1116         return cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
 1117 }
 1118 
 1119 static int
 1120 cpuset_setproc_setthread(struct cpuset *tdset, struct cpuset *set,
 1121     struct cpuset **nsetp, struct setlist *freelist,
 1122     struct domainlist *domainlist)
 1123 {
 1124         struct domainset domain;
 1125         cpuset_t mask;
 1126         int error;
 1127 
 1128         /*
 1129          * If we're replacing on a thread that has not constrained the
 1130          * original set we can simply accept the new set.
 1131          */
 1132         if (tdset->cs_id != CPUSET_INVALID) {
 1133                 *nsetp = cpuset_ref(set);
 1134                 return (0);
 1135         }
 1136         error = cpuset_setproc_setthread_mask(tdset, set, &mask, &domain);
 1137         if (error)
 1138                 return (error);
 1139 
 1140         return cpuset_shadow(set, nsetp, &mask, &domain, freelist,
 1141             domainlist);
 1142 }
 1143 
 1144 static int
 1145 cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
 1146     struct cpuset *nroot, struct cpuset **nsetp,
 1147     struct setlist *cpusets, struct domainlist *domainlist)
 1148 {
 1149         struct domainset ndomain;
 1150         cpuset_t nmask;
 1151         struct cpuset *pbase;
 1152         int error;
 1153 
 1154         pbase = cpuset_getbase(td->td_cpuset);
 1155 
 1156         /* Copy process mask, then further apply the new root mask. */
 1157         CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
 1158 
 1159         domainset_copy(pbase->cs_domain, &ndomain);
 1160         DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
 1161 
 1162         /* Policy is too restrictive, will not work. */
 1163         if (CPU_EMPTY(&nmask) || DOMAINSET_EMPTY(&ndomain.ds_mask))
 1164                 return (EDEADLK);
 1165 
 1166         /*
 1167          * Remove pbase from the freelist in advance, it'll be pushed to
 1168          * cpuset_ids on success.  We assume here that cpuset_create() will not
 1169          * touch pbase on failure, and we just enqueue it back to the freelist
 1170          * to remain in a consistent state.
 1171          */
 1172         pbase = LIST_FIRST(cpusets);
 1173         LIST_REMOVE(pbase, cs_link);
 1174         error = cpuset_create(&pbase, set, &nmask);
 1175         if (error != 0) {
 1176                 LIST_INSERT_HEAD(cpusets, pbase, cs_link);
 1177                 return (error);
 1178         }
 1179 
 1180         /* Duplicates some work from above... oh well. */
 1181         pbase->cs_domain = domainset_shadow(set->cs_domain, &ndomain,
 1182             domainlist);
 1183         *nsetp = pbase;
 1184         return (0);
 1185 }
 1186 
 1187 /*
 1188  * Handle four cases for updating an entire process.
 1189  *
 1190  * 1) Set is non-null and the process is not rebasing onto a new root.  This
 1191  *    reparents all anonymous sets to the provided set and replaces all
 1192  *    non-anonymous td_cpusets with the provided set.
 1193  * 2) Set is non-null and the process is rebasing onto a new root.  This
 1194  *    creates a new base set if the process previously had its own base set,
 1195  *    then reparents all anonymous sets either to that set or the provided set
 1196  *    if one was not created.  Non-anonymous sets are similarly replaced.
 1197  * 3) Mask is non-null.  This replaces or creates anonymous sets for every
 1198  *    thread with the existing base as a parent.
 1199  * 4) domain is non-null.  This creates anonymous sets for every thread
 1200  *    and replaces the domain set.
 1201  *
 1202  * This is overly complicated because we can't allocate while holding a 
 1203  * spinlock and spinlocks must be held while changing and examining thread
 1204  * state.
 1205  */
 1206 static int
 1207 cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask,
 1208     struct domainset *domain, bool rebase)
 1209 {
 1210         struct setlist freelist;
 1211         struct setlist droplist;
 1212         struct domainlist domainlist;
 1213         struct cpuset *base, *nset, *nroot, *tdroot;
 1214         struct thread *td;
 1215         struct proc *p;
 1216         int needed;
 1217         int nfree;
 1218         int error;
 1219 
 1220         /*
 1221          * The algorithm requires two passes due to locking considerations.
 1222          * 
 1223          * 1) Lookup the process and acquire the locks in the required order.
 1224          * 2) If enough cpusets have not been allocated release the locks and
 1225          *    allocate them.  Loop.
 1226          */
 1227         cpuset_freelist_init(&freelist, 1);
 1228         domainset_freelist_init(&domainlist, 1);
 1229         LIST_INIT(&droplist);
 1230         /* Each freelist above was initialized with one entry. */
 1231         nfree = 1;
 1232         base = set;
 1233         nroot = NULL;
 1234         if (set != NULL)
 1235                 nroot = cpuset_getroot(set);
 1236         for (;;) {
 1237                 error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset);
 1238                 if (error)
 1239                         goto out;
 1240                 tdroot = cpuset_getroot(td->td_cpuset);
 1241                 needed = p->p_numthreads;
 1242                 if (set != NULL && rebase && tdroot != nroot)
 1243                         needed++;
 1244                 if (nfree >= needed)
 1245                         break;
 1246                 PROC_UNLOCK(p);
 1247                 if (nfree < needed) {
 1248                         cpuset_freelist_add(&freelist, needed - nfree);
 1249                         domainset_freelist_add(&domainlist, needed - nfree);
 1250                         nfree = needed;
 1251                 }
 1252         }
 1253         PROC_LOCK_ASSERT(p, MA_OWNED);
 1254 
 1255         /*
 1256          * If we're changing roots and the root set is what has been specified
 1257          * as the parent, then we'll check if the process was previously using
 1258          * the root set and, if it wasn't, create a new base with the process's
 1259          * mask applied to it.
 1260          *
 1261          * If the new root is incompatible with the existing mask, then we allow
 1262          * the process to take on the new root if and only if they have
 1263          * privilege to widen their mask anyways.  Unprivileged processes get
 1264          * rejected with EDEADLK.
 1265          */
 1266         if (set != NULL && rebase && nroot != tdroot) {
 1267                 cpusetid_t base_id, root_id;
 1268 
 1269                 root_id = td->td_ucred->cr_prison->pr_cpuset->cs_id;
 1270                 base_id = cpuset_getbase(td->td_cpuset)->cs_id;
 1271 
 1272                 if (base_id != root_id) {
 1273                         error = cpuset_setproc_newbase(td, set, nroot, &base,
 1274                             &freelist, &domainlist);
 1275                         if (error == EDEADLK &&
 1276                             priv_check(td, PRIV_SCHED_CPUSET) == 0)
 1277                                 error = 0;
 1278                         if (error != 0)
 1279                                 goto unlock_out;
 1280                 }
 1281         }
 1282 
 1283         /*
 1284          * Now that the appropriate locks are held and we have enough cpusets,
 1285          * make sure the operation will succeed before applying changes. The
 1286          * proc lock prevents td_cpuset from changing between calls.
 1287          */
 1288         error = 0;
 1289         FOREACH_THREAD_IN_PROC(p, td) {
 1290                 thread_lock(td);
 1291                 if (set != NULL)
 1292                         error = cpuset_setproc_test_setthread(td->td_cpuset,
 1293                             base);
 1294                 else
 1295                         error = cpuset_setproc_test_maskthread(td->td_cpuset,
 1296                             mask, domain);
 1297                 thread_unlock(td);
 1298                 if (error)
 1299                         goto unlock_out;
 1300         }
 1301         /*
 1302          * Replace each thread's cpuset while using deferred release.  We
 1303          * must do this because the thread lock must be held while operating
 1304          * on the thread and this limits the type of operations allowed.
 1305          */
 1306         FOREACH_THREAD_IN_PROC(p, td) {
 1307                 thread_lock(td);
 1308                 if (set != NULL)
 1309                         error = cpuset_setproc_setthread(td->td_cpuset, base,
 1310                             &nset, &freelist, &domainlist);
 1311                 else
 1312                         error = cpuset_setproc_maskthread(td->td_cpuset, mask,
 1313                             domain, &nset, &freelist, &domainlist);
 1314                 if (error) {
 1315                         thread_unlock(td);
 1316                         break;
 1317                 }
 1318                 cpuset_rel_defer(&droplist, cpuset_update_thread(td, nset));
 1319                 thread_unlock(td);
 1320         }
 1321 unlock_out:
 1322         PROC_UNLOCK(p);
 1323 out:
 1324         if (base != NULL && base != set)
 1325                 cpuset_rel(base);
 1326         while ((nset = LIST_FIRST(&droplist)) != NULL)
 1327                 cpuset_rel_complete(nset);
 1328         cpuset_freelist_free(&freelist);
 1329         domainset_freelist_free(&domainlist);
 1330         return (error);
 1331 }
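
Case 1 above is exercised by the cpuset(2) syscall, which creates a new
numbered set under the caller's root set and migrates the entire process into
it.  A minimal userland sketch:

        cpusetid_t id;

        /* Create a new numbered set and rebind the calling process; every
         * thread's cpuset is replaced via cpuset_setproc(). */
        if (cpuset(&id) != 0)
                err(1, "cpuset");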
 1332 
 1333 static int
 1334 bitset_strprint(char *buf, size_t bufsiz, const struct bitset *set, int setlen)
 1335 {
 1336         size_t bytes;
 1337         int i, once;
 1338         char *p;
 1339 
 1340         once = 0;
 1341         p = buf;
 1342         for (i = 0; i < __bitset_words(setlen); i++) {
 1343                 if (once != 0) {
 1344                         if (bufsiz < 1)
 1345                                 return (0);
 1346                         *p = ',';
 1347                         p++;
 1348                         bufsiz--;
 1349                 } else
 1350                         once = 1;
 1351                 if (bufsiz < sizeof(__STRING(ULONG_MAX)))
 1352                         return (0);
 1353                 bytes = snprintf(p, bufsiz, "%lx", set->__bits[i]);
 1354                 p += bytes;
 1355                 bufsiz -= bytes;
 1356         }
 1357         return (p - buf);
 1358 }
 1359 
 1360 static int
 1361 bitset_strscan(struct bitset *set, int setlen, const char *buf)
 1362 {
 1363         int i, ret;
 1364         const char *p;
 1365 
 1366         BIT_ZERO(setlen, set);
 1367         p = buf;
 1368         for (i = 0; i < __bitset_words(setlen); i++) {
 1369                 if (*p == ',') {
 1370                         p++;
 1371                         continue;
 1372                 }
 1373                 ret = sscanf(p, "%lx", &set->__bits[i]);
 1374                 if (ret == 0 || ret == -1)
 1375                         break;
 1376                 while (isxdigit(*p))
 1377                         p++;
 1378         }
 1379         return (p - buf);
 1380 }
 1381 
 1382 /*
 1383  * Return a string representing a valid layout for a cpuset_t object.
 1384  * It expects an incoming buffer of at least CPUSETBUFSIZ bytes.
 1385  */
 1386 char *
 1387 cpusetobj_strprint(char *buf, const cpuset_t *set)
 1388 {
 1389 
 1390         bitset_strprint(buf, CPUSETBUFSIZ, (const struct bitset *)set,
 1391             CPU_SETSIZE);
 1392         return (buf);
 1393 }
 1394 
 1395 /*
 1396  * Build a valid cpuset_t object from a string representation.
 1397  * It expects an incoming buffer of at least CPUSETBUFSIZ bytes.
 1398  */
 1399 int
 1400 cpusetobj_strscan(cpuset_t *set, const char *buf)
 1401 {
 1402         char p;
 1403 
 1404         if (strlen(buf) > CPUSETBUFSIZ - 1)
 1405                 return (-1);
 1406 
 1407         p = buf[bitset_strscan((struct bitset *)set, CPU_SETSIZE, buf)];
 1408         if (p != '\0')
 1409                 return (-1);
 1410 
 1411         return (0);
 1412 }
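
A quick sketch of the round trip these helpers provide (kernel context; the
printed string depends on the mask word size, e.g. CPUs 0 and 3 yield
"9,0,0,0" with 256-bit sets and 64-bit words):

        char buf[CPUSETBUFSIZ];
        cpuset_t a, b;

        CPU_ZERO(&a);
        CPU_SET(0, &a);
        CPU_SET(3, &a);
        cpusetobj_strprint(buf, &a);            /* e.g. "9,0,0,0" */
        if (cpusetobj_strscan(&b, buf) != 0)
                panic("cpuset string did not round-trip");
        KASSERT(CPU_CMP(&a, &b) == 0, ("mask mismatch"));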
 1413 
 1414 /*
 1415  * Handle a domainset specifier in the sysctl tree.  A pointer to a pointer to
 1416  * a domainset is in arg1.  If the user specifies a valid domainset the
 1417  * pointer is updated.
 1418  *
 1419  * Format is:
 1420  * hex mask word 0,hex mask word 1,...:decimal policy:decimal preferred
 1421  */
 1422 int
 1423 sysctl_handle_domainset(SYSCTL_HANDLER_ARGS)
 1424 {
 1425         char buf[DOMAINSETBUFSIZ];
 1426         struct domainset *dset;
 1427         struct domainset key;
 1428         int policy, prefer, error;
 1429         char *p;
 1430 
 1431         dset = *(struct domainset **)arg1;
 1432         error = 0;
 1433 
 1434         if (dset != NULL) {
 1435                 p = buf + bitset_strprint(buf, DOMAINSETBUFSIZ,
 1436                     (const struct bitset *)&dset->ds_mask, DOMAINSET_SETSIZE);
 1437                 sprintf(p, ":%d:%d", dset->ds_policy, dset->ds_prefer);
 1438         } else
 1439                 sprintf(buf, "<NULL>");
 1440         error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
 1441         if (error != 0 || req->newptr == NULL)
 1442                 return (error);
 1443 
 1444         /*
 1445          * Read in and validate the string.
 1446          */
 1447         memset(&key, 0, sizeof(key));
 1448         p = &buf[bitset_strscan((struct bitset *)&key.ds_mask,
 1449             DOMAINSET_SETSIZE, buf)];
 1450         if (p == buf)
 1451                 return (EINVAL);
 1452         if (sscanf(p, ":%d:%d", &policy, &prefer) != 2)
 1453                 return (EINVAL);
 1454         key.ds_policy = policy;
 1455         key.ds_prefer = prefer;
 1456 
 1457         /* domainset_create() validates the policy. */
 1458         dset = domainset_create(&key);
 1459         if (dset == NULL)
 1460                 return (EINVAL);
 1461         *(struct domainset **)arg1 = dset;
 1462 
 1463         return (error);
 1464 }
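
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * a userland program driving a sysctl backed by the handler above.  The
 * sysctl name below is hypothetical.  The value follows the format parsed
 * above, e.g. "1:2:-1" for mask 0x1, policy 2 (DOMAINSET_POLICY_FIRSTTOUCH
 * in the assumed sys/domainset.h numbering) and no preferred domain.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

static int
set_domainset_sysctl_sketch(void)
{
	const char *val = "1:2:-1";

	return (sysctlbyname("vm.example.domainset", NULL, NULL,
	    val, strlen(val) + 1));
}
#endif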
 1465 
 1466 /*
 1467  * Apply an anonymous mask or a domain to a single thread.
 1468  */
 1469 static int
 1470 _cpuset_setthread(lwpid_t id, cpuset_t *mask, struct domainset *domain)
 1471 {
 1472         struct setlist cpusets;
 1473         struct domainlist domainlist;
 1474         struct cpuset *nset;
 1475         struct cpuset *set;
 1476         struct thread *td;
 1477         struct proc *p;
 1478         int error;
 1479 
 1480         cpuset_freelist_init(&cpusets, 1);
 1481         domainset_freelist_init(&domainlist, domain != NULL);
 1482         error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &set);
 1483         if (error)
 1484                 goto out;
 1485         set = NULL;
 1486         thread_lock(td);
 1487         error = cpuset_shadow(td->td_cpuset, &nset, mask, domain,
 1488             &cpusets, &domainlist);
 1489         if (error == 0)
 1490                 set = cpuset_update_thread(td, nset);
 1491         thread_unlock(td);
 1492         PROC_UNLOCK(p);
 1493         if (set)
 1494                 cpuset_rel(set);
 1495 out:
 1496         cpuset_freelist_free(&cpusets);
 1497         domainset_freelist_free(&domainlist);
 1498         return (error);
 1499 }
 1500 
 1501 /*
 1502  * Apply an anonymous mask to a single thread.
 1503  */
 1504 int
 1505 cpuset_setthread(lwpid_t id, cpuset_t *mask)
 1506 {
 1507 
 1508         return _cpuset_setthread(id, mask, NULL);
 1509 }
 1510 
 1511 /*
 1512  * Apply a new cpumask to an ithread.
 1513  */
 1514 int
 1515 cpuset_setithread(lwpid_t id, int cpu)
 1516 {
 1517         cpuset_t mask;
 1518 
 1519         CPU_ZERO(&mask);
 1520         if (cpu == NOCPU)
 1521                 CPU_COPY(cpuset_root, &mask);
 1522         else
 1523                 CPU_SET(cpu, &mask);
 1524         return _cpuset_setthread(id, &mask, NULL);
 1525 }
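
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * how driver or MD code might use the helper above.  A concrete CPU
 * number pins the interrupt thread to that CPU; NOCPU restores the root
 * mask.  The lwpid argument is a placeholder for a real ithread id.
 */
#if 0
static void
bind_ithread_sketch(lwpid_t ithread_id)
{
	int error;

	error = cpuset_setithread(ithread_id, 2);	/* pin to CPU 2 */
	if (error == 0)
		(void)cpuset_setithread(ithread_id, NOCPU);	/* unpin */
}
#endif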
 1526 
 1527 /*
 1528  * Initialize static domainsets after NUMA information is available.  This is
 1529  * called before memory allocators are initialized.
 1530  */
 1531 void
 1532 domainset_init(void)
 1533 {
 1534         struct domainset *dset;
 1535         int i;
 1536 
 1537         dset = &domainset_firsttouch;
 1538         DOMAINSET_COPY(&all_domains, &dset->ds_mask);
 1539         dset->ds_policy = DOMAINSET_POLICY_FIRSTTOUCH;
 1540         dset->ds_prefer = -1;
 1541         _domainset_create(dset, NULL);
 1542 
 1543         dset = &domainset_interleave;
 1544         DOMAINSET_COPY(&all_domains, &dset->ds_mask);
 1545         dset->ds_policy = DOMAINSET_POLICY_INTERLEAVE;
 1546         dset->ds_prefer = -1;
 1547         _domainset_create(dset, NULL);
 1548 
 1549         dset = &domainset_roundrobin;
 1550         DOMAINSET_COPY(&all_domains, &dset->ds_mask);
 1551         dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
 1552         dset->ds_prefer = -1;
 1553         _domainset_create(dset, NULL);
 1554 
 1555         for (i = 0; i < vm_ndomains; i++) {
 1556                 dset = &domainset_fixed[i];
 1557                 DOMAINSET_ZERO(&dset->ds_mask);
 1558                 DOMAINSET_SET(i, &dset->ds_mask);
 1559                 dset->ds_policy = DOMAINSET_POLICY_ROUNDROBIN;
 1560                 _domainset_create(dset, NULL);
 1561 
 1562                 dset = &domainset_prefer[i];
 1563                 DOMAINSET_COPY(&all_domains, &dset->ds_mask);
 1564                 dset->ds_policy = DOMAINSET_POLICY_PREFER;
 1565                 dset->ds_prefer = i;
 1566                 _domainset_create(dset, NULL);
 1567         }
 1568 }
 1569 
 1570 /*
 1571  * Define the domainsets for cpusets 0, 1, and 2.
 1572  */
 1573 void
 1574 domainset_zero(void)
 1575 {
 1576         struct domainset *dset, *tmp;
 1577 
 1578         mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE);
 1579 
 1580         domainset0 = &domainset_firsttouch;
 1581         curthread->td_domain.dr_policy = domainset0;
 1582 
 1583         domainset2 = &domainset_interleave;
 1584         kernel_object->domain.dr_policy = domainset2;
 1585 
 1586         /* Remove empty domains from the global policies. */
 1587         LIST_FOREACH_SAFE(dset, &cpuset_domains, ds_link, tmp)
 1588                 if (domainset_empty_vm(dset))
 1589                         LIST_REMOVE(dset, ds_link);
 1590 }
 1591 
 1592 /*
 1593  * Create the system-wide cpusets and the cpuset for thread0, including
 1594  * three sets:
 1595  *
 1596  * 0 - The root set, which represents all valid processors in the
 1597  *     system.  This set is immutable.
 1598  * 1 - The default set, of which all processes are a member until changed.
 1599  *     This allows an administrator to move all threads off of given cpus
 1600  *     to dedicate them to high-priority tasks, save power, etc.
 1601  * 2 - The kernel set, which allows restriction and policy to be applied
 1602  *     only to kernel threads and the kernel_object.
 1603  */
 1604 struct cpuset *
 1605 cpuset_thread0(void)
 1606 {
 1607         struct cpuset *set;
 1608         int i;
 1609         int error __unused;
 1610 
 1611         cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL,
 1612             NULL, NULL, UMA_ALIGN_CACHE, 0);
 1613         domainset_zone = uma_zcreate("domainset", sizeof(struct domainset),
 1614             NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
 1615 
 1616         /*
 1617          * Create the root system set (0) for the whole machine.  Doesn't use
 1618          * cpuset_create() due to NULL parent.
 1619          */
 1620         set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
 1621         CPU_COPY(&all_cpus, &set->cs_mask);
 1622         LIST_INIT(&set->cs_children);
 1623         LIST_INSERT_HEAD(&cpuset_ids, set, cs_link);
 1624         refcount_init(&set->cs_ref, 1);
 1625         set->cs_flags = CPU_SET_ROOT | CPU_SET_RDONLY;
 1626         set->cs_domain = domainset0;
 1627         cpuset_zero = set;
 1628         cpuset_root = &set->cs_mask;
 1629 
 1630         /*
 1631          * Now derive a default (1), modifiable set from that to give out.
 1632          */
 1633         set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
 1634         error = cpuset_init(set, cpuset_zero, NULL, NULL, 1);
 1635         KASSERT(error == 0, ("Error creating default set: %d\n", error));
 1636         cpuset_default = set;
 1637         /*
 1638          * Create the kernel set (2).
 1639          */
 1640         set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO);
 1641         error = cpuset_init(set, cpuset_zero, NULL, NULL, 2);
 1642         KASSERT(error == 0, ("Error creating kernel set: %d\n", error));
 1643         set->cs_domain = domainset2;
 1644         cpuset_kernel = set;
 1645 
 1646         /*
 1647          * Initialize the unit allocator.  IDs 0, 1, and 2 are allocated above.
 1648          */
 1649         cpuset_unr = new_unrhdr(3, INT_MAX, NULL);
 1650 
 1651         /*
 1652          * If MD code has not initialized per-domain cpusets, place all
 1653          * CPUs in domain 0.
 1654          */
 1655         for (i = 0; i < MAXMEMDOM; i++)
 1656                 if (!CPU_EMPTY(&cpuset_domain[i]))
 1657                         goto domains_set;
 1658         CPU_COPY(&all_cpus, &cpuset_domain[0]);
 1659 domains_set:
 1660 
 1661         return (cpuset_default);
 1662 }
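
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * the userland view of the sets created above, compiled as an ordinary
 * program against libc.  An unmodified process reports set 1 as its base
 * set and resolves CPU_LEVEL_ROOT to set 0's mask.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>
#include <stdio.h>

int
main(void)
{
	cpusetid_t id;
	cpuset_t mask;

	if (cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, &id) == 0)
		printf("base set id: %d\n", (int)id);	/* typically 1 */
	if (cpuset_getaffinity(CPU_LEVEL_ROOT, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask) == 0)
		printf("root set spans %d CPUs\n", CPU_COUNT(&mask));
	return (0);
}
#endif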
 1663 
 1664 void
 1665 cpuset_kernthread(struct thread *td)
 1666 {
 1667         struct cpuset *set;
 1668 
 1669         thread_lock(td);
 1670         set = td->td_cpuset;
 1671         td->td_cpuset = cpuset_ref(cpuset_kernel);
 1672         thread_unlock(td);
 1673         cpuset_rel(set);
 1674 }
 1675 
 1676 /*
 1677  * Create a cpuset as cpuset_create() would, but mark the new 'set'
 1678  * as root.
 1679  *
 1680  * We do not reparent the td to it.  Use cpuset_setproc_update_set()
 1681  * for that.
 1682  *
 1683  * On success, returns the set in *setp with a reference held.
 1684  */
 1685 int
 1686 cpuset_create_root(struct prison *pr, struct cpuset **setp)
 1687 {
 1688         struct cpuset *set;
 1689         int error;
 1690 
 1691         KASSERT(pr != NULL, ("[%s:%d] invalid pr", __func__, __LINE__));
 1692         KASSERT(setp != NULL, ("[%s:%d] invalid setp", __func__, __LINE__));
 1693 
 1694         set = NULL;
 1695         error = cpuset_create(&set, pr->pr_cpuset, &pr->pr_cpuset->cs_mask);
 1696         if (error)
 1697                 return (error);
 1698 
 1699         KASSERT(set != NULL, ("[%s:%d] cpuset_create returned invalid data",
 1700             __func__, __LINE__));
 1701 
 1702         /* Mark the set as root. */
 1703         set->cs_flags |= CPU_SET_ROOT;
 1704         *setp = set;
 1705 
 1706         return (0);
 1707 }
 1708 
 1709 int
 1710 cpuset_setproc_update_set(struct proc *p, struct cpuset *set)
 1711 {
 1712         int error;
 1713 
 1714         KASSERT(p != NULL, ("[%s:%d] invalid proc", __func__, __LINE__));
 1715         KASSERT(set != NULL, ("[%s:%d] invalid set", __func__, __LINE__));
 1716 
 1717         cpuset_ref(set);
 1718         error = cpuset_setproc(p->p_pid, set, NULL, NULL, true);
 1719         if (error)
 1720                 return (error);
 1721         cpuset_rel(set);
 1722         return (0);
 1723 }
 1724 
 1725 /*
 1726  * In Capability mode, the only accesses that are permitted are to the current
 1727  * thread and process' CPU and domain sets.
 1728  */
 1729 static int
 1730 cpuset_check_capabilities(struct thread *td, cpulevel_t level, cpuwhich_t which,
 1731     id_t id)
 1732 {
 1733         if (IN_CAPABILITY_MODE(td)) {
 1734                 if (level != CPU_LEVEL_WHICH)
 1735                         return (ECAPMODE);
 1736                 if (which != CPU_WHICH_TID && which != CPU_WHICH_PID)
 1737                         return (ECAPMODE);
 1738                 if (id != -1 &&
 1739                     !(which == CPU_WHICH_TID && id == td->td_tid) &&
 1740                     !(which == CPU_WHICH_PID && id == td->td_proc->p_pid))
 1741                         return (ECAPMODE);
 1742         }
 1743         return (0);
 1744 }
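
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * the capability-mode policy above as seen from userland.  After
 * cap_enter(), a query naming the current thread succeeds while one
 * naming another process (pid 1 here) fails with ECAPMODE.
 */
#if 0
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/cpuset.h>
#include <assert.h>
#include <errno.h>

int
main(void)
{
	cpuset_t mask;

	assert(cap_enter() == 0);
	assert(cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask) == 0);
	assert(cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, 1,
	    sizeof(mask), &mask) == -1 && errno == ECAPMODE);
	return (0);
}
#endif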
 1745 
 1746 #if defined(__powerpc__)
 1747 /*
 1748  * TODO: At least powerpc64 and powerpc64le kernels panic with
 1749  * exception 0x480 (instruction segment exception) when copyin/copyout
 1750  * are set as function pointers in the cpuset_copy_cb struct and called
 1751  * by an external module (like pfsync).  Note that copyin/copyout have
 1752  * an ifunc resolver function.
 1753  *
 1754  * Bisecting LLVM shows that the behavior changed in LLVM 10.0 with
 1755  * https://reviews.llvm.org/rGdc06b0bc9ad055d06535462d91bfc2a744b2f589
 1756  *
 1757  * This is a hack/workaround while the problem is being discussed with
 1758  * the LLVM community.
 1759  */
 1760 static int
 1761 cpuset_copyin(const void *uaddr, void *kaddr, size_t len)
 1762 {
 1763         return (copyin(uaddr, kaddr, len));
 1764 }
 1765 
 1766 static int
 1767 cpuset_copyout(const void *kaddr, void *uaddr, size_t len)
 1768 {
 1769         return (copyout(kaddr, uaddr, len));
 1770 }
 1771 
 1772 static const struct cpuset_copy_cb copy_set = {
 1773         .cpuset_copyin = cpuset_copyin,
 1774         .cpuset_copyout = cpuset_copyout
 1775 };
 1776 #else
 1777 static const struct cpuset_copy_cb copy_set = {
 1778         .cpuset_copyin = copyin,
 1779         .cpuset_copyout = copyout
 1780 };
 1781 #endif
 1782 
 1783 #ifndef _SYS_SYSPROTO_H_
 1784 struct cpuset_args {
 1785         cpusetid_t      *setid;
 1786 };
 1787 #endif
 1788 int
 1789 sys_cpuset(struct thread *td, struct cpuset_args *uap)
 1790 {
 1791         struct cpuset *root;
 1792         struct cpuset *set;
 1793         int error;
 1794 
 1795         thread_lock(td);
 1796         root = cpuset_refroot(td->td_cpuset);
 1797         thread_unlock(td);
 1798         set = NULL;
 1799         error = cpuset_create(&set, root, &root->cs_mask);
 1800         cpuset_rel(root);
 1801         if (error)
 1802                 return (error);
 1803         error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id));
 1804         if (error == 0)
 1805                 error = cpuset_setproc(-1, set, NULL, NULL, false);
 1806         cpuset_rel(set);
 1807         return (error);
 1808 }
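
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * the libc wrapper for the syscall above.  A new set is created as a
 * child of the caller's root set and the calling process is migrated
 * into it; the new id is returned through the pointer argument.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>
#include <stdio.h>

int
main(void)
{
	cpusetid_t id;

	if (cpuset(&id) != 0) {
		perror("cpuset");
		return (1);
	}
	printf("now running in set %d\n", (int)id);
	return (0);
}
#endif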
 1809 
 1810 #ifndef _SYS_SYSPROTO_H_
 1811 struct cpuset_setid_args {
 1812         cpuwhich_t      which;
 1813         id_t            id;
 1814         cpusetid_t      setid;
 1815 };
 1816 #endif
 1817 int
 1818 sys_cpuset_setid(struct thread *td, struct cpuset_setid_args *uap)
 1819 {
 1820 
 1821         return (kern_cpuset_setid(td, uap->which, uap->id, uap->setid));
 1822 }
 1823 
 1824 int
 1825 kern_cpuset_setid(struct thread *td, cpuwhich_t which,
 1826     id_t id, cpusetid_t setid)
 1827 {
 1828         struct cpuset *set;
 1829         int error;
 1830 
 1831         /*
 1832          * Presently we only support per-process sets.
 1833          */
 1834         if (which != CPU_WHICH_PID)
 1835                 return (EINVAL);
 1836         set = cpuset_lookup(setid, td);
 1837         if (set == NULL)
 1838                 return (ESRCH);
 1839         error = cpuset_setproc(id, set, NULL, NULL, false);
 1840         cpuset_rel(set);
 1841         return (error);
 1842 }
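
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * moving another process into an existing numbered set from userland.
 * The pid and setid are placeholders; per the check above, only
 * CPU_WHICH_PID is accepted.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>

static int
move_pid_sketch(pid_t pid, cpusetid_t setid)
{
	return (cpuset_setid(CPU_WHICH_PID, pid, setid));
}
#endif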
 1843 
 1844 #ifndef _SYS_SYSPROTO_H_
 1845 struct cpuset_getid_args {
 1846         cpulevel_t      level;
 1847         cpuwhich_t      which;
 1848         id_t            id;
 1849         cpusetid_t      *setid;
 1850 };
 1851 #endif
 1852 int
 1853 sys_cpuset_getid(struct thread *td, struct cpuset_getid_args *uap)
 1854 {
 1855 
 1856         return (kern_cpuset_getid(td, uap->level, uap->which, uap->id,
 1857             uap->setid));
 1858 }
 1859 
 1860 int
 1861 kern_cpuset_getid(struct thread *td, cpulevel_t level, cpuwhich_t which,
 1862     id_t id, cpusetid_t *setid)
 1863 {
 1864         struct cpuset *nset;
 1865         struct cpuset *set;
 1866         struct thread *ttd;
 1867         struct proc *p;
 1868         cpusetid_t tmpid;
 1869         int error;
 1870 
 1871         if (level == CPU_LEVEL_WHICH && which != CPU_WHICH_CPUSET)
 1872                 return (EINVAL);
 1873         error = cpuset_which(which, id, &p, &ttd, &set);
 1874         if (error)
 1875                 return (error);
 1876         switch (which) {
 1877         case CPU_WHICH_TID:
 1878         case CPU_WHICH_PID:
 1879                 thread_lock(ttd);
 1880                 set = cpuset_refbase(ttd->td_cpuset);
 1881                 thread_unlock(ttd);
 1882                 PROC_UNLOCK(p);
 1883                 break;
 1884         case CPU_WHICH_CPUSET:
 1885         case CPU_WHICH_JAIL:
 1886                 break;
 1887         case CPU_WHICH_IRQ:
 1888         case CPU_WHICH_DOMAIN:
 1889                 return (EINVAL);
 1890         }
 1891         switch (level) {
 1892         case CPU_LEVEL_ROOT:
 1893                 nset = cpuset_refroot(set);
 1894                 cpuset_rel(set);
 1895                 set = nset;
 1896                 break;
 1897         case CPU_LEVEL_CPUSET:
 1898                 break;
 1899         case CPU_LEVEL_WHICH:
 1900                 break;
 1901         }
 1902         tmpid = set->cs_id;
 1903         cpuset_rel(set);
 1904         if (error == 0)
 1905                 error = copyout(&tmpid, setid, sizeof(tmpid));
 1906 
 1907         return (error);
 1908 }
 1909 
 1910 #ifndef _SYS_SYSPROTO_H_
 1911 struct cpuset_getaffinity_args {
 1912         cpulevel_t      level;
 1913         cpuwhich_t      which;
 1914         id_t            id;
 1915         size_t          cpusetsize;
 1916         cpuset_t        *mask;
 1917 };
 1918 #endif
 1919 int
 1920 sys_cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap)
 1921 {
 1922 
 1923         return (user_cpuset_getaffinity(td, uap->level, uap->which,
 1924             uap->id, uap->cpusetsize, uap->mask, &copy_set));
 1925 }
 1926 
 1927 int
 1928 kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 1929     id_t id, size_t cpusetsize, cpuset_t *mask)
 1930 {
 1931         struct thread *ttd;
 1932         struct cpuset *nset;
 1933         struct cpuset *set;
 1934         struct proc *p;
 1935         int error;
 1936 
 1937         error = cpuset_check_capabilities(td, level, which, id);
 1938         if (error != 0)
 1939                 return (error);
 1940         error = cpuset_which(which, id, &p, &ttd, &set);
 1941         if (error != 0)
 1942                 return (error);
 1943         switch (level) {
 1944         case CPU_LEVEL_ROOT:
 1945         case CPU_LEVEL_CPUSET:
 1946                 switch (which) {
 1947                 case CPU_WHICH_TID:
 1948                 case CPU_WHICH_PID:
 1949                         thread_lock(ttd);
 1950                         set = cpuset_ref(ttd->td_cpuset);
 1951                         thread_unlock(ttd);
 1952                         break;
 1953                 case CPU_WHICH_CPUSET:
 1954                 case CPU_WHICH_JAIL:
 1955                         break;
 1956                 case CPU_WHICH_IRQ:
 1957                 case CPU_WHICH_INTRHANDLER:
 1958                 case CPU_WHICH_ITHREAD:
 1959                 case CPU_WHICH_DOMAIN:
 1960                         return (EINVAL);
 1961                 }
 1962                 if (level == CPU_LEVEL_ROOT)
 1963                         nset = cpuset_refroot(set);
 1964                 else
 1965                         nset = cpuset_refbase(set);
 1966                 CPU_COPY(&nset->cs_mask, mask);
 1967                 cpuset_rel(nset);
 1968                 break;
 1969         case CPU_LEVEL_WHICH:
 1970                 switch (which) {
 1971                 case CPU_WHICH_TID:
 1972                         thread_lock(ttd);
 1973                         CPU_COPY(&ttd->td_cpuset->cs_mask, mask);
 1974                         thread_unlock(ttd);
 1975                         break;
 1976                 case CPU_WHICH_PID:
 1977                         FOREACH_THREAD_IN_PROC(p, ttd) {
 1978                                 thread_lock(ttd);
 1979                                 CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
 1980                                 thread_unlock(ttd);
 1981                         }
 1982                         break;
 1983                 case CPU_WHICH_CPUSET:
 1984                 case CPU_WHICH_JAIL:
 1985                         CPU_COPY(&set->cs_mask, mask);
 1986                         break;
 1987                 case CPU_WHICH_IRQ:
 1988                 case CPU_WHICH_INTRHANDLER:
 1989                 case CPU_WHICH_ITHREAD:
 1990                         error = intr_getaffinity(id, which, mask);
 1991                         break;
 1992                 case CPU_WHICH_DOMAIN:
 1993                         if (id < 0 || id >= MAXMEMDOM)
 1994                                 error = ESRCH;
 1995                         else
 1996                                 CPU_COPY(&cpuset_domain[id], mask);
 1997                         break;
 1998                 }
 1999                 break;
 2000         default:
 2001                 error = EINVAL;
 2002                 break;
 2003         }
 2004         if (set)
 2005                 cpuset_rel(set);
 2006         if (p)
 2007                 PROC_UNLOCK(p);
 2008         if (error == 0) {
 2009                 if (cpusetsize < howmany(CPU_FLS(mask), NBBY))
 2010                         return (ERANGE);
 2011 #ifdef KTRACE
 2012                 if (KTRPOINT(td, KTR_STRUCT))
 2013                         ktrcpuset(mask, cpusetsize);
 2014 #endif
 2015         }
 2016         return (error);
 2017 }
 2018 
 2019 int
 2020 user_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 2021     id_t id, size_t cpusetsize, cpuset_t *maskp, const struct cpuset_copy_cb *cb)
 2022 {
 2023         cpuset_t *mask;
 2024         size_t size;
 2025         int error;
 2026 
 2027         mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
 2028         size = min(cpusetsize, sizeof(cpuset_t));
 2029         error = kern_cpuset_getaffinity(td, level, which, id, size, mask);
 2030         if (error == 0) {
 2031                 error = cb->cpuset_copyout(mask, maskp, size);
 2032                 if (error != 0)
 2033                         goto out;
 2034                 if (cpusetsize > size) {
 2035                         char *end;
 2036                         char *cp;
 2037                         int rv;
 2038 
 2039                         end = cp = (char *)&maskp->__bits;
 2040                         end += cpusetsize;
 2041                         cp += size;
 2042                         while (cp != end) {
 2043                                 rv = subyte(cp, 0);
 2044                                 if (rv == -1) {
 2045                                         error = EFAULT;
 2046                                         goto out;
 2047                                 }
 2048                                 cp++;
 2049                         }
 2050                 }
 2051         }
 2052 out:
 2053         free(mask, M_TEMP);
 2054         return (error);
 2055 }
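
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * per the zero-filling above, a caller may pass a buffer larger than the
 * kernel's cpuset_t and the tail is cleared, so portable code can
 * over-allocate instead of probing the exact kernel set size.  The
 * 1024-bit size below is an arbitrary assumption.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>

static int
get_affinity_big_sketch(void)
{
	long buf[1024 / (NBBY * sizeof(long))];	/* 1024 bits */

	return (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(buf), (cpuset_t *)buf));
}
#endif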
 2056 
 2057 #ifndef _SYS_SYSPROTO_H_
 2058 struct cpuset_setaffinity_args {
 2059         cpulevel_t      level;
 2060         cpuwhich_t      which;
 2061         id_t            id;
 2062         size_t          cpusetsize;
 2063         const cpuset_t  *mask;
 2064 };
 2065 #endif
 2066 int
 2067 sys_cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap)
 2068 {
 2069 
 2070         return (user_cpuset_setaffinity(td, uap->level, uap->which,
 2071             uap->id, uap->cpusetsize, uap->mask, &copy_set));
 2072 }
 2073 
 2074 int
 2075 kern_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 2076     id_t id, cpuset_t *mask)
 2077 {
 2078         struct cpuset *nset;
 2079         struct cpuset *set;
 2080         struct thread *ttd;
 2081         struct proc *p;
 2082         int error;
 2083 
 2084 #ifdef KTRACE
 2085         if (KTRPOINT(td, KTR_STRUCT))
 2086                 ktrcpuset(mask, sizeof(cpuset_t));
 2087 #endif
 2088         error = cpuset_check_capabilities(td, level, which, id);
 2089         if (error != 0)
 2090                 return (error);
 2091         if (CPU_EMPTY(mask))
 2092                 return (EDEADLK);
 2093         switch (level) {
 2094         case CPU_LEVEL_ROOT:
 2095         case CPU_LEVEL_CPUSET:
 2096                 error = cpuset_which(which, id, &p, &ttd, &set);
 2097                 if (error)
 2098                         break;
 2099                 switch (which) {
 2100                 case CPU_WHICH_TID:
 2101                 case CPU_WHICH_PID:
 2102                         thread_lock(ttd);
 2103                         set = cpuset_ref(ttd->td_cpuset);
 2104                         thread_unlock(ttd);
 2105                         PROC_UNLOCK(p);
 2106                         break;
 2107                 case CPU_WHICH_CPUSET:
 2108                 case CPU_WHICH_JAIL:
 2109                         break;
 2110                 case CPU_WHICH_IRQ:
 2111                 case CPU_WHICH_INTRHANDLER:
 2112                 case CPU_WHICH_ITHREAD:
 2113                 case CPU_WHICH_DOMAIN:
 2114                         return (EINVAL);
 2115                 }
 2116                 if (level == CPU_LEVEL_ROOT)
 2117                         nset = cpuset_refroot(set);
 2118                 else
 2119                         nset = cpuset_refbase(set);
 2120                 error = cpuset_modify(nset, mask);
 2121                 cpuset_rel(nset);
 2122                 cpuset_rel(set);
 2123                 break;
 2124         case CPU_LEVEL_WHICH:
 2125                 switch (which) {
 2126                 case CPU_WHICH_TID:
 2127                         error = cpuset_setthread(id, mask);
 2128                         break;
 2129                 case CPU_WHICH_PID:
 2130                         error = cpuset_setproc(id, NULL, mask, NULL, false);
 2131                         break;
 2132                 case CPU_WHICH_CPUSET:
 2133                 case CPU_WHICH_JAIL:
 2134                         error = cpuset_which(which, id, &p, &ttd, &set);
 2135                         if (error == 0) {
 2136                                 error = cpuset_modify(set, mask);
 2137                                 cpuset_rel(set);
 2138                         }
 2139                         break;
 2140                 case CPU_WHICH_IRQ:
 2141                 case CPU_WHICH_INTRHANDLER:
 2142                 case CPU_WHICH_ITHREAD:
 2143                         error = intr_setaffinity(id, which, mask);
 2144                         break;
 2145                 default:
 2146                         error = EINVAL;
 2147                         break;
 2148                 }
 2149                 break;
 2150         default:
 2151                 error = EINVAL;
 2152                 break;
 2153         }
 2154         return (error);
 2155 }
 2156 
 2157 int
 2158 user_cpuset_setaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
 2159     id_t id, size_t cpusetsize, const cpuset_t *maskp, const struct cpuset_copy_cb *cb)
 2160 {
 2161         cpuset_t *mask;
 2162         int error;
 2163         size_t size;
 2164 
 2165         size = min(cpusetsize, sizeof(cpuset_t));
 2166         mask = malloc(sizeof(cpuset_t), M_TEMP, M_WAITOK | M_ZERO);
 2167         error = cb->cpuset_copyin(maskp, mask, size);
 2168         if (error)
 2169                 goto out;
 2170         /*
 2171          * Verify that no high bits are set.
 2172          */
 2173         if (cpusetsize > sizeof(cpuset_t)) {
 2174                 const char *end, *cp;
 2175                 int val;
 2176                 end = cp = (const char *)&maskp->__bits;
 2177                 end += cpusetsize;
 2178                 cp += sizeof(cpuset_t);
 2179 
 2180                 while (cp != end) {
 2181                         val = fubyte(cp);
 2182                         if (val == -1) {
 2183                                 error = EFAULT;
 2184                                 goto out;
 2185                         }
 2186                         if (val != 0) {
 2187                                 error = EINVAL;
 2188                                 goto out;
 2189                         }
 2190                         cp++;
 2191                 }
 2192         }
 2193         error = kern_cpuset_setaffinity(td, level, which, id, mask);
 2194 
 2195 out:
 2196         free(mask, M_TEMP);
 2197         return (error);
 2198 }
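
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * the common userland use of this path, pinning the calling thread to
 * CPU 0.  An id of -1 with CPU_WHICH_TID means the current thread; an
 * empty mask would be rejected with EDEADLK by the kernel side above.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>

static int
pin_self_to_cpu0_sketch(void)
{
	cpuset_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	return (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask));
}
#endif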
 2199 
 2200 #ifndef _SYS_SYSPROTO_H_
 2201 struct cpuset_getdomain_args {
 2202         cpulevel_t      level;
 2203         cpuwhich_t      which;
 2204         id_t            id;
 2205         size_t          domainsetsize;
 2206         domainset_t     *mask;
 2207         int             *policy;
 2208 };
 2209 #endif
 2210 int
 2211 sys_cpuset_getdomain(struct thread *td, struct cpuset_getdomain_args *uap)
 2212 {
 2213 
 2214         return (kern_cpuset_getdomain(td, uap->level, uap->which,
 2215             uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
 2216 }
 2217 
 2218 int
 2219 kern_cpuset_getdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
 2220     id_t id, size_t domainsetsize, domainset_t *maskp, int *policyp,
 2221     const struct cpuset_copy_cb *cb)
 2222 {
 2223         struct domainset outset;
 2224         struct thread *ttd;
 2225         struct cpuset *nset;
 2226         struct cpuset *set;
 2227         struct domainset *dset;
 2228         struct proc *p;
 2229         domainset_t *mask;
 2230         int error;
 2231 
 2232         if (domainsetsize < sizeof(domainset_t) ||
 2233             domainsetsize > DOMAINSET_MAXSIZE / NBBY)
 2234                 return (ERANGE);
 2235         error = cpuset_check_capabilities(td, level, which, id);
 2236         if (error != 0)
 2237                 return (error);
 2238         mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
 2239         bzero(&outset, sizeof(outset));
 2240         error = cpuset_which(which, id, &p, &ttd, &set);
 2241         if (error)
 2242                 goto out;
 2243         switch (level) {
 2244         case CPU_LEVEL_ROOT:
 2245         case CPU_LEVEL_CPUSET:
 2246                 switch (which) {
 2247                 case CPU_WHICH_TID:
 2248                 case CPU_WHICH_PID:
 2249                         thread_lock(ttd);
 2250                         set = cpuset_ref(ttd->td_cpuset);
 2251                         thread_unlock(ttd);
 2252                         break;
 2253                 case CPU_WHICH_CPUSET:
 2254                 case CPU_WHICH_JAIL:
 2255                         break;
 2256                 case CPU_WHICH_IRQ:
 2257                 case CPU_WHICH_INTRHANDLER:
 2258                 case CPU_WHICH_ITHREAD:
 2259                 case CPU_WHICH_DOMAIN:
 2260                         error = EINVAL;
 2261                         goto out;
 2262                 }
 2263                 if (level == CPU_LEVEL_ROOT)
 2264                         nset = cpuset_refroot(set);
 2265                 else
 2266                         nset = cpuset_refbase(set);
 2267                 domainset_copy(nset->cs_domain, &outset);
 2268                 cpuset_rel(nset);
 2269                 break;
 2270         case CPU_LEVEL_WHICH:
 2271                 switch (which) {
 2272                 case CPU_WHICH_TID:
 2273                         thread_lock(ttd);
 2274                         domainset_copy(ttd->td_cpuset->cs_domain, &outset);
 2275                         thread_unlock(ttd);
 2276                         break;
 2277                 case CPU_WHICH_PID:
 2278                         FOREACH_THREAD_IN_PROC(p, ttd) {
 2279                                 thread_lock(ttd);
 2280                                 dset = ttd->td_cpuset->cs_domain;
 2281                                 /* Show all domains in the proc. */
 2282                                 DOMAINSET_OR(&outset.ds_mask, &dset->ds_mask);
 2283                                 /* Last policy wins. */
 2284                                 outset.ds_policy = dset->ds_policy;
 2285                                 outset.ds_prefer = dset->ds_prefer;
 2286                                 thread_unlock(ttd);
 2287                         }
 2288                         break;
 2289                 case CPU_WHICH_CPUSET:
 2290                 case CPU_WHICH_JAIL:
 2291                         domainset_copy(set->cs_domain, &outset);
 2292                         break;
 2293                 case CPU_WHICH_IRQ:
 2294                 case CPU_WHICH_INTRHANDLER:
 2295                 case CPU_WHICH_ITHREAD:
 2296                 case CPU_WHICH_DOMAIN:
 2297                         error = EINVAL;
 2298                         break;
 2299                 }
 2300                 break;
 2301         default:
 2302                 error = EINVAL;
 2303                 break;
 2304         }
 2305         if (set)
 2306                 cpuset_rel(set);
 2307         if (p)
 2308                 PROC_UNLOCK(p);
 2309         /*
 2310          * Translate prefer into a set containing only the preferred domain,
 2311          * not the entire fallback set.
 2312          */
 2313         if (outset.ds_policy == DOMAINSET_POLICY_PREFER) {
 2314                 DOMAINSET_ZERO(&outset.ds_mask);
 2315                 DOMAINSET_SET(outset.ds_prefer, &outset.ds_mask);
 2316         }
 2317         DOMAINSET_COPY(&outset.ds_mask, mask);
 2318         if (error == 0)
 2319                 error = cb->cpuset_copyout(mask, maskp, domainsetsize);
 2320         if (error == 0)
 2321                 if (suword32(policyp, outset.ds_policy) != 0)
 2322                         error = EFAULT;
 2323 out:
 2324         free(mask, M_TEMP);
 2325         return (error);
 2326 }
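
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * reading back the current thread's domain policy from userland.  Note
 * the PREFER translation above: a preferred policy is reported with only
 * the preferred domain in the mask, not the full fallback set.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, &policy) == 0)
		printf("policy %d over %d domain(s)\n", policy,
		    DOMAINSET_COUNT(&mask));
	return (0);
}
#endif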
 2327 
 2328 #ifndef _SYS_SYSPROTO_H_
 2329 struct cpuset_setdomain_args {
 2330         cpulevel_t      level;
 2331         cpuwhich_t      which;
 2332         id_t            id;
 2333         size_t          domainsetsize;
 2334         domainset_t     *mask;
 2335         int             policy;
 2336 };
 2337 #endif
 2338 int
 2339 sys_cpuset_setdomain(struct thread *td, struct cpuset_setdomain_args *uap)
 2340 {
 2341 
 2342         return (kern_cpuset_setdomain(td, uap->level, uap->which,
 2343             uap->id, uap->domainsetsize, uap->mask, uap->policy, &copy_set));
 2344 }
 2345 
 2346 int
 2347 kern_cpuset_setdomain(struct thread *td, cpulevel_t level, cpuwhich_t which,
 2348     id_t id, size_t domainsetsize, const domainset_t *maskp, int policy,
 2349     const struct cpuset_copy_cb *cb)
 2350 {
 2351         struct cpuset *nset;
 2352         struct cpuset *set;
 2353         struct thread *ttd;
 2354         struct proc *p;
 2355         struct domainset domain;
 2356         domainset_t *mask;
 2357         int error;
 2358 
 2359         if (domainsetsize < sizeof(domainset_t) ||
 2360             domainsetsize > DOMAINSET_MAXSIZE / NBBY)
 2361                 return (ERANGE);
 2362         if (policy <= DOMAINSET_POLICY_INVALID ||
 2363             policy > DOMAINSET_POLICY_MAX)
 2364                 return (EINVAL);
 2365         error = cpuset_check_capabilities(td, level, which, id);
 2366         if (error != 0)
 2367                 return (error);
 2368         memset(&domain, 0, sizeof(domain));
 2369         mask = malloc(domainsetsize, M_TEMP, M_WAITOK | M_ZERO);
 2370         error = cb->cpuset_copyin(maskp, mask, domainsetsize);
 2371         if (error)
 2372                 goto out;
 2373         /*
 2374          * Verify that no high bits are set.
 2375          */
 2376         if (domainsetsize > sizeof(domainset_t)) {
 2377                 char *end;
 2378                 char *cp;
 2379 
 2380                 end = cp = (char *)&mask->__bits;
 2381                 end += domainsetsize;
 2382                 cp += sizeof(domainset_t);
 2383                 while (cp != end)
 2384                         if (*cp++ != 0) {
 2385                                 error = EINVAL;
 2386                                 goto out;
 2387                         }
 2388         }
 2389         if (DOMAINSET_EMPTY(mask)) {
 2390                 error = EDEADLK;
 2391                 goto out;
 2392         }
 2393         DOMAINSET_COPY(mask, &domain.ds_mask);
 2394         domain.ds_policy = policy;
 2395 
 2396         /*
 2397          * Sanitize the provided mask.
 2398          */
 2399         if (!DOMAINSET_SUBSET(&all_domains, &domain.ds_mask)) {
 2400                 error = EINVAL;
 2401                 goto out;
 2402         }
 2403 
 2404         /* Translate preferred policy into a mask and fallback. */
 2405         if (policy == DOMAINSET_POLICY_PREFER) {
 2406                 /* Only support a single preferred domain. */
 2407                 if (DOMAINSET_COUNT(&domain.ds_mask) != 1) {
 2408                         error = EINVAL;
 2409                         goto out;
 2410                 }
 2411                 domain.ds_prefer = DOMAINSET_FFS(&domain.ds_mask) - 1;
 2412                 /* This will be constrained by domainset_shadow(). */
 2413                 DOMAINSET_COPY(&all_domains, &domain.ds_mask);
 2414         }
 2415 
 2416         /*
 2417          * When given an impossible policy, fall back to interleaving
 2418          * across all domains.
 2419          */
 2420         if (domainset_empty_vm(&domain))
 2421                 domainset_copy(domainset2, &domain);
 2422 
 2423         switch (level) {
 2424         case CPU_LEVEL_ROOT:
 2425         case CPU_LEVEL_CPUSET:
 2426                 error = cpuset_which(which, id, &p, &ttd, &set);
 2427                 if (error)
 2428                         break;
 2429                 switch (which) {
 2430                 case CPU_WHICH_TID:
 2431                 case CPU_WHICH_PID:
 2432                         thread_lock(ttd);
 2433                         set = cpuset_ref(ttd->td_cpuset);
 2434                         thread_unlock(ttd);
 2435                         PROC_UNLOCK(p);
 2436                         break;
 2437                 case CPU_WHICH_CPUSET:
 2438                 case CPU_WHICH_JAIL:
 2439                         break;
 2440                 case CPU_WHICH_IRQ:
 2441                 case CPU_WHICH_INTRHANDLER:
 2442                 case CPU_WHICH_ITHREAD:
 2443                 case CPU_WHICH_DOMAIN:
 2444                         error = EINVAL;
 2445                         goto out;
 2446                 }
 2447                 if (level == CPU_LEVEL_ROOT)
 2448                         nset = cpuset_refroot(set);
 2449                 else
 2450                         nset = cpuset_refbase(set);
 2451                 error = cpuset_modify_domain(nset, &domain);
 2452                 cpuset_rel(nset);
 2453                 cpuset_rel(set);
 2454                 break;
 2455         case CPU_LEVEL_WHICH:
 2456                 switch (which) {
 2457                 case CPU_WHICH_TID:
 2458                         error = _cpuset_setthread(id, NULL, &domain);
 2459                         break;
 2460                 case CPU_WHICH_PID:
 2461                         error = cpuset_setproc(id, NULL, NULL, &domain, false);
 2462                         break;
 2463                 case CPU_WHICH_CPUSET:
 2464                 case CPU_WHICH_JAIL:
 2465                         error = cpuset_which(which, id, &p, &ttd, &set);
 2466                         if (error == 0) {
 2467                                 error = cpuset_modify_domain(set, &domain);
 2468                                 cpuset_rel(set);
 2469                         }
 2470                         break;
 2471                 case CPU_WHICH_IRQ:
 2472                 case CPU_WHICH_INTRHANDLER:
 2473                 case CPU_WHICH_ITHREAD:
 2474                 default:
 2475                         error = EINVAL;
 2476                         break;
 2477                 }
 2478                 break;
 2479         default:
 2480                 error = EINVAL;
 2481                 break;
 2482         }
 2483 out:
 2484         free(mask, M_TEMP);
 2485         return (error);
 2486 }
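
/*
 * Illustrative sketch (not part of this file; excluded from the build):
 * requesting a preferred-domain policy for the current thread.  Per the
 * translation above, exactly one domain may be set in the mask for
 * DOMAINSET_POLICY_PREFER; the kernel records it in ds_prefer and widens
 * the mask to all domains as the fallback.
 */
#if 0
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>

static int
prefer_domain0_sketch(void)
{
	domainset_t mask;

	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	return (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_PREFER));
}
#endif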
 2487 
 2488 #ifdef DDB
 2489 
 2490 static void
 2491 ddb_display_bitset(const struct bitset *set, int size)
 2492 {
 2493         int bit, once;
 2494 
 2495         for (once = 0, bit = 0; bit < size; bit++) {
 2496                 if (CPU_ISSET(bit, set)) {
 2497                         if (once == 0) {
 2498                                 db_printf("%d", bit);
 2499                                 once = 1;
 2500                         } else  
 2501                                 db_printf(",%d", bit);
 2502                 }
 2503         }
 2504         if (once == 0)
 2505                 db_printf("<none>");
 2506 }
 2507 
 2508 void
 2509 ddb_display_cpuset(const cpuset_t *set)
 2510 {
 2511         ddb_display_bitset((const struct bitset *)set, CPU_SETSIZE);
 2512 }
 2513 
 2514 static void
 2515 ddb_display_domainset(const domainset_t *set)
 2516 {
 2517         ddb_display_bitset((const struct bitset *)set, DOMAINSET_SETSIZE);
 2518 }
 2519 
 2520 DB_SHOW_COMMAND_FLAGS(cpusets, db_show_cpusets, DB_CMD_MEMSAFE)
 2521 {
 2522         struct cpuset *set;
 2523 
 2524         LIST_FOREACH(set, &cpuset_ids, cs_link) {
 2525                 db_printf("set=%p id=%-6u ref=%-6d flags=0x%04x parent id=%d\n",
 2526                     set, set->cs_id, refcount_load(&set->cs_ref), set->cs_flags,
 2527                     (set->cs_parent != NULL) ? set->cs_parent->cs_id : 0);
 2528                 db_printf("  cpu mask=");
 2529                 ddb_display_cpuset(&set->cs_mask);
 2530                 db_printf("\n");
 2531                 db_printf("  domain policy %d prefer %d mask=",
 2532                     set->cs_domain->ds_policy, set->cs_domain->ds_prefer);
 2533                 ddb_display_domainset(&set->cs_domain->ds_mask);
 2534                 db_printf("\n");
 2535                 if (db_pager_quit)
 2536                         break;
 2537         }
 2538 }
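
/*
 * Illustrative example of "show cpusets" output, assembled from the
 * format strings above (pointer, id, and mask values are invented):
 *
 *   set=0xfffff80003061e80 id=1      ref=45     flags=0x0000 parent id=0
 *     cpu mask=0,1,2,3
 *     domain policy 2 prefer -1 mask=0
 */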
 2539 
 2540 DB_SHOW_COMMAND_FLAGS(domainsets, db_show_domainsets, DB_CMD_MEMSAFE)
 2541 {
 2542         struct domainset *set;
 2543 
 2544         LIST_FOREACH(set, &cpuset_domains, ds_link) {
 2545                 db_printf("set=%p policy %d prefer %d cnt %d\n",
 2546                     set, set->ds_policy, set->ds_prefer, set->ds_cnt);
 2547                 db_printf("  mask =");
 2548                 ddb_display_domainset(&set->ds_mask);
 2549                 db_printf("\n");
 2550         }
 2551 }
 2552 #endif /* DDB */
