The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/kernel/user.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * The "user cache".
    3  *
    4  * (C) Copyright 1991-2000 Linus Torvalds
    5  *
    6  * We have a per-user structure to keep track of how many
    7  * processes, files etc the user has claimed, in order to be
    8  * able to have per-user limits for system resources. 
    9  */
   10 
   11 #include <linux/init.h>
   12 #include <linux/sched.h>
   13 #include <linux/slab.h>
   14 
   15 /*
   16  * UID task count cache, to get fast user lookup in "alloc_uid"
   17  * when changing user ID's (ie setuid() and friends).
   18  */
   19 #define UIDHASH_BITS            8
   20 #define UIDHASH_SZ              (1 << UIDHASH_BITS)
   21 #define UIDHASH_MASK            (UIDHASH_SZ - 1)
   22 #define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) ^ uid) & UIDHASH_MASK)
   23 #define uidhashentry(uid)       (uidhash_table + __uidhashfn(uid))
   24 
/* Slab cache backing all dynamically allocated user_struct entries. */
static kmem_cache_t *uid_cachep;
/* Hash table of active users; entries chained via user_struct next/pprev. */
static struct user_struct *uidhash_table[UIDHASH_SZ];
/* Protects uidhash_table and the hash linkage of every user_struct. */
static spinlock_t uidhash_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statically allocated entry for uid 0: one initial reference, one
 * process (init runs as root at boot -- see uid_cache_init(), which
 * hashes this entry in), and no files accounted yet.
 */
struct user_struct root_user = {
        __count:        ATOMIC_INIT(1),
        processes:      ATOMIC_INIT(1),
        files:          ATOMIC_INIT(0)
};
   34 
   35 /*
   36  * These routines must be called with the uidhash spinlock held!
   37  */
   38 static inline void uid_hash_insert(struct user_struct *up, struct user_struct **hashent)
   39 {
   40         struct user_struct *next = *hashent;
   41 
   42         up->next = next;
   43         if (next)
   44                 next->pprev = &up->next;
   45         up->pprev = hashent;
   46         *hashent = up;
   47 }
   48 
   49 static inline void uid_hash_remove(struct user_struct *up)
   50 {
   51         struct user_struct *next = up->next;
   52         struct user_struct **pprev = up->pprev;
   53 
   54         if (next)
   55                 next->pprev = pprev;
   56         *pprev = next;
   57 }
   58 
   59 static inline struct user_struct *uid_hash_find(uid_t uid, struct user_struct **hashent)
   60 {
   61         struct user_struct *next;
   62 
   63         next = *hashent;
   64         for (;;) {
   65                 struct user_struct *up = next;
   66                 if (next) {
   67                         next = up->next;
   68                         if (up->uid != uid)
   69                                 continue;
   70                         atomic_inc(&up->__count);
   71                 }
   72                 return up;
   73         }
   74 }
   75 
   76 void free_uid(struct user_struct *up)
   77 {
   78         if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
   79                 uid_hash_remove(up);
   80                 kmem_cache_free(uid_cachep, up);
   81                 spin_unlock(&uidhash_lock);
   82         }
   83 }
   84 
/*
 * Look up (or create) the user_struct for 'uid' and return it with its
 * reference count raised.  Returns NULL only on allocation failure.
 * Callers release the reference with free_uid().
 */
struct user_struct * alloc_uid(uid_t uid)
{
        struct user_struct **hashent = uidhashentry(uid);
        struct user_struct *up;

        /* Fast path: the entry usually exists already. */
        spin_lock(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock(&uidhash_lock);

        if (!up) {
                struct user_struct *new;

                /* Allocate outside the spinlock -- SLAB_KERNEL may sleep. */
                new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
                if (!new)
                        return NULL;
                new->uid = uid;
                atomic_set(&new->__count, 1);
                atomic_set(&new->processes, 0);
                atomic_set(&new->files, 0);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        /* Lost the race: discard ours and use the winner's
                         * entry (uid_hash_find already took a reference). */
                        kmem_cache_free(uid_cachep, new);
                } else {
                        /* Won the race: publish our entry; its initial
                         * __count of 1 is the caller's reference. */
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock(&uidhash_lock);

        }
        return up;
}
  122 
  123 
  124 static int __init uid_cache_init(void)
  125 {
  126         uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
  127                                        0,
  128                                        SLAB_HWCACHE_ALIGN, NULL, NULL);
  129         if(!uid_cachep)
  130                 panic("Cannot create uid taskcount SLAB cache\n");
  131 
  132         /* Insert the root user immediately - init already runs with this */
  133         uid_hash_insert(&root_user, uidhashentry(0));
  134         return 0;
  135 }
  136 
  137 module_init(uid_cache_init);

Cache object: 86a9065a2e9222f86e4bb6ad3e54a1ae


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.