FreeBSD/Linux Kernel Cross Reference
sys/mm/util.c

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <asm/uaccess.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s, or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
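
/*
 * Illustrative usage sketch (not part of mm/util.c): a typical caller takes
 * a private, kernel-owned copy of a caller-supplied string with kstrdup()
 * and later releases it with kfree().  The function and variable names
 * below are hypothetical.
 */
static char *example_label;

static int example_set_label(const char *name)
{
        char *copy;

        copy = kstrdup(name, GFP_KERNEL);       /* may sleep; NULL on failure */
        if (!copy)
                return -ENOMEM;

        kfree(example_label);                   /* drop any previous copy */
        example_label = copy;
        return 0;
}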

/**
 * kstrndup - allocate space for and copy an existing string, limited to @max chars
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated, NUL-terminated copy of @s, or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
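
/*
 * Illustrative usage sketch (not part of mm/util.c): memdup_user() reports
 * failure through ERR_PTR() rather than NULL, so callers must test the
 * result with IS_ERR() before touching the buffer.  The handler below is
 * hypothetical.
 */
static int example_write_config(const void __user *ubuf, size_t len)
{
        void *kbuf;

        kbuf = memdup_user(ubuf, len);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);           /* -ENOMEM or -EFAULT */

        /* ... parse the kernel-space copy in kbuf ... */

        kfree(kbuf);
        return 0;
}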

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
{
        void *ret;
        size_t ks = 0;

        if (p)
                ks = ksize(p);

        if (ks >= new_size)
                return (void *)p;

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}

/**
 * __krealloc - like krealloc() but doesn't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this when the old buffer cannot be freed
 * immediately, for example because it is still being read under RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
        if (unlikely(!new_size))
                return ZERO_SIZE_PTR;

        return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __do_krealloc(p, new_size, flags);
        if (ret && p != ret)
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
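
/*
 * Illustrative usage sketch (not part of mm/util.c): as with userspace
 * realloc(), the original buffer remains valid when krealloc() fails, so
 * assign the result to a temporary before overwriting the old pointer.
 * The helper below is hypothetical.
 */
static int example_grow_array(int **items, size_t *capacity, size_t new_capacity)
{
        int *tmp;

        tmp = krealloc(*items, new_capacity * sizeof(**items), GFP_KERNEL);
        if (!tmp)
                return -ENOMEM;         /* *items is untouched and still usable */

        *items = tmp;
        *capacity = new_capacity;
        return 0;
}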

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer, which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
        memset(mem, 0, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kzfree);
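
/*
 * Illustrative usage sketch (not part of mm/util.c): kzfree() is intended
 * for buffers holding sensitive data such as key material, so the contents
 * do not linger in freed slab memory.  The helper below is hypothetical.
 */
static int example_use_secret(const u8 *secret, size_t len)
{
        u8 *copy;

        copy = kmemdup(secret, len, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;

        /* ... use the sensitive copy ... */

        kzfree(copy);           /* zeroes the whole allocation before freeing it */
        return 0;
}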

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Returns an ERR_PTR() on failure.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
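
/*
 * Illustrative usage sketch (not part of mm/util.c): like memdup_user(),
 * strndup_user() reports failure through ERR_PTR().  The syscall-style
 * helper below is hypothetical and bounds the copy at one page.
 */
static int example_set_name(const char __user *uname)
{
        char *name;

        name = strndup_user(uname, PAGE_SIZE);  /* result includes the trailing NUL */
        if (IS_ERR(name))
                return PTR_ERR(name);           /* -EFAULT, -EINVAL or -ENOMEM */

        /* ... use the NUL-terminated kernel copy ... */

        kfree(name);
        return 0;
}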

/*
 * Link @vma into @mm's VMA list (and back-pointers) immediately after
 * @prev; if @prev is NULL, @vma becomes the first mapping and its
 * successor is taken from @rb_parent, if any.
 */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
                                struct vm_area_struct *vma)
{
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If @in_group is non-zero, check the entire thread group rather than
 * just the given task.  Returns the pid of the task that the vma is a
 * stack for, or 0 if it is not a stack.
 */
pid_t vm_is_stack(struct task_struct *task,
                  struct vm_area_struct *vma, int in_group)
{
        pid_t ret = 0;

        if (vm_is_stack_for_task(task, vma))
                return task->pid;

        if (in_group) {
                struct task_struct *t;
                rcu_read_lock();
                if (!pid_alive(task))
                        goto done;

                t = task;
                do {
                        if (vm_is_stack_for_task(t, vma)) {
                                ret = t->pid;
                                goto done;
                        }
                } while_each_thread(task, t);
done:
                rcu_read_unlock();
        }

        return ret;
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
        mm->unmap_area = arch_unmap_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __attribute__((weak)) get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        struct mm_struct *mm = current->mm;
        int ret;

        down_read(&mm->mmap_sem);
        ret = get_user_pages(current, mm, start, nr_pages,
                                        write, 0, pages, NULL);
        up_read(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
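
/*
 * Illustrative usage sketch (not part of mm/util.c): pinning a user buffer
 * before doing I/O on it.  get_user_pages_fast() may pin fewer pages than
 * requested, so the caller must check the return value and release every
 * pinned page with put_page().  The helper below is hypothetical and
 * assumes @uaddr is page aligned.
 */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
{
        int pinned, i;

        pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (pinned < 0)
                return pinned;                  /* -errno, nothing was pinned */

        if (pinned < nr_pages) {
                for (i = 0; i < pinned; i++)
                        put_page(pages[i]);     /* undo the partial pin */
                return -EFAULT;
        }

        /* ... do the I/O, then put_page() each page when done ... */
        return 0;
}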

/*
 * Kernel-internal mmap helper: performs the LSM file-mapping check and then
 * calls do_mmap_pgoff() with current->mm's mmap_sem held for writing.
 */
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                down_write(&mm->mmap_sem);
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
                up_write(&mm->mmap_sem);
        }
        return ret;
}

/* Byte-offset wrapper around vm_mmap_pgoff(); @offset must be page aligned. */
unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset & ~PAGE_MASK))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
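
/*
 * Illustrative usage sketch (not part of mm/util.c): a kernel caller (for
 * example a binary-format loader) mapping part of a file into the current
 * process.  IS_ERR_VALUE() distinguishes an encoded errno from a valid
 * mapping address.  The helper below is hypothetical.
 */
static int example_map_file(struct file *file, unsigned long size,
                            unsigned long offset, unsigned long *addrp)
{
        unsigned long addr;

        addr = vm_mmap(file, 0, size, PROT_READ, MAP_PRIVATE, offset);
        if (IS_ERR_VALUE(addr))
                return (int)addr;       /* negative errno encoded in the value */

        *addrp = addr;
        return 0;
}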

/* Tracepoint definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
