FreeBSD/Linux Kernel Cross Reference
sys/kern/subr_copy.c

/*      $NetBSD: subr_copy.c,v 1.16 2022/04/09 23:51:09 riastradh Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999, 2002, 2007, 2008, 2019
 *      The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 1992, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)kern_subr.c 8.4 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_copy.c,v 1.16 2022/04/09 23:51:09 riastradh Exp $");

#define __UFETCHSTORE_PRIVATE
#define __UCAS_PRIVATE

#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm_extern.h>

void
uio_setup_sysspace(struct uio *uio)
{

        uio->uio_vmspace = vmspace_kernel();
}

int
uiomove(void *buf, size_t n, struct uio *uio)
{
        struct vmspace *vm = uio->uio_vmspace;
        struct iovec *iov;
        size_t cnt;
        int error = 0;
        char *cp = buf;

        ASSERT_SLEEPABLE();

        KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE);
        while (n > 0 && uio->uio_resid) {
                iov = uio->uio_iov;
                cnt = iov->iov_len;
                if (cnt == 0) {
                        KASSERT(uio->uio_iovcnt > 0);
                        uio->uio_iov++;
                        uio->uio_iovcnt--;
                        continue;
                }
                if (cnt > n)
                        cnt = n;
                if (!VMSPACE_IS_KERNEL_P(vm)) {
                        preempt_point();
                }

                if (uio->uio_rw == UIO_READ) {
                        error = copyout_vmspace(vm, cp, iov->iov_base,
                            cnt);
                } else {
                        error = copyin_vmspace(vm, iov->iov_base, cp,
                            cnt);
                }
                if (error) {
                        break;
                }
                iov->iov_base = (char *)iov->iov_base + cnt;
                iov->iov_len -= cnt;
                uio->uio_resid -= cnt;
                uio->uio_offset += cnt;
                cp += cnt;
                KDASSERT(cnt <= n);
                n -= cnt;
        }

        return (error);
}
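
/*
 * Illustrative sketch, not part of the original file: the write side
 * of a simple character device, using uiomove() to gather the
 * caller's data into a kernel buffer.  Because uio_rw is UIO_WRITE,
 * uiomove() copies from the uio's iovecs into kbuf.  The names
 * example_write and kbuf are hypothetical.
 */
#if 0
static int
example_write(dev_t dev, struct uio *uio, int flags)
{
        char kbuf[64];
        size_t len = MIN(uio->uio_resid, sizeof(kbuf));
        int error;

        error = uiomove(kbuf, len, uio);
        if (error)
                return error;
        /* ... consume the len bytes now in kbuf ... */
        return 0;
}
#endif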

/*
 * Wrapper for uiomove() that validates the arguments against a known-good
 * kernel buffer.
 */
int
uiomove_frombuf(void *buf, size_t buflen, struct uio *uio)
{
        size_t offset;

        if (uio->uio_offset < 0 || /* uio->uio_resid < 0 || */
            (offset = uio->uio_offset) != uio->uio_offset)
                return (EINVAL);
        if (offset >= buflen)
                return (0);
        return (uiomove((char *)buf + offset, buflen - offset, uio));
}
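
/*
 * Illustrative sketch, not part of the original file: a read routine
 * that exposes a fixed kernel buffer through uiomove_frombuf().  The
 * wrapper clamps uio_offset against the buffer size, so reads at or
 * past the end return 0 bytes rather than an error.  The names
 * example_buf and example_read are hypothetical.
 */
#if 0
static char example_buf[] = "hello from the kernel\n";

static int
example_read(dev_t dev, struct uio *uio, int flags)
{

        return uiomove_frombuf(example_buf, sizeof(example_buf) - 1, uio);
}
#endif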

/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
        struct iovec *iov;

        if (uio->uio_resid <= 0)
                panic("ureadc: non-positive resid");
again:
        if (uio->uio_iovcnt <= 0)
                panic("ureadc: non-positive iovcnt");
        iov = uio->uio_iov;
        if (iov->iov_len <= 0) {
                uio->uio_iovcnt--;
                uio->uio_iov++;
                goto again;
        }
        if (!VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
                int error;
                if ((error = ustore_char(iov->iov_base, c)) != 0)
                        return (error);
        } else {
                *(char *)iov->iov_base = c;
        }
        iov->iov_base = (char *)iov->iov_base + 1;
        iov->iov_len--;
        uio->uio_resid--;
        uio->uio_offset++;
        return (0);
}
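
/*
 * Illustrative sketch, not part of the original file: draining a
 * kernel buffer one character at a time with ureadc(), as a
 * line-discipline-style consumer might.  Stops when the caller's
 * request is satisfied or a store fails.  example_drain is
 * hypothetical.
 */
#if 0
static int
example_drain(const char *kbuf, size_t klen, struct uio *uio)
{
        size_t i;
        int error = 0;

        for (i = 0; i < klen && uio->uio_resid > 0; i++) {
                if ((error = ureadc(kbuf[i], uio)) != 0)
                        break;
        }
        return error;
}
#endif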

/*
 * Like copyin(), but operates on an arbitrary vmspace.
 */
int
copyin_vmspace(struct vmspace *vm, const void *uaddr, void *kaddr, size_t len)
{
        struct iovec iov;
        struct uio uio;
        int error;

        if (len == 0)
                return (0);

        if (VMSPACE_IS_KERNEL_P(vm)) {
                return kcopy(uaddr, kaddr, len);
        }
        if (__predict_true(vm == curproc->p_vmspace)) {
                return copyin(uaddr, kaddr, len);
        }

        iov.iov_base = kaddr;
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t)(uintptr_t)uaddr;
        uio.uio_resid = len;
        uio.uio_rw = UIO_READ;
        UIO_SETUP_SYSSPACE(&uio);
        error = uvm_io(&vm->vm_map, &uio, 0);

        return (error);
}

/*
 * Like copyout(), but operates on an arbitrary vmspace.
 */
int
copyout_vmspace(struct vmspace *vm, const void *kaddr, void *uaddr, size_t len)
{
        struct iovec iov;
        struct uio uio;
        int error;

        if (len == 0)
                return (0);

        if (VMSPACE_IS_KERNEL_P(vm)) {
                return kcopy(kaddr, uaddr, len);
        }
        if (__predict_true(vm == curproc->p_vmspace)) {
                return copyout(kaddr, uaddr, len);
        }

        iov.iov_base = __UNCONST(kaddr); /* XXXUNCONST cast away const */
        iov.iov_len = len;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = (off_t)(uintptr_t)uaddr;
        uio.uio_resid = len;
        uio.uio_rw = UIO_WRITE;
        UIO_SETUP_SYSSPACE(&uio);
        error = uvm_io(&vm->vm_map, &uio, 0);

        return (error);
}
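
/*
 * Illustrative sketch, not part of the original file: pushing a block
 * of kernel data into a (possibly foreign) process image, the way
 * exec/signal-frame style code does.  Whether this turns into
 * kcopy(), copyout(), or uvm_io() is decided inside
 * copyout_vmspace().  example_push is hypothetical.
 */
#if 0
static int
example_push(struct vmspace *vm, void *uva, const void *kbuf, size_t len)
{

        return copyout_vmspace(vm, kbuf, uva, len);
}
#endif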

/*
 * Like copyin(), but operates on an arbitrary process.
 */
int
copyin_proc(struct proc *p, const void *uaddr, void *kaddr, size_t len)
{
        struct vmspace *vm;
        int error;

        error = proc_vmspace_getref(p, &vm);
        if (error) {
                return error;
        }
        error = copyin_vmspace(vm, uaddr, kaddr, len);
        uvmspace_free(vm);

        return error;
}

/*
 * Like copyout(), but operates on an arbitrary process.
 */
int
copyout_proc(struct proc *p, const void *kaddr, void *uaddr, size_t len)
{
        struct vmspace *vm;
        int error;

        error = proc_vmspace_getref(p, &vm);
        if (error) {
                return error;
        }
        error = copyout_vmspace(vm, kaddr, uaddr, len);
        uvmspace_free(vm);

        return error;
}

/*
 * Like copyin(), but operates on an arbitrary pid.
 */
int
copyin_pid(pid_t pid, const void *uaddr, void *kaddr, size_t len)
{
        struct proc *p;
        struct vmspace *vm;
        int error;

        mutex_enter(&proc_lock);
        p = proc_find(pid);
        if (p == NULL) {
                mutex_exit(&proc_lock);
                return ESRCH;
        }
        mutex_enter(p->p_lock);
        error = proc_vmspace_getref(p, &vm);
        mutex_exit(p->p_lock);
        mutex_exit(&proc_lock);

        if (error == 0) {
                error = copyin_vmspace(vm, uaddr, kaddr, len);
                uvmspace_free(vm);
        }
        return error;
}
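
/*
 * Illustrative sketch, not part of the original file: a
 * debugger-style peek that reads one word from another process by
 * pid, e.g. from a sysctl or procfs handler.  copyin_pid() does the
 * proc lookup and vmspace reference handling internally.
 * example_peek is hypothetical.
 */
#if 0
static int
example_peek(pid_t pid, const void *uva, register_t *valuep)
{

        return copyin_pid(pid, uva, valuep, sizeof(*valuep));
}
#endif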

/*
 * Like copyin(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyin(int ioctlflags, const void *src, void *dst, size_t len)
{
        if (ioctlflags & FKIOCTL)
                return kcopy(src, dst, len);
        return copyin(src, dst, len);
}

/*
 * Like copyout(), except it operates on kernel addresses when the FKIOCTL
 * flag is passed in `ioctlflags' from the ioctl call.
 */
int
ioctl_copyout(int ioctlflags, const void *src, void *dst, size_t len)
{
        if (ioctlflags & FKIOCTL)
                return kcopy(src, dst, len);
        return copyout(src, dst, len);
}
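
/*
 * Illustrative sketch, not part of the original file: an ioctl
 * handler whose argument structure carries a pointer to a caller
 * buffer.  For normal callers that pointer is a user address; when
 * the caller passed FKIOCTL it is a kernel address, and
 * ioctl_copyout() picks the right copy primitive either way.  The
 * names EXAMPLE_FETCH, struct example_fetch, and example_ioctl are
 * hypothetical.
 */
#if 0
struct example_fetch {
        void *buf;              /* caller-supplied buffer */
        size_t len;             /* bytes wanted */
};

static int
example_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
        struct example_fetch *ef = data; /* argument already copied in */
        char kbuf[64];

        if (cmd != EXAMPLE_FETCH)
                return ENOTTY;
        if (ef->len > sizeof(kbuf))
                return EINVAL;
        memset(kbuf, 0, ef->len);       /* fill with device data here */
        return ioctl_copyout(flag, kbuf, ef->buf, ef->len);
}
#endif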

/*
 * User-space CAS / fetch / store
 */

#ifdef __NO_STRICT_ALIGNMENT
#define CHECK_ALIGNMENT()       __nothing
#else /* ! __NO_STRICT_ALIGNMENT */
static bool
ufetchstore_aligned(uintptr_t uaddr, size_t size)
{
        return (uaddr & (size - 1)) == 0;
}

#define CHECK_ALIGNMENT()                                               \
do {                                                                    \
        if (!ufetchstore_aligned((uintptr_t)uaddr, sizeof(*uaddr)))     \
                return EFAULT;                                          \
} while (/*CONSTCOND*/0)
#endif /* __NO_STRICT_ALIGNMENT */
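
/*
 * Illustrative note, not in the original file: on a strict-alignment
 * platform, a 4-byte access such as ufetch_32() at uaddr 0x1003 fails
 * the (uaddr & 3) == 0 test and returns EFAULT up front, instead of
 * taking an alignment fault inside the access itself.
 */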

/*
 * __HAVE_UCAS_FULL platforms provide _ucas_32() and _ucas_64() themselves.
 * _RUMPKERNEL also provides its own _ucas_32() and _ucas_64().
 *
 * In all other cases, we provide generic implementations that work on
 * all platforms.
 */

#if !defined(__HAVE_UCAS_FULL) && !defined(_RUMPKERNEL)
#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/once.h>
#include <sys/mutex.h>
#include <sys/ipi.h>

static int ucas_critical_splcookie;
static volatile u_int ucas_critical_pausing_cpus;
static u_int ucas_critical_ipi;
static ONCE_DECL(ucas_critical_init_once)

static void
ucas_critical_cpu_gate(void *arg __unused)
{
        int count = SPINLOCK_BACKOFF_MIN;

        KASSERT(atomic_load_relaxed(&ucas_critical_pausing_cpus) > 0);

        /*
         * Notify ucas_critical_wait that we have stopped.  Using
         * store-release ensures all our memory operations up to the
         * IPI happen before the ucas -- no buffered stores on our end
         * can clobber it later on, for instance.
         *
         * Matches atomic_load_acquire in ucas_critical_wait -- turns
         * the following atomic_dec_uint into a store-release.
         */
#ifndef __HAVE_ATOMIC_AS_MEMBAR
        membar_release();
#endif
        atomic_dec_uint(&ucas_critical_pausing_cpus);

        /*
         * Wait for ucas_critical_exit to reopen the gate and let us
         * proceed.  Using a load-acquire ensures the ucas happens
         * before any of our memory operations when we return from the
         * IPI and proceed -- we won't observe any stale cached value
         * that the ucas overwrote, for instance.
         *
         * Matches atomic_store_release in ucas_critical_exit.
         */
        while (atomic_load_acquire(&ucas_critical_pausing_cpus) != (u_int)-1) {
                SPINLOCK_BACKOFF(count);
        }
}

static int
ucas_critical_init(void)
{

        ucas_critical_ipi = ipi_register(ucas_critical_cpu_gate, NULL);
        return 0;
}

static void
ucas_critical_wait(void)
{
        int count = SPINLOCK_BACKOFF_MIN;

        /*
         * Wait for all CPUs to stop at the gate.  Using a load-acquire
         * ensures all memory operations before they stop at the gate
         * happen before the ucas -- no buffered stores in other CPUs
         * can clobber it later on, for instance.
         *
         * Matches membar_release/atomic_dec_uint (store-release) in
         * ucas_critical_cpu_gate.
         */
        while (atomic_load_acquire(&ucas_critical_pausing_cpus) > 0) {
                SPINLOCK_BACKOFF(count);
        }
}
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

static inline void
ucas_critical_enter(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
        if (ncpu > 1) {
                RUN_ONCE(&ucas_critical_init_once, ucas_critical_init);

                /*
                 * Acquire the mutex first, then go to splhigh() and
                 * broadcast the IPI to lock all of the other CPUs
                 * behind the gate.
                 *
                 * N.B. Going to splhigh() implicitly disables preemption,
                 * so there's no need to do it explicitly.
                 */
                mutex_enter(&cpu_lock);
                ucas_critical_splcookie = splhigh();
                ucas_critical_pausing_cpus = ncpu - 1;
                ipi_trigger_broadcast(ucas_critical_ipi, true);
                ucas_critical_wait();
                return;
        }
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

        KPREEMPT_DISABLE(l);
}

static inline void
ucas_critical_exit(lwp_t * const l)
{

#if !defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)
        if (ncpu > 1) {
                /*
                 * Open the gate and notify all CPUs in
                 * ucas_critical_cpu_gate that they can now proceed.
                 * Using a store-release ensures the ucas happens
                 * before any memory operations they issue after the
                 * IPI -- they won't observe any stale cache of the
                 * target word, for instance.
                 *
                 * Matches atomic_load_acquire in ucas_critical_cpu_gate.
                 */
                atomic_store_release(&ucas_critical_pausing_cpus, (u_int)-1);
                splx(ucas_critical_splcookie);
                mutex_exit(&cpu_lock);
                return;
        }
#endif /* ! __HAVE_UCAS_MP && MULTIPROCESSOR */

        KPREEMPT_ENABLE(l);
}

int
_ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{
        lwp_t * const l = curlwp;
        uint32_t *uva = ((void *)(uintptr_t)uaddr);
        int error;

        /*
         * Wire the user address down to avoid taking a page fault during
         * the critical section.
         */
        error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
                           VM_PROT_READ | VM_PROT_WRITE);
        if (error)
                return error;

        ucas_critical_enter(l);
        error = _ufetch_32(uva, ret);
        if (error == 0 && *ret == old) {
                error = _ustore_32(uva, new);
        }
        ucas_critical_exit(l);

        uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

        return error;
}

#ifdef _LP64
int
_ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{
        lwp_t * const l = curlwp;
        uint64_t *uva = ((void *)(uintptr_t)uaddr);
        int error;

        /*
         * Wire the user address down to avoid taking a page fault during
         * the critical section.
         */
        error = uvm_vslock(l->l_proc->p_vmspace, uva, sizeof(*uaddr),
                           VM_PROT_READ | VM_PROT_WRITE);
        if (error)
                return error;

        ucas_critical_enter(l);
        error = _ufetch_64(uva, ret);
        if (error == 0 && *ret == old) {
                error = _ustore_64(uva, new);
        }
        ucas_critical_exit(l);

        uvm_vsunlock(l->l_proc->p_vmspace, uva, sizeof(*uaddr));

        return error;
}
#endif /* _LP64 */
#endif /* ! __HAVE_UCAS_FULL && ! _RUMPKERNEL */

int
ucas_32(volatile uint32_t *uaddr, uint32_t old, uint32_t new, uint32_t *ret)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
        if (ncpu > 1) {
                return _ucas_32_mp(uaddr, old, new, ret);
        }
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
        return _ucas_32(uaddr, old, new, ret);
}

#ifdef _LP64
int
ucas_64(volatile uint64_t *uaddr, uint64_t old, uint64_t new, uint64_t *ret)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
#if (defined(__HAVE_UCAS_MP) && defined(MULTIPROCESSOR)) && \
    !defined(_RUMPKERNEL)
        if (ncpu > 1) {
                return _ucas_64_mp(uaddr, old, new, ret);
        }
#endif /* __HAVE_UCAS_MP && MULTIPROCESSOR */
        return _ucas_64(uaddr, old, new, ret);
}
#endif /* _LP64 */

__strong_alias(ucas_int,ucas_32);
#ifdef _LP64
__strong_alias(ucas_ptr,ucas_64);
#else
__strong_alias(ucas_ptr,ucas_32);
#endif /* _LP64 */
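
/*
 * Illustrative sketch, not part of the original file: atomically
 * incrementing a 32-bit counter in user memory by looping on
 * ucas_32() until the compare-and-swap succeeds.  example_uinc is
 * hypothetical.
 */
#if 0
static int
example_uinc(volatile uint32_t *uaddr)
{
        uint32_t old, actual;
        int error;

        if ((error = ufetch_32(__UNVOLATILE(uaddr), &old)) != 0)
                return error;
        for (;;) {
                error = ucas_32(uaddr, old, old + 1, &actual);
                if (error != 0 || actual == old)
                        return error;
                old = actual;   /* lost a race; retry with the new value */
        }
}
#endif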

int
ufetch_8(const uint8_t *uaddr, uint8_t *valp)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ufetch_8(uaddr, valp);
}

int
ufetch_16(const uint16_t *uaddr, uint16_t *valp)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ufetch_16(uaddr, valp);
}

int
ufetch_32(const uint32_t *uaddr, uint32_t *valp)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ufetch_32(uaddr, valp);
}

#ifdef _LP64
int
ufetch_64(const uint64_t *uaddr, uint64_t *valp)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ufetch_64(uaddr, valp);
}
#endif /* _LP64 */

__strong_alias(ufetch_char,ufetch_8);
__strong_alias(ufetch_short,ufetch_16);
__strong_alias(ufetch_int,ufetch_32);
#ifdef _LP64
__strong_alias(ufetch_long,ufetch_64);
__strong_alias(ufetch_ptr,ufetch_64);
#else
__strong_alias(ufetch_long,ufetch_32);
__strong_alias(ufetch_ptr,ufetch_32);
#endif /* _LP64 */

int
ustore_8(uint8_t *uaddr, uint8_t val)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ustore_8(uaddr, val);
}

int
ustore_16(uint16_t *uaddr, uint16_t val)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ustore_16(uaddr, val);
}

int
ustore_32(uint32_t *uaddr, uint32_t val)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ustore_32(uaddr, val);
}

#ifdef _LP64
int
ustore_64(uint64_t *uaddr, uint64_t val)
{

        ASSERT_SLEEPABLE();
        CHECK_ALIGNMENT();
        return _ustore_64(uaddr, val);
}
#endif /* _LP64 */

__strong_alias(ustore_char,ustore_8);
__strong_alias(ustore_short,ustore_16);
__strong_alias(ustore_int,ustore_32);
#ifdef _LP64
__strong_alias(ustore_long,ustore_64);
__strong_alias(ustore_ptr,ustore_64);
#else
__strong_alias(ustore_long,ustore_32);
__strong_alias(ustore_ptr,ustore_32);
#endif /* _LP64 */
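
/*
 * Illustrative sketch, not part of the original file: setting a flag
 * bit in a single int in user memory with the ufetch/ustore
 * primitives.  Unlike copyin()/copyout(), these access exactly one
 * naturally aligned cell, which is what lock-assist and robust-futex
 * style code wants.  example_setflag is hypothetical, and the
 * read-modify-write here is not atomic against the user program
 * (use ucas_32() when that matters).
 */
#if 0
static int
example_setflag(unsigned int *uaddr, unsigned int bit)
{
        unsigned int value;
        int error;

        if ((error = ufetch_int(uaddr, &value)) != 0)
                return error;
        return ustore_int(uaddr, value | bit);
}
#endif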
