FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/copyout.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2018 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

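/*
 * With the kernel and user mode running in separate address spaces on
 * i386 (the 4/4 split introduced as a Meltdown work-around), user memory
 * cannot be reached with plain loads and stores.  Two paths are
 * provided: the *_fast assembler helpers declared below, which live in
 * the relocated trampoline text and service short accesses (at most
 * TRAMP_COPYOUT_SZ bytes for copyin()/copyout()), and the cp_slow0()
 * path, which wires the user pages and maps them into a per-CPU kernel
 * window.  The helpers are reached through function pointers that
 * copyout_init_tramp() adjusts by the trampoline relocation offset.
 */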
int copyin_fast(const void *udaddr, void *kaddr, size_t len, u_int);
static int (*copyin_fast_tramp)(const void *, void *, size_t, u_int);
int copyout_fast(const void *kaddr, void *udaddr, size_t len, u_int);
static int (*copyout_fast_tramp)(const void *, void *, size_t, u_int);
int fubyte_fast(volatile const void *base, u_int kcr3);
static int (*fubyte_fast_tramp)(volatile const void *, u_int);
int fuword16_fast(volatile const void *base, u_int kcr3);
static int (*fuword16_fast_tramp)(volatile const void *, u_int);
int fueword_fast(volatile const void *base, long *val, u_int kcr3);
static int (*fueword_fast_tramp)(volatile const void *, long *, u_int);
int subyte_fast(volatile void *base, int val, u_int kcr3);
static int (*subyte_fast_tramp)(volatile void *, int, u_int);
int suword16_fast(volatile void *base, int val, u_int kcr3);
static int (*suword16_fast_tramp)(volatile void *, int, u_int);
int suword_fast(volatile void *base, long val, u_int kcr3);
static int (*suword_fast_tramp)(volatile void *, long, u_int);

static int fast_copyout = 1;
SYSCTL_INT(_machdep, OID_AUTO, fast_copyout, CTLFLAG_RWTUN,
    &fast_copyout, 0,
    "");

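/*
 * Resolve the addresses of the trampoline-resident fast routines.  The
 * trampoline text is relocated from its linked address by setidt_disp,
 * so the same displacement is applied to each entry point before it is
 * stored in the corresponding function pointer.
 */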
void
copyout_init_tramp(void)
{

        copyin_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
            (uintptr_t)copyin_fast + setidt_disp);
        copyout_fast_tramp = (int (*)(const void *, void *, size_t, u_int))(
            (uintptr_t)copyout_fast + setidt_disp);
        fubyte_fast_tramp = (int (*)(volatile const void *, u_int))(
            (uintptr_t)fubyte_fast + setidt_disp);
        fuword16_fast_tramp = (int (*)(volatile const void *, u_int))(
            (uintptr_t)fuword16_fast + setidt_disp);
        fueword_fast_tramp = (int (*)(volatile const void *, long *, u_int))(
            (uintptr_t)fueword_fast + setidt_disp);
        subyte_fast_tramp = (int (*)(volatile void *, int, u_int))(
            (uintptr_t)subyte_fast + setidt_disp);
        suword16_fast_tramp = (int (*)(volatile void *, int, u_int))(
            (uintptr_t)suword16_fast + setidt_disp);
        suword_fast_tramp = (int (*)(volatile void *, long, u_int))(
            (uintptr_t)suword_fast + setidt_disp);
}

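/*
 * Slow path for user access, used when the fast trampoline cannot be
 * used or fails.  The range [uva, uva + len), which may span at most
 * two pages, is wired with vm_fault_quick_hold_pages() and mapped into
 * a per-CPU kernel window by pmap_cp_slow0_map(); the callback f is
 * then run on the resulting kernel address.  Threads that may not
 * sleep use the mutex-protected window, all others the sx-protected
 * one.  Returns EFAULT if the user pages cannot be faulted in.
 */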
int
cp_slow0(vm_offset_t uva, size_t len, bool write,
    void (*f)(vm_offset_t, void *), void *arg)
{
        struct pcpu *pc;
        vm_page_t m[2];
        vm_offset_t kaddr;
        int error, i, plen;
        bool sleepable;

        plen = howmany(uva - trunc_page(uva) + len, PAGE_SIZE);
        MPASS(plen <= nitems(m));
        error = 0;
        i = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map, uva, len,
            (write ? VM_PROT_WRITE : VM_PROT_READ) | VM_PROT_QUICK_NOFAULT,
            m, nitems(m));
        if (i != plen)
                return (EFAULT);
        sched_pin();
        pc = get_pcpu();
        if (!THREAD_CAN_SLEEP() || curthread->td_vslock_sz > 0 ||
            (curthread->td_pflags & TDP_NOFAULTING) != 0) {
                sleepable = false;
                mtx_lock(&pc->pc_copyout_mlock);
                kaddr = pc->pc_copyout_maddr;
        } else {
                sleepable = true;
                sx_xlock(&pc->pc_copyout_slock);
                kaddr = pc->pc_copyout_saddr;
        }
        pmap_cp_slow0_map(kaddr, plen, m);
        kaddr += uva - trunc_page(uva);
        f(kaddr, arg);
        sched_unpin();
        if (sleepable)
                sx_xunlock(&pc->pc_copyout_slock);
        else
                mtx_unlock(&pc->pc_copyout_mlock);
        vm_page_unhold_pages(m, plen);
        return (error);
}

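/*
 * copyinstr() copies a NUL-terminated string from user space in chunks
 * that never cross a page boundary; copyinstr_slow0() is the per-chunk
 * callback and stops as soon as it copies the terminating NUL.  If the
 * string is not terminated within maxlen bytes, ENAMETOOLONG is
 * returned.
 */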
struct copyinstr_arg0 {
        vm_offset_t kc;
        size_t len;
        size_t alen;
        bool end;
};

static void
copyinstr_slow0(vm_offset_t kva, void *arg)
{
        struct copyinstr_arg0 *ca;
        char c;

        ca = arg;
        MPASS(ca->alen == 0 && ca->len > 0 && !ca->end);
        while (ca->alen < ca->len && !ca->end) {
                c = *(char *)(kva + ca->alen);
                *(char *)ca->kc = c;
                ca->alen++;
                ca->kc++;
                if (c == '\0')
                        ca->end = true;
        }
}

int
copyinstr(const void *udaddr, void *kaddr, size_t maxlen, size_t *lencopied)
{
        struct copyinstr_arg0 ca;
        vm_offset_t uc;
        size_t plen;
        int error;

        error = 0;
        ca.end = false;
        for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
            plen < maxlen && !ca.end; uc += ca.alen, plen += ca.alen) {
                ca.len = round_page(uc) - uc;
                if (ca.len == 0)
                        ca.len = PAGE_SIZE;
                if (plen + ca.len > maxlen)
                        ca.len = maxlen - plen;
                ca.alen = 0;
                if (cp_slow0(uc, ca.len, false, copyinstr_slow0, &ca) != 0) {
                        error = EFAULT;
                        break;
                }
        }
        if (!ca.end && plen == maxlen && error == 0)
                error = ENAMETOOLONG;
        if (lencopied != NULL)
                *lencopied = plen;
        return (error);
}

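/*
 * copyin() and copyout() share struct copyin_arg0 and differ only in
 * the direction of the bcopy() performed by their per-chunk callbacks.
 * Both first validate the user range, then try the fast trampoline for
 * transfers of at most TRAMP_COPYOUT_SZ bytes, and finally fall back to
 * page-sized chunks through cp_slow0().
 */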
struct copyin_arg0 {
        vm_offset_t kc;
        size_t len;
};

static void
copyin_slow0(vm_offset_t kva, void *arg)
{
        struct copyin_arg0 *ca;

        ca = arg;
        bcopy((void *)kva, (void *)ca->kc, ca->len);
}

int
copyin(const void *udaddr, void *kaddr, size_t len)
{
        struct copyin_arg0 ca;
        vm_offset_t uc;
        size_t plen;

        if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
            (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
                return (EFAULT);
        if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
            copyin_fast_tramp(udaddr, kaddr, len, pmap_get_kcr3()) == 0))
                return (0);
        for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
            plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
                ca.len = round_page(uc) - uc;
                if (ca.len == 0)
                        ca.len = PAGE_SIZE;
                if (plen + ca.len > len)
                        ca.len = len - plen;
                if (cp_slow0(uc, ca.len, false, copyin_slow0, &ca) != 0)
                        return (EFAULT);
        }
        return (0);
}

static void
copyout_slow0(vm_offset_t kva, void *arg)
{
        struct copyin_arg0 *ca;

        ca = arg;
        bcopy((void *)ca->kc, (void *)kva, ca->len);
}

int
copyout(const void *kaddr, void *udaddr, size_t len)
{
        struct copyin_arg0 ca;
        vm_offset_t uc;
        size_t plen;

        if ((uintptr_t)udaddr + len < (uintptr_t)udaddr ||
            (uintptr_t)udaddr + len > VM_MAXUSER_ADDRESS)
                return (EFAULT);
        if (len == 0 || (fast_copyout && len <= TRAMP_COPYOUT_SZ &&
            copyout_fast_tramp(kaddr, udaddr, len, pmap_get_kcr3()) == 0))
                return (0);
        for (plen = 0, uc = (vm_offset_t)udaddr, ca.kc = (vm_offset_t)kaddr;
            plen < len; uc += ca.len, ca.kc += ca.len, plen += ca.len) {
                ca.len = round_page(uc) - uc;
                if (ca.len == 0)
                        ca.len = PAGE_SIZE;
                if (plen + ca.len > len)
                        ca.len = len - plen;
                if (cp_slow0(uc, ca.len, true, copyout_slow0, &ca) != 0)
                        return (EFAULT);
        }
        return (0);
}

/*
 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
 * memory.
 */
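/*
 * fubyte() and fuword16() return the fetched value, or -1 on failure;
 * since a zero-extended byte or 16-bit word can never equal (int)-1,
 * the return value is unambiguous.  fueword() returns the full 32-bit
 * word through *val and uses its return value only to report success
 * or failure.
 */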

static void
fubyte_slow0(vm_offset_t kva, void *arg)
{

        *(int *)arg = *(u_char *)kva;
}

int
fubyte(volatile const void *base)
{
        int res;

        if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout) {
                res = fubyte_fast_tramp(base, pmap_get_kcr3());
                if (res != -1)
                        return (res);
        }
        if (cp_slow0((vm_offset_t)base, sizeof(char), false, fubyte_slow0,
            &res) != 0)
                return (-1);
        return (res);
}

static void
fuword16_slow0(vm_offset_t kva, void *arg)
{

        *(int *)arg = *(uint16_t *)kva;
}

int
fuword16(volatile const void *base)
{
        int res;

        if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout) {
                res = fuword16_fast_tramp(base, pmap_get_kcr3());
                if (res != -1)
                        return (res);
        }
        if (cp_slow0((vm_offset_t)base, sizeof(uint16_t), false,
            fuword16_slow0, &res) != 0)
                return (-1);
        return (res);
}

static void
fueword_slow0(vm_offset_t kva, void *arg)
{

        *(uint32_t *)arg = *(uint32_t *)kva;
}

int
fueword(volatile const void *base, long *val)
{
        uint32_t res;

        if ((uintptr_t)base + sizeof(*val) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(*val) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout) {
                if (fueword_fast_tramp(base, val, pmap_get_kcr3()) == 0)
                        return (0);
        }
        if (cp_slow0((vm_offset_t)base, sizeof(long), false, fueword_slow0,
            &res) != 0)
                return (-1);
        *val = res;
        return (0);
}

int
fueword32(volatile const void *base, int32_t *val)
{

        return (fueword(base, (long *)val));
}

/*
 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
 */
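/*
 * The store routines return 0 on success and -1 on failure, either
 * because the address range is invalid or because the page could not
 * be faulted in for writing.
 */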

static void
subyte_slow0(vm_offset_t kva, void *arg)
{

        *(u_char *)kva = *(int *)arg;
}

int
subyte(volatile void *base, int byte)
{

        if ((uintptr_t)base + sizeof(uint8_t) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(uint8_t) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout && subyte_fast_tramp(base, byte, pmap_get_kcr3()) == 0)
                return (0);
        return (cp_slow0((vm_offset_t)base, sizeof(u_char), true, subyte_slow0,
            &byte) != 0 ? -1 : 0);
}

static void
suword16_slow0(vm_offset_t kva, void *arg)
{

        *(int *)kva = *(uint16_t *)arg;
}

int
suword16(volatile void *base, int word)
{

        if ((uintptr_t)base + sizeof(uint16_t) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(uint16_t) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout && suword16_fast_tramp(base, word, pmap_get_kcr3())
            == 0)
                return (0);
        return (cp_slow0((vm_offset_t)base, sizeof(int16_t), true,
            suword16_slow0, &word) != 0 ? -1 : 0);
}

static void
suword_slow0(vm_offset_t kva, void *arg)
{

        *(int *)kva = *(uint32_t *)arg;
}

int
suword(volatile void *base, long word)
{

        if ((uintptr_t)base + sizeof(word) < (uintptr_t)base ||
            (uintptr_t)base + sizeof(word) > VM_MAXUSER_ADDRESS)
                return (-1);
        if (fast_copyout && suword_fast_tramp(base, word, pmap_get_kcr3()) == 0)
                return (0);
        return (cp_slow0((vm_offset_t)base, sizeof(long), true,
            suword_slow0, &word) != 0 ? -1 : 0);
}

int
suword32(volatile void *base, int32_t word)
{

        return (suword(base, word));
}

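/*
 * casueword32() performs a compare-and-set on a 32-bit word in user
 * memory: atomic_fcmpset_int() is run on the word through the per-CPU
 * mapping, the previous value is returned via *oldvalp, and the result
 * is 0 if the swap succeeded, 1 if the comparison failed, or -1 if the
 * address could not be accessed.  Since u_long is 32 bits on i386,
 * casueword() is the same operation.
 */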
struct casueword_arg0 {
        uint32_t oldval;
        uint32_t newval;
        int res;
};

static void
casueword_slow0(vm_offset_t kva, void *arg)
{
        struct casueword_arg0 *ca;

        ca = arg;
        ca->res = 1 - atomic_fcmpset_int((u_int *)kva, &ca->oldval,
            ca->newval);
}

int
casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp,
    uint32_t newval)
{
        struct casueword_arg0 ca;
        int res;

        ca.oldval = oldval;
        ca.newval = newval;
        res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
            casueword_slow0, &ca);
        if (res == 0) {
                *oldvalp = ca.oldval;
                return (ca.res);
        }
        return (-1);
}

int
casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, u_long newval)
{
        struct casueword_arg0 ca;
        int res;

        ca.oldval = oldval;
        ca.newval = newval;
        res = cp_slow0((vm_offset_t)base, sizeof(int32_t), true,
            casueword_slow0, &ca);
        if (res == 0) {
                *oldvalp = ca.oldval;
                return (ca.res);
        }
        return (-1);
}
