FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/sys_machdep.c


    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: releng/10.3/sys/i386/i386/sys_machdep.c 292572 2015-12-21 22:16:09Z jhb $");
   34 
   35 #include "opt_capsicum.h"
   36 #include "opt_kstack_pages.h"
   37 
   38 #include <sys/param.h>
   39 #include <sys/capsicum.h>
   40 #include <sys/systm.h>
   41 #include <sys/lock.h>
   42 #include <sys/malloc.h>
   43 #include <sys/mutex.h>
   44 #include <sys/priv.h>
   45 #include <sys/proc.h>
   46 #include <sys/smp.h>
   47 #include <sys/sysproto.h>
   48 
   49 #include <vm/vm.h>
   50 #include <vm/pmap.h>
   51 #include <vm/vm_map.h>
   52 #include <vm/vm_extern.h>
   53 
   54 #include <machine/cpu.h>
   55 #include <machine/pcb.h>
   56 #include <machine/pcb_ext.h>
   57 #include <machine/proc.h>
   58 #include <machine/sysarch.h>
   59 
   60 #include <security/audit/audit.h>
   61 
   62 #ifdef XEN 
   63 #include <machine/xen/xenfunc.h>
   64 
   65 void i386_reset_ldt(struct proc_ldt *pldt); 
   66 
   67 void 
   68 i386_reset_ldt(struct proc_ldt *pldt) 
   69 { 
   70         xen_set_ldt((vm_offset_t)pldt->ldt_base, pldt->ldt_len); 
   71 } 
   72 #else  
   73 #define i386_reset_ldt(x) 
   74 #endif 
   75 
   76 #include <vm/vm_kern.h>         /* for kernel_map */
   77 
   78 #define MAX_LD 8192
   79 #define LD_PER_PAGE 512
   80 #define NEW_MAX_LD(num)  ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
   81 #define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
   82 #define NULL_LDT_BASE   ((caddr_t)NULL)
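
/*
 * Worked example of the arithmetic above (an illustration, not part of
 * the original file): a union descriptor is 8 bytes, so LD_PER_PAGE (512)
 * of them fill one 4K page.  NEW_MAX_LD() rounds a slot count up to a
 * page's worth of slots -- NEW_MAX_LD(5) == 512, NEW_MAX_LD(513) == 1024,
 * and an exact multiple grows by a further page, NEW_MAX_LD(512) == 1024.
 * SIZE_FROM_LARGEST_LD() then shifts left by 3 to convert slots to bytes.
 */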
   83 
   84 #ifdef SMP
   85 static void set_user_ldt_rv(struct vmspace *vmsp);
   86 #endif
   87 static int i386_set_ldt_data(struct thread *, int start, int num,
   88         union descriptor *descs);
   89 static int i386_ldt_grow(struct thread *td, int len);
   90 
   91 void
   92 fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
   93 {
   94 
   95         sdp->sd_lobase = base & 0xffffff;
   96         sdp->sd_hibase = (base >> 24) & 0xff;
   97 #ifdef XEN
   98         /* need to do nosegneg like Linux */
   99         sdp->sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
  100 #else                   
  101         sdp->sd_lolimit = 0xffff;       /* 4GB limit, wraps around */
  102 #endif
  103         sdp->sd_hilimit = 0xf;
  104         sdp->sd_type = SDT_MEMRWA;
  105         sdp->sd_dpl = SEL_UPL;
  106         sdp->sd_p = 1;
  107         sdp->sd_xx = 0;
  108         sdp->sd_def32 = 1;
  109         sdp->sd_gran = 1;
  110 }
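
/*
 * Example of the encoding above (an illustration, not part of the
 * original file): for base 0x12345678, sd_lobase gets 0x345678 and
 * sd_hibase gets 0x12.  The 20-bit limit 0xfffff combined with sd_gran
 * (4K granularity) spans 4GB, so offsets wrap around the full address
 * space, as the comment above notes.
 */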
  111 
  112 #ifndef _SYS_SYSPROTO_H_
  113 struct sysarch_args {
  114         int op;
  115         char *parms;
  116 };
  117 #endif
  118 
  119 int
  120 sysarch(td, uap)
  121         struct thread *td;
  122         register struct sysarch_args *uap;
  123 {
  124         int error;
  125         union descriptor *lp;
  126         union {
  127                 struct i386_ldt_args largs;
  128                 struct i386_ioperm_args iargs;
  129                 struct i386_get_xfpustate xfpu;
  130         } kargs;
  131         uint32_t base;
  132         struct segment_descriptor sd, *sdp;
  133 
  134         AUDIT_ARG_CMD(uap->op);
  135 
  136 #ifdef CAPABILITY_MODE
  137         /*
  138          * When adding new operations, add a new case statement here to
  139          * explicitly indicate whether or not the operation is safe to
  140          * perform in capability mode.
  141          */
  142         if (IN_CAPABILITY_MODE(td)) {
  143                 switch (uap->op) {
  144                 case I386_GET_LDT:
  145                 case I386_SET_LDT:
  146                 case I386_GET_IOPERM:
  147                 case I386_GET_FSBASE:
  148                 case I386_SET_FSBASE:
  149                 case I386_GET_GSBASE:
  150                 case I386_SET_GSBASE:
  151                 case I386_GET_XFPUSTATE:
  152                         break;
  153 
  154                 case I386_SET_IOPERM:
  155                 default:
  156 #ifdef KTRACE
  157                         if (KTRPOINT(td, KTR_CAPFAIL))
  158                                 ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
  159 #endif
  160                         return (ECAPMODE);
  161                 }
  162         }
  163 #endif
  164 
  165         switch (uap->op) {
  166         case I386_GET_IOPERM:
  167         case I386_SET_IOPERM:
  168                 if ((error = copyin(uap->parms, &kargs.iargs,
  169                     sizeof(struct i386_ioperm_args))) != 0)
  170                         return (error);
  171                 break;
  172         case I386_GET_LDT:
  173         case I386_SET_LDT:
  174                 if ((error = copyin(uap->parms, &kargs.largs,
  175                     sizeof(struct i386_ldt_args))) != 0)
  176                         return (error);
  177                 if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
  178                         return (EINVAL);
  179                 break;
  180         case I386_GET_XFPUSTATE:
  181                 if ((error = copyin(uap->parms, &kargs.xfpu,
  182                     sizeof(struct i386_get_xfpustate))) != 0)
  183                         return (error);
  184                 break;
  185         default:
  186                 break;
  187         }
  188 
  189         switch(uap->op) {
  190         case I386_GET_LDT:
  191                 error = i386_get_ldt(td, &kargs.largs);
  192                 break;
  193         case I386_SET_LDT:
  194                 if (kargs.largs.descs != NULL) {
  195                         lp = (union descriptor *)malloc(
  196                             kargs.largs.num * sizeof(union descriptor),
  197                             M_TEMP, M_WAITOK);
  198                         error = copyin(kargs.largs.descs, lp,
  199                             kargs.largs.num * sizeof(union descriptor));
  200                         if (error == 0)
  201                                 error = i386_set_ldt(td, &kargs.largs, lp);
  202                         free(lp, M_TEMP);
  203                 } else {
  204                         error = i386_set_ldt(td, &kargs.largs, NULL);
  205                 }
  206                 break;
  207         case I386_GET_IOPERM:
  208                 error = i386_get_ioperm(td, &kargs.iargs);
  209                 if (error == 0)
  210                         error = copyout(&kargs.iargs, uap->parms,
  211                             sizeof(struct i386_ioperm_args));
  212                 break;
  213         case I386_SET_IOPERM:
  214                 error = i386_set_ioperm(td, &kargs.iargs);
  215                 break;
  216         case I386_VM86:
  217                 error = vm86_sysarch(td, uap->parms);
  218                 break;
  219         case I386_GET_FSBASE:
  220                 sdp = &td->td_pcb->pcb_fsd;
  221                 base = sdp->sd_hibase << 24 | sdp->sd_lobase;
  222                 error = copyout(&base, uap->parms, sizeof(base));
  223                 break;
  224         case I386_SET_FSBASE:
  225                 error = copyin(uap->parms, &base, sizeof(base));
  226                 if (error == 0) {
  227                         /*
  228                          * Construct a descriptor and store it in the pcb for
  229                          * the next context switch.  Also store it in the gdt
  230                          * so that the load of tf_fs into %fs will activate it
  231                          * at return to userland.
  232                          */
  233                         fill_based_sd(&sd, base);
  234                         critical_enter();
  235                         td->td_pcb->pcb_fsd = sd;
  236 #ifdef XEN
  237                         HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[0]),
  238                             *(uint64_t *)&sd);
  239 #else
  240                         PCPU_GET(fsgs_gdt)[0] = sd;
  241 #endif
  242                         critical_exit();
  243                         td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
  244                 }
  245                 break;
  246         case I386_GET_GSBASE:
  247                 sdp = &td->td_pcb->pcb_gsd;
  248                 base = sdp->sd_hibase << 24 | sdp->sd_lobase;
  249                 error = copyout(&base, uap->parms, sizeof(base));
  250                 break;
  251         case I386_SET_GSBASE:
  252                 error = copyin(uap->parms, &base, sizeof(base));
  253                 if (error == 0) {
  254                         /*
  255                          * Construct a descriptor and store it in the pcb for
  256                          * the next context switch.  Also store it in the gdt
  257                          * because we have to do a load_gs() right now.
  258                          */
  259                         fill_based_sd(&sd, base);
  260                         critical_enter();
  261                         td->td_pcb->pcb_gsd = sd;
  262 #ifdef XEN
  263                         HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[1]),
  264                             *(uint64_t *)&sd);
  265 #else                   
  266                         PCPU_GET(fsgs_gdt)[1] = sd;
  267 #endif
  268                         critical_exit();
  269                         load_gs(GSEL(GUGS_SEL, SEL_UPL));
  270                 }
  271                 break;
  272         case I386_GET_XFPUSTATE:
  273                 if (kargs.xfpu.len > cpu_max_ext_state_size -
  274                     sizeof(union savefpu))
  275                         return (EINVAL);
  276                 npxgetregs(td);
  277                 error = copyout((char *)(get_pcb_user_save_td(td) + 1),
  278                     kargs.xfpu.addr, kargs.xfpu.len);
  279                 break;
  280         default:
  281                 error = EINVAL;
  282                 break;
  283         }
  284         return (error);
  285 }
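
/*
 * A minimal userland sketch of the I386_GET_FSBASE/I386_SET_FSBASE cases
 * above (an illustration, not part of this file).  For these operations
 * uap->parms points at a bare uint32_t that the handler copyin()s or
 * copyout()s, as the code above shows.
 */
#if 0
#include <sys/types.h>
#include <machine/sysarch.h>

static int
fsbase_roundtrip(void *tls)
{
	uint32_t base, check;

	base = (uint32_t)tls;
	if (sysarch(I386_SET_FSBASE, &base) != 0)	/* kernel copyin()s base */
		return (-1);
	if (sysarch(I386_GET_FSBASE, &check) != 0)	/* kernel copyout()s base */
		return (-1);
	return (base == check ? 0 : -1);
}
#endif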
  286 
  287 int
  288 i386_extend_pcb(struct thread *td)
  289 {
  290         int i, offset;
  291         u_long *addr;
  292         struct pcb_ext *ext;
  293         struct soft_segment_descriptor ssd = {
  294                 0,                      /* segment base address (overwritten) */
  295                 ctob(IOPAGES + 1) - 1,  /* length */
  296                 SDT_SYS386TSS,          /* segment type */
  297                 0,                      /* priority level */
  298                 1,                      /* descriptor present */
  299                 0, 0,
  300                 0,                      /* default 32 size */
  301                 0                       /* granularity */
  302         };
  303 
  304         ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
  305             M_WAITOK | M_ZERO);
   306         /* -16 is so we can convert a trapframe into vm86trapframe in place */
  307         ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
  308         ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
  309         /*
  310          * The last byte of the i/o map must be followed by an 0xff byte.
  311          * We arbitrarily allocate 16 bytes here, to keep the starting
  312          * address on a doubleword boundary.
  313          */
  314         offset = PAGE_SIZE - 16;
  315         ext->ext_tss.tss_ioopt = 
  316             (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
  317         ext->ext_iomap = (caddr_t)ext + offset;
  318         ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;
  319 
  320         addr = (u_long *)ext->ext_vm86.vm86_intmap;
  321         for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
  322                 *addr++ = ~0;
  323 
  324         ssd.ssd_base = (unsigned)&ext->ext_tss;
  325         ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
  326         ssdtosd(&ssd, &ext->ext_tssd);
  327 
  328         KASSERT(td == curthread, ("giving TSS to !curthread"));
  329         KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));
  330 
  331         /* Switch to the new TSS. */
  332         critical_enter();
  333         td->td_pcb->pcb_ext = ext;
  334         PCPU_SET(private_tss, 1);
  335         *PCPU_GET(tss_gdt) = ext->ext_tssd;
  336         ltr(GSEL(GPROC0_SEL, SEL_KPL));
  337         critical_exit();
  338 
  339         return 0;
  340 }
  341 
  342 int
  343 i386_set_ioperm(td, uap)
  344         struct thread *td;
  345         struct i386_ioperm_args *uap;
  346 {
  347         int i, error;
  348         char *iomap;
  349 
  350         if ((error = priv_check(td, PRIV_IO)) != 0)
  351                 return (error);
  352         if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
  353                 return (error);
  354         /*
  355          * XXX 
  356          * While this is restricted to root, we should probably figure out
   357          * whether any other driver is using this i/o address, so as not to
  358          * cause confusion.  This probably requires a global 'usage registry'.
  359          */
  360 
  361         if (td->td_pcb->pcb_ext == 0)
  362                 if ((error = i386_extend_pcb(td)) != 0)
  363                         return (error);
  364         iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
  365 
  366         if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
  367                 return (EINVAL);
  368 
  369         for (i = uap->start; i < uap->start + uap->length; i++) {
  370                 if (uap->enable)
  371                         iomap[i >> 3] &= ~(1 << (i & 7));
  372                 else
  373                         iomap[i >> 3] |= (1 << (i & 7));
  374         }
  375         return (error);
  376 }
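
/*
 * A hedged userland sketch of the I386_SET_IOPERM path above (an
 * illustration, not part of this file).  The port range is an arbitrary
 * example; per the checks above the caller needs PRIV_IO and a
 * securelevel of 0 or below.
 */
#if 0
#include <sys/types.h>
#include <machine/sysarch.h>

static int
grant_port_access(void)
{
	struct i386_ioperm_args args;

	args.start = 0x378;	/* example: 4 ports of the parallel port */
	args.length = 4;
	args.enable = 1;	/* clears the iomap bits, granting access */
	return (sysarch(I386_SET_IOPERM, &args));
}
#endif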
  377 
  378 int
  379 i386_get_ioperm(td, uap)
  380         struct thread *td;
  381         struct i386_ioperm_args *uap;
  382 {
  383         int i, state;
  384         char *iomap;
  385 
  386         if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
  387                 return (EINVAL);
  388 
  389         if (td->td_pcb->pcb_ext == 0) {
  390                 uap->length = 0;
  391                 goto done;
  392         }
  393 
  394         iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
  395 
  396         i = uap->start;
  397         state = (iomap[i >> 3] >> (i & 7)) & 1;
  398         uap->enable = !state;
  399         uap->length = 1;
  400 
  401         for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
  402                 if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
  403                         break;
  404                 uap->length++;
  405         }
  406 
  407 done:
  408         return (0);
  409 }
  410 
  411 /*
  412  * Update the GDT entry pointing to the LDT to point to the LDT of the
   413  * current process.  Acquires and drops dt_lock itself as needed.
  414  */   
  415 void
  416 set_user_ldt(struct mdproc *mdp)
  417 {
  418         struct proc_ldt *pldt;
  419         int dtlocked;
  420 
  421         dtlocked = 0;
  422         if (!mtx_owned(&dt_lock)) {
  423                 mtx_lock_spin(&dt_lock);
  424                 dtlocked = 1;
  425         }
  426 
  427         pldt = mdp->md_ldt;
  428 #ifdef XEN
  429         i386_reset_ldt(pldt);
  430         PCPU_SET(currentldt, (int)pldt);
  431 #else   
  432 #ifdef SMP
  433         gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
  434 #else
  435         gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
  436 #endif
  437         lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
  438         PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
  439 #endif /* XEN */ 
  440         if (dtlocked)
  441                 mtx_unlock_spin(&dt_lock);
  442 }
  443 
  444 #ifdef SMP
  445 static void
  446 set_user_ldt_rv(struct vmspace *vmsp)
  447 {
  448         struct thread *td;
  449 
  450         td = curthread;
  451         if (vmsp != td->td_proc->p_vmspace)
  452                 return;
  453 
  454         set_user_ldt(&td->td_proc->p_md);
  455 }
  456 #endif
  457 
  458 #ifdef XEN
  459 
  460 /* 
  461  * dt_lock must be held. Returns with dt_lock held. 
  462  */ 
  463 struct proc_ldt * 
  464 user_ldt_alloc(struct mdproc *mdp, int len) 
  465 { 
  466         struct proc_ldt *pldt, *new_ldt; 
  467  
  468         mtx_assert(&dt_lock, MA_OWNED); 
  469         mtx_unlock_spin(&dt_lock); 
  470         new_ldt = malloc(sizeof(struct proc_ldt), 
  471                 M_SUBPROC, M_WAITOK); 
  472  
  473         new_ldt->ldt_len = len = NEW_MAX_LD(len); 
  474         new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena, 
  475             round_page(len * sizeof(union descriptor)), M_WAITOK);
  476         new_ldt->ldt_refcnt = 1; 
  477         new_ldt->ldt_active = 0; 
  478  
  479         mtx_lock_spin(&dt_lock);
  480         if ((pldt = mdp->md_ldt)) { 
  481                 if (len > pldt->ldt_len) 
  482                         len = pldt->ldt_len; 
  483                 bcopy(pldt->ldt_base, new_ldt->ldt_base, 
  484                     len * sizeof(union descriptor)); 
  485         } else { 
  486                 bcopy(ldt, new_ldt->ldt_base, PAGE_SIZE); 
  487         } 
  488         mtx_unlock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
  489         pmap_map_readonly(kernel_pmap, (vm_offset_t)new_ldt->ldt_base, 
  490                           new_ldt->ldt_len*sizeof(union descriptor)); 
  491         mtx_lock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
  492         return (new_ldt);
  493 } 
  494 #else
  495 /*
  496  * dt_lock must be held. Returns with dt_lock held.
  497  */
  498 struct proc_ldt *
  499 user_ldt_alloc(struct mdproc *mdp, int len)
  500 {
  501         struct proc_ldt *pldt, *new_ldt;
  502 
  503         mtx_assert(&dt_lock, MA_OWNED);
  504         mtx_unlock_spin(&dt_lock);
  505         new_ldt = malloc(sizeof(struct proc_ldt),
  506                 M_SUBPROC, M_WAITOK);
  507 
  508         new_ldt->ldt_len = len = NEW_MAX_LD(len);
  509         new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
  510             len * sizeof(union descriptor), M_WAITOK);
  511         new_ldt->ldt_refcnt = 1;
  512         new_ldt->ldt_active = 0;
  513 
  514         mtx_lock_spin(&dt_lock);
  515         gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
  516         gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
  517         ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);
  518 
  519         if ((pldt = mdp->md_ldt) != NULL) {
  520                 if (len > pldt->ldt_len)
  521                         len = pldt->ldt_len;
  522                 bcopy(pldt->ldt_base, new_ldt->ldt_base,
  523                     len * sizeof(union descriptor));
  524         } else
  525                 bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
  526         
  527         return (new_ldt);
  528 }
  529 #endif /* !XEN */
  530 
  531 /*
  532  * Must be called with dt_lock held.  Returns with dt_lock unheld.
  533  */
  534 void
  535 user_ldt_free(struct thread *td)
  536 {
  537         struct mdproc *mdp = &td->td_proc->p_md;
  538         struct proc_ldt *pldt;
  539 
  540         mtx_assert(&dt_lock, MA_OWNED);
  541         if ((pldt = mdp->md_ldt) == NULL) {
  542                 mtx_unlock_spin(&dt_lock);
  543                 return;
  544         }
  545 
  546         if (td == curthread) {
  547 #ifdef XEN
  548                 i386_reset_ldt(&default_proc_ldt);
  549                 PCPU_SET(currentldt, (int)&default_proc_ldt);
  550 #else
  551                 lldt(_default_ldt);
  552                 PCPU_SET(currentldt, _default_ldt);
  553 #endif
  554         }
  555 
  556         mdp->md_ldt = NULL;
  557         user_ldt_deref(pldt);
  558 }
  559 
  560 void
  561 user_ldt_deref(struct proc_ldt *pldt)
  562 {
  563 
  564         mtx_assert(&dt_lock, MA_OWNED);
  565         if (--pldt->ldt_refcnt == 0) {
  566                 mtx_unlock_spin(&dt_lock);
  567                 kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
  568                         pldt->ldt_len * sizeof(union descriptor));
  569                 free(pldt, M_SUBPROC);
  570         } else
  571                 mtx_unlock_spin(&dt_lock);
  572 }
  573 
  574 /*
  575  * Note for the authors of compat layers (linux, etc): copyout() in
  576  * the function below is not a problem since it presents data in
  577  * arch-specific format (i.e. i386-specific in this case), not in
  578  * the OS-specific one.
  579  */
  580 int
  581 i386_get_ldt(td, uap)
  582         struct thread *td;
  583         struct i386_ldt_args *uap;
  584 {
  585         int error = 0;
  586         struct proc_ldt *pldt;
  587         int nldt, num;
  588         union descriptor *lp;
  589 
  590 #ifdef  DEBUG
  591         printf("i386_get_ldt: start=%d num=%d descs=%p\n",
  592             uap->start, uap->num, (void *)uap->descs);
  593 #endif
  594 
  595         mtx_lock_spin(&dt_lock);
  596         if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
  597                 nldt = pldt->ldt_len;
  598                 lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
  599                 mtx_unlock_spin(&dt_lock);
  600                 num = min(uap->num, nldt);
  601         } else {
  602                 mtx_unlock_spin(&dt_lock);
  603                 nldt = sizeof(ldt)/sizeof(ldt[0]);
  604                 num = min(uap->num, nldt);
  605                 lp = &ldt[uap->start];
  606         }
  607 
  608         if ((uap->start > (unsigned int)nldt) ||
  609             ((unsigned int)num > (unsigned int)nldt) ||
  610             ((unsigned int)(uap->start + num) > (unsigned int)nldt))
  611                 return(EINVAL);
  612 
  613         error = copyout(lp, uap->descs, num * sizeof(union descriptor));
  614         if (!error)
  615                 td->td_retval[0] = num;
  616 
  617         return(error);
  618 }
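
/*
 * Userland reaches this through the symmetric i386_get_ldt(3) wrapper,
 * e.g. "union descriptor d; n = i386_get_ldt(slot, &d, 1);" (an
 * illustration, not part of this file).  The raw i386-format copyout
 * noted above is why compat layers can share it.
 */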
  619 
  620 int
  621 i386_set_ldt(td, uap, descs)
  622         struct thread *td;
  623         struct i386_ldt_args *uap;
  624         union descriptor *descs;
  625 {
  626         int error = 0, i;
  627         int largest_ld;
  628         struct mdproc *mdp = &td->td_proc->p_md;
  629         struct proc_ldt *pldt;
  630         union descriptor *dp;
  631 
  632 #ifdef  DEBUG
  633         printf("i386_set_ldt: start=%d num=%d descs=%p\n",
  634             uap->start, uap->num, (void *)uap->descs);
  635 #endif
  636 
  637         if (descs == NULL) {
  638                 /* Free descriptors */
  639                 if (uap->start == 0 && uap->num == 0) {
  640                         /*
  641                          * Treat this as a special case, so userland needn't
  642                          * know magic number NLDT.
  643                          */
  644                         uap->start = NLDT;
  645                         uap->num = MAX_LD - NLDT;
  646                 }
  647                 if (uap->num == 0)
  648                         return (EINVAL);
  649                 mtx_lock_spin(&dt_lock);
  650                 if ((pldt = mdp->md_ldt) == NULL ||
  651                     uap->start >= pldt->ldt_len) {
  652                         mtx_unlock_spin(&dt_lock);
  653                         return (0);
  654                 }
  655                 largest_ld = uap->start + uap->num;
  656                 if (largest_ld > pldt->ldt_len)
  657                         largest_ld = pldt->ldt_len;
  658                 i = largest_ld - uap->start;
  659                 bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
  660                     sizeof(union descriptor) * i);
  661                 mtx_unlock_spin(&dt_lock);
  662                 return (0);
  663         }
  664 
  665         if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
  666                 /* verify range of descriptors to modify */
  667                 largest_ld = uap->start + uap->num;
  668                 if (uap->start >= MAX_LD || largest_ld > MAX_LD) {
  669                         return (EINVAL);
  670                 }
  671         }
  672 
  673         /* Check descriptors for access violations */
  674         for (i = 0; i < uap->num; i++) {
  675                 dp = &descs[i];
  676 
  677                 switch (dp->sd.sd_type) {
  678                 case SDT_SYSNULL:       /* system null */ 
  679                         dp->sd.sd_p = 0;
  680                         break;
  681                 case SDT_SYS286TSS: /* system 286 TSS available */
  682                 case SDT_SYSLDT:    /* system local descriptor table */
  683                 case SDT_SYS286BSY: /* system 286 TSS busy */
  684                 case SDT_SYSTASKGT: /* system task gate */
  685                 case SDT_SYS286IGT: /* system 286 interrupt gate */
  686                 case SDT_SYS286TGT: /* system 286 trap gate */
  687                 case SDT_SYSNULL2:  /* undefined by Intel */ 
  688                 case SDT_SYS386TSS: /* system 386 TSS available */
  689                 case SDT_SYSNULL3:  /* undefined by Intel */
  690                 case SDT_SYS386BSY: /* system 386 TSS busy */
  691                 case SDT_SYSNULL4:  /* undefined by Intel */ 
  692                 case SDT_SYS386IGT: /* system 386 interrupt gate */
  693                 case SDT_SYS386TGT: /* system 386 trap gate */
  694                 case SDT_SYS286CGT: /* system 286 call gate */ 
  695                 case SDT_SYS386CGT: /* system 386 call gate */
  696                         /* I can't think of any reason to allow a user proc
  697                          * to create a segment of these types.  They are
  698                          * for OS use only.
  699                          */
  700                         return (EACCES);
  701                         /*NOTREACHED*/
  702 
  703                 /* memory segment types */
  704                 case SDT_MEMEC:   /* memory execute only conforming */
  705                 case SDT_MEMEAC:  /* memory execute only accessed conforming */
  706                 case SDT_MEMERC:  /* memory execute read conforming */
  707                 case SDT_MEMERAC: /* memory execute read accessed conforming */
  708                          /* Must be "present" if executable and conforming. */
  709                         if (dp->sd.sd_p == 0)
  710                                 return (EACCES);
  711                         break;
  712                 case SDT_MEMRO:   /* memory read only */
  713                 case SDT_MEMROA:  /* memory read only accessed */
  714                 case SDT_MEMRW:   /* memory read write */
  715                 case SDT_MEMRWA:  /* memory read write accessed */
  716                 case SDT_MEMROD:  /* memory read only expand dwn limit */
  717                 case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
  718                 case SDT_MEMRWD:  /* memory read write expand dwn limit */  
   719                 case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
  720                 case SDT_MEME:    /* memory execute only */ 
  721                 case SDT_MEMEA:   /* memory execute only accessed */
  722                 case SDT_MEMER:   /* memory execute read */
  723                 case SDT_MEMERA:  /* memory execute read accessed */
  724                         break;
  725                 default:
  726                         return(EINVAL);
  727                         /*NOTREACHED*/
  728                 }
  729 
  730                 /* Only user (ring-3) descriptors may be present. */
  731                 if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
  732                         return (EACCES);
  733         }
  734 
  735         if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
  736                 /* Allocate a free slot */
  737                 mtx_lock_spin(&dt_lock);
  738                 if ((pldt = mdp->md_ldt) == NULL) {
  739                         if ((error = i386_ldt_grow(td, NLDT + 1))) {
  740                                 mtx_unlock_spin(&dt_lock);
  741                                 return (error);
  742                         }
  743                         pldt = mdp->md_ldt;
  744                 }
  745 again:
  746                 /*
  747                  * start scanning a bit up to leave room for NVidia and
   748                  * Wine, which still use the "Blat" method of allocation.
  749                  */
  750                 dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
  751                 for (i = NLDT; i < pldt->ldt_len; ++i) {
  752                         if (dp->sd.sd_type == SDT_SYSNULL)
  753                                 break;
  754                         dp++;
  755                 }
  756                 if (i >= pldt->ldt_len) {
  757                         if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
  758                                 mtx_unlock_spin(&dt_lock);
  759                                 return (error);
  760                         }
  761                         goto again;
  762                 }
  763                 uap->start = i;
  764                 error = i386_set_ldt_data(td, i, 1, descs);
  765                 mtx_unlock_spin(&dt_lock);
  766         } else {
  767                 largest_ld = uap->start + uap->num;
  768                 mtx_lock_spin(&dt_lock);
  769                 if (!(error = i386_ldt_grow(td, largest_ld))) {
  770                         error = i386_set_ldt_data(td, uap->start, uap->num,
  771                             descs);
  772                 }
  773                 mtx_unlock_spin(&dt_lock);
  774         }
  775         if (error == 0)
  776                 td->td_retval[0] = uap->start;
  777         return (error);
  778 }
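
/*
 * A userland sketch of the LDT_AUTO_ALLOC path above (an illustration,
 * not part of this file), using the i386_set_ldt(3) libc wrapper over
 * this handler.  It installs one flat ring-3 data descriptor, mirroring
 * fill_based_sd() with a zero base, and returns the matching selector.
 */
#if 0
#include <sys/types.h>
#include <string.h>
#include <machine/segments.h>
#include <machine/sysarch.h>

static int
alloc_flat_data_selector(void)
{
	union descriptor desc;
	int slot;

	memset(&desc, 0, sizeof(desc));
	desc.sd.sd_lolimit = 0xffff;	/* with sd_gran, a 4GB limit */
	desc.sd.sd_hilimit = 0xf;
	desc.sd.sd_type = SDT_MEMRWA;	/* read/write data segment */
	desc.sd.sd_dpl = SEL_UPL;	/* only ring-3 descriptors pass above */
	desc.sd.sd_p = 1;
	desc.sd.sd_def32 = 1;
	desc.sd.sd_gran = 1;
	slot = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
	if (slot < 0)
		return (-1);
	return (LSEL(slot, SEL_UPL));	/* (slot << 3) | 4 | 3 */
}
#endif
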
  779 #ifdef XEN
  780 static int
  781 i386_set_ldt_data(struct thread *td, int start, int num,
  782         union descriptor *descs)
  783 {
  784         struct mdproc *mdp = &td->td_proc->p_md;
  785         struct proc_ldt *pldt = mdp->md_ldt;
  786 
  787         mtx_assert(&dt_lock, MA_OWNED);
  788 
  789         while (num) {
  790                 xen_update_descriptor(
  791                     &((union descriptor *)(pldt->ldt_base))[start],
  792                     descs);
  793                 num--;
  794                 start++;
  795                 descs++;
  796         }
  797         return (0);
  798 }
  799 #else
  800 static int
  801 i386_set_ldt_data(struct thread *td, int start, int num,
  802         union descriptor *descs)
  803 {
  804         struct mdproc *mdp = &td->td_proc->p_md;
  805         struct proc_ldt *pldt = mdp->md_ldt;
  806 
  807         mtx_assert(&dt_lock, MA_OWNED);
  808 
  809         /* Fill in range */
  810         bcopy(descs,
  811             &((union descriptor *)(pldt->ldt_base))[start],
  812             num * sizeof(union descriptor));
  813         return (0);
  814 }
  815 #endif /* !XEN */
  816 
  817 static int
  818 i386_ldt_grow(struct thread *td, int len) 
  819 {
  820         struct mdproc *mdp = &td->td_proc->p_md;
  821         struct proc_ldt *new_ldt, *pldt;
  822         caddr_t old_ldt_base = NULL_LDT_BASE;
  823         int old_ldt_len = 0;
  824 
  825         mtx_assert(&dt_lock, MA_OWNED);
  826 
  827         if (len > MAX_LD)
  828                 return (ENOMEM);
  829         if (len < NLDT + 1)
  830                 len = NLDT + 1;
  831 
  832         /* Allocate a user ldt. */
  833         if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
  834                 new_ldt = user_ldt_alloc(mdp, len);
  835                 if (new_ldt == NULL)
  836                         return (ENOMEM);
  837                 pldt = mdp->md_ldt;
  838 
  839                 if (pldt != NULL) {
  840                         if (new_ldt->ldt_len <= pldt->ldt_len) {
  841                                 /*
  842                                  * We just lost the race for allocation, so
  843                                  * free the new object and return.
  844                                  */
  845                                 mtx_unlock_spin(&dt_lock);
  846                                 kmem_free(kernel_arena,
  847                                    (vm_offset_t)new_ldt->ldt_base,
  848                                    new_ldt->ldt_len * sizeof(union descriptor));
  849                                 free(new_ldt, M_SUBPROC);
  850                                 mtx_lock_spin(&dt_lock);
  851                                 return (0);
  852                         }
  853 
  854                         /*
  855                          * We have to substitute the current LDT entry for
  856                          * curproc with the new one since its size grew.
  857                          */
  858                         old_ldt_base = pldt->ldt_base;
  859                         old_ldt_len = pldt->ldt_len;
  860                         pldt->ldt_sd = new_ldt->ldt_sd;
  861                         pldt->ldt_base = new_ldt->ldt_base;
  862                         pldt->ldt_len = new_ldt->ldt_len;
  863                 } else
  864                         mdp->md_ldt = pldt = new_ldt;
  865 #ifdef SMP
  866                 /*
   867                  * Signal other cpus to reload ldt.  We need to unlock dt_lock
   868                  * here because the other CPUs will contend for it, since their
   869                  * curthreads do not hold the lock and would block when trying
   870                  * to acquire it.
  871                  */
  872                 mtx_unlock_spin(&dt_lock);
  873                 smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
  874                     NULL, td->td_proc->p_vmspace);
  875 #else
  876                 set_user_ldt(&td->td_proc->p_md);
  877                 mtx_unlock_spin(&dt_lock);
  878 #endif
  879                 if (old_ldt_base != NULL_LDT_BASE) {
  880                         kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
  881                             old_ldt_len * sizeof(union descriptor));
  882                         free(new_ldt, M_SUBPROC);
  883                 }
  884                 mtx_lock_spin(&dt_lock);
  885         }
  886         return (0);
  887 }
