FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/sys_machdep.c


    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions and the following disclaimer.
   10  * 2. Redistributions in binary form must reproduce the above copyright
   11  *    notice, this list of conditions and the following disclaimer in the
   12  *    documentation and/or other materials provided with the distribution.
   13  * 4. Neither the name of the University nor the names of its contributors
   14  *    may be used to endorse or promote products derived from this software
   15  *    without specific prior written permission.
   16  *
   17  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   27  * SUCH DAMAGE.
   28  *
   29  *      from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
   30  */
   31 
   32 #include <sys/cdefs.h>
   33 __FBSDID("$FreeBSD: stable/10/sys/i386/i386/sys_machdep.c 306961 2016-10-10 11:53:54Z tijl $");
   34 
   35 #include "opt_capsicum.h"
   36 #include "opt_kstack_pages.h"
   37 
   38 #include <sys/param.h>
   39 #include <sys/capsicum.h>
   40 #include <sys/systm.h>
   41 #include <sys/lock.h>
   42 #include <sys/malloc.h>
   43 #include <sys/mutex.h>
   44 #include <sys/priv.h>
   45 #include <sys/proc.h>
   46 #include <sys/smp.h>
   47 #include <sys/sysproto.h>
   48 
   49 #include <vm/vm.h>
   50 #include <vm/pmap.h>
   51 #include <vm/vm_map.h>
   52 #include <vm/vm_extern.h>
   53 
   54 #include <machine/cpu.h>
   55 #include <machine/pcb.h>
   56 #include <machine/pcb_ext.h>
   57 #include <machine/proc.h>
   58 #include <machine/sysarch.h>
   59 
   60 #include <security/audit/audit.h>
   61 
   62 #ifdef XEN 
   63 #include <machine/xen/xenfunc.h>
   64 
   65 void i386_reset_ldt(struct proc_ldt *pldt); 
   66 
   67 void 
   68 i386_reset_ldt(struct proc_ldt *pldt) 
   69 { 
   70         xen_set_ldt((vm_offset_t)pldt->ldt_base, pldt->ldt_len); 
   71 } 
   72 #else  
   73 #define i386_reset_ldt(x) 
   74 #endif 
   75 
   76 #include <vm/vm_kern.h>         /* for kernel_map */
   77 
   78 #define MAX_LD 8192
   79 #define LD_PER_PAGE 512
   80 #define NEW_MAX_LD(num)  ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
   81 #define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
   82 #define NULL_LDT_BASE   ((caddr_t)NULL)
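/*
 * A worked example of the sizing macros above (descriptors are 8 bytes, so
 * one 4096-byte page holds LD_PER_PAGE = 512 of them):
 *
 *      NEW_MAX_LD(3)           = (3 + 512) & ~511    = 512 descriptors
 *      NEW_MAX_LD(513)         = (513 + 512) & ~511  = 1024 descriptors
 *      SIZE_FROM_LARGEST_LD(3) = 512 << 3            = 4096 bytes (one page)
 *
 * A requested LDT length is thus rounded up to a whole number of pages of
 * descriptors before any allocation is made.
 */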
   83 
   84 #ifdef SMP
   85 static void set_user_ldt_rv(struct vmspace *vmsp);
   86 #endif
   87 static int i386_set_ldt_data(struct thread *, int start, int num,
   88         union descriptor *descs);
   89 static int i386_ldt_grow(struct thread *td, int len);
   90 
   91 void
   92 fill_based_sd(struct segment_descriptor *sdp, uint32_t base)
   93 {
   94 
   95         sdp->sd_lobase = base & 0xffffff;
   96         sdp->sd_hibase = (base >> 24) & 0xff;
   97 #ifdef XEN
   98         /* need to do nosegneg like Linux */
   99         sdp->sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
  100 #else                   
  101         sdp->sd_lolimit = 0xffff;       /* 4GB limit, wraps around */
  102 #endif
  103         sdp->sd_hilimit = 0xf;
  104         sdp->sd_type = SDT_MEMRWA;
  105         sdp->sd_dpl = SEL_UPL;
  106         sdp->sd_p = 1;
  107         sdp->sd_xx = 0;
  108         sdp->sd_def32 = 1;
  109         sdp->sd_gran = 1;
  110 }
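/*
 * A short worked example of the encoding done by fill_based_sd(): for
 * base = 0xbfbfe000 it stores sd_lobase = 0xbfe000 (the low 24 bits) and
 * sd_hibase = 0xbf (the high 8 bits).  In the non-Xen case the limit
 * fields together encode 0xfffff pages with sd_gran set, i.e. a 4GB
 * segment whose offsets simply wrap around the address space, as the
 * comment above notes.
 */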
  111 
  112 #ifndef _SYS_SYSPROTO_H_
  113 struct sysarch_args {
  114         int op;
  115         char *parms;
  116 };
  117 #endif
  118 
  119 int
  120 sysarch(td, uap)
  121         struct thread *td;
  122         register struct sysarch_args *uap;
  123 {
  124         int error;
  125         union descriptor *lp;
  126         union {
  127                 struct i386_ldt_args largs;
  128                 struct i386_ioperm_args iargs;
  129                 struct i386_get_xfpustate xfpu;
  130         } kargs;
  131         uint32_t base;
  132         struct segment_descriptor sd, *sdp;
  133 
  134         AUDIT_ARG_CMD(uap->op);
  135 
  136 #ifdef CAPABILITY_MODE
  137         /*
  138          * When adding new operations, add a new case statement here to
  139          * explicitly indicate whether or not the operation is safe to
  140          * perform in capability mode.
  141          */
  142         if (IN_CAPABILITY_MODE(td)) {
  143                 switch (uap->op) {
  144                 case I386_GET_LDT:
  145                 case I386_SET_LDT:
  146                 case I386_GET_IOPERM:
  147                 case I386_GET_FSBASE:
  148                 case I386_SET_FSBASE:
  149                 case I386_GET_GSBASE:
  150                 case I386_SET_GSBASE:
  151                 case I386_GET_XFPUSTATE:
  152                         break;
  153 
  154                 case I386_SET_IOPERM:
  155                 default:
  156 #ifdef KTRACE
  157                         if (KTRPOINT(td, KTR_CAPFAIL))
  158                                 ktrcapfail(CAPFAIL_SYSCALL, NULL, NULL);
  159 #endif
  160                         return (ECAPMODE);
  161                 }
  162         }
  163 #endif
  164 
  165         switch (uap->op) {
  166         case I386_GET_IOPERM:
  167         case I386_SET_IOPERM:
  168                 if ((error = copyin(uap->parms, &kargs.iargs,
  169                     sizeof(struct i386_ioperm_args))) != 0)
  170                         return (error);
  171                 break;
  172         case I386_GET_LDT:
  173         case I386_SET_LDT:
  174                 if ((error = copyin(uap->parms, &kargs.largs,
  175                     sizeof(struct i386_ldt_args))) != 0)
  176                         return (error);
  177                 if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
  178                         return (EINVAL);
  179                 break;
  180         case I386_GET_XFPUSTATE:
  181                 if ((error = copyin(uap->parms, &kargs.xfpu,
  182                     sizeof(struct i386_get_xfpustate))) != 0)
  183                         return (error);
  184                 break;
  185         default:
  186                 break;
  187         }
  188 
  189         switch(uap->op) {
  190         case I386_GET_LDT:
  191                 error = i386_get_ldt(td, &kargs.largs);
  192                 break;
  193         case I386_SET_LDT:
  194                 if (kargs.largs.descs != NULL) {
  195                         lp = (union descriptor *)malloc(
  196                             kargs.largs.num * sizeof(union descriptor),
  197                             M_TEMP, M_WAITOK);
  198                         error = copyin(kargs.largs.descs, lp,
  199                             kargs.largs.num * sizeof(union descriptor));
  200                         if (error == 0)
  201                                 error = i386_set_ldt(td, &kargs.largs, lp);
  202                         free(lp, M_TEMP);
  203                 } else {
  204                         error = i386_set_ldt(td, &kargs.largs, NULL);
  205                 }
  206                 break;
  207         case I386_GET_IOPERM:
  208                 error = i386_get_ioperm(td, &kargs.iargs);
  209                 if (error == 0)
  210                         error = copyout(&kargs.iargs, uap->parms,
  211                             sizeof(struct i386_ioperm_args));
  212                 break;
  213         case I386_SET_IOPERM:
  214                 error = i386_set_ioperm(td, &kargs.iargs);
  215                 break;
  216         case I386_VM86:
  217                 error = vm86_sysarch(td, uap->parms);
  218                 break;
  219         case I386_GET_FSBASE:
  220                 sdp = &td->td_pcb->pcb_fsd;
  221                 base = sdp->sd_hibase << 24 | sdp->sd_lobase;
  222                 error = copyout(&base, uap->parms, sizeof(base));
  223                 break;
  224         case I386_SET_FSBASE:
  225                 error = copyin(uap->parms, &base, sizeof(base));
  226                 if (error == 0) {
  227                         /*
  228                          * Construct a descriptor and store it in the pcb for
  229                          * the next context switch.  Also store it in the gdt
  230                          * so that the load of tf_fs into %fs will activate it
  231                          * at return to userland.
  232                          */
  233                         fill_based_sd(&sd, base);
  234                         critical_enter();
  235                         td->td_pcb->pcb_fsd = sd;
  236 #ifdef XEN
  237                         HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[0]),
  238                             *(uint64_t *)&sd);
  239 #else
  240                         PCPU_GET(fsgs_gdt)[0] = sd;
  241 #endif
  242                         critical_exit();
  243                         td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
  244                 }
  245                 break;
  246         case I386_GET_GSBASE:
  247                 sdp = &td->td_pcb->pcb_gsd;
  248                 base = sdp->sd_hibase << 24 | sdp->sd_lobase;
  249                 error = copyout(&base, uap->parms, sizeof(base));
  250                 break;
  251         case I386_SET_GSBASE:
  252                 error = copyin(uap->parms, &base, sizeof(base));
  253                 if (error == 0) {
  254                         /*
  255                          * Construct a descriptor and store it in the pcb for
  256                          * the next context switch.  Also store it in the gdt
  257                          * because we have to do a load_gs() right now.
  258                          */
  259                         fill_based_sd(&sd, base);
  260                         critical_enter();
  261                         td->td_pcb->pcb_gsd = sd;
  262 #ifdef XEN
  263                         HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[1]),
  264                             *(uint64_t *)&sd);
  265 #else                   
  266                         PCPU_GET(fsgs_gdt)[1] = sd;
  267 #endif
  268                         critical_exit();
  269                         load_gs(GSEL(GUGS_SEL, SEL_UPL));
  270                 }
  271                 break;
  272         case I386_GET_XFPUSTATE:
  273                 if (kargs.xfpu.len > cpu_max_ext_state_size -
  274                     sizeof(union savefpu))
  275                         return (EINVAL);
  276                 npxgetregs(td);
  277                 error = copyout((char *)(get_pcb_user_save_td(td) + 1),
  278                     kargs.xfpu.addr, kargs.xfpu.len);
  279                 break;
  280         default:
  281                 error = EINVAL;
  282                 break;
  283         }
  284         return (error);
  285 }
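/*
 * A minimal userland sketch of the FSBASE path handled above.  The
 * operation names come from this file; the header location and the
 * my_tls_block buffer are assumptions of the sketch.  The kernel side only
 * requires that parms point at a uint32_t holding the segment base:
 *
 *      #include <machine/sysarch.h>
 *
 *      static char my_tls_block[128];          // hypothetical buffer
 *      uint32_t base = (uint32_t)my_tls_block;
 *
 *      sysarch(I386_SET_FSBASE, &base);        // 0 on success, -1 + errno
 *      sysarch(I386_GET_FSBASE, &base);        // reads the base back
 */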
  286 
  287 int
  288 i386_extend_pcb(struct thread *td)
  289 {
  290         int i, offset;
  291         u_long *addr;
  292         struct pcb_ext *ext;
  293         struct soft_segment_descriptor ssd = {
  294                 0,                      /* segment base address (overwritten) */
  295                 ctob(IOPAGES + 1) - 1,  /* length */
  296                 SDT_SYS386TSS,          /* segment type */
  297                 0,                      /* priority level */
  298                 1,                      /* descriptor present */
  299                 0, 0,
  300                 0,                      /* default 32 size */
  301                 0                       /* granularity */
  302         };
  303 
  304         ext = (struct pcb_ext *)kmem_malloc(kernel_arena, ctob(IOPAGES+1),
  305             M_WAITOK | M_ZERO);
  306         /* -16 is so we can convert a trapframe into a vm86 trapframe in place */
  307         ext->ext_tss.tss_esp0 = (vm_offset_t)td->td_pcb - 16;
  308         ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
  309         /*
  310          * The last byte of the i/o map must be followed by a 0xff byte.
  311          * We arbitrarily allocate 16 bytes here, to keep the starting
  312          * address on a doubleword boundary.
  313          */
  314         offset = PAGE_SIZE - 16;
  315         ext->ext_tss.tss_ioopt = 
  316             (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
  317         ext->ext_iomap = (caddr_t)ext + offset;
  318         ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;
  319 
  320         addr = (u_long *)ext->ext_vm86.vm86_intmap;
  321         for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
  322                 *addr++ = ~0;
  323 
  324         ssd.ssd_base = (unsigned)&ext->ext_tss;
  325         ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
  326         ssdtosd(&ssd, &ext->ext_tssd);
  327 
  328         KASSERT(td == curthread, ("giving TSS to !curthread"));
  329         KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));
  330 
  331         /* Switch to the new TSS. */
  332         critical_enter();
  333         td->td_pcb->pcb_ext = ext;
  334         PCPU_SET(private_tss, 1);
  335         *PCPU_GET(tss_gdt) = ext->ext_tssd;
  336         ltr(GSEL(GPROC0_SEL, SEL_KPL));
  337         critical_exit();
  338 
  339         return 0;
  340 }
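/*
 * A sketch of how the ctob(IOPAGES + 1) byte allocation above ends up laid
 * out; the offsets follow directly from the code rather than from any
 * additional assumptions:
 *
 *      ext .. ext + PAGE_SIZE - 48           struct pcb_ext itself (TSS and
 *                                            vm86 state) plus padding
 *      ext + PAGE_SIZE - 48, 32 bytes        vm86 interrupt redirection bitmap
 *      ext + PAGE_SIZE - 16, ctob(IOPAGES)   I/O permission bitmap
 *      final 16 bytes                        0xff terminator bytes that must
 *                                            follow the I/O bitmap
 *
 * Everything from the interrupt bitmap to the end of the allocation is set
 * to all ones; for the I/O bitmap this means every port faults until
 * i386_set_ioperm() clears the corresponding bits.
 */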
  341 
  342 int
  343 i386_set_ioperm(td, uap)
  344         struct thread *td;
  345         struct i386_ioperm_args *uap;
  346 {
  347         char *iomap;
  348         u_int i;
  349         int error;
  350 
  351         if ((error = priv_check(td, PRIV_IO)) != 0)
  352                 return (error);
  353         if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
  354                 return (error);
  355         /*
  356          * XXX 
  357          * While this is restricted to root, we should probably figure out
  358          * whether any other driver is using this i/o address, so as not to
  359          * cause confusion.  This probably requires a global 'usage registry'.
  360          */
  361 
  362         if (td->td_pcb->pcb_ext == 0)
  363                 if ((error = i386_extend_pcb(td)) != 0)
  364                         return (error);
  365         iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
  366 
  367         if (uap->start > uap->start + uap->length ||
  368             uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
  369                 return (EINVAL);
  370 
  371         for (i = uap->start; i < uap->start + uap->length; i++) {
  372                 if (uap->enable)
  373                         iomap[i >> 3] &= ~(1 << (i & 7));
  374                 else
  375                         iomap[i >> 3] |= (1 << (i & 7));
  376         }
  377         return (error);
  378 }
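/*
 * A minimal userland sketch of requesting port access through the path
 * above.  The field names match the uses in this file; the header location
 * is an assumption of the sketch.  The caller needs PRIV_IO (i.e. root)
 * and the securelevel must not be raised above 0:
 *
 *      #include <machine/sysarch.h>
 *
 *      struct i386_ioperm_args ia = {
 *              .start = 0x378,         // first port of the range (LPT1 here)
 *              .length = 4,            // number of consecutive ports
 *              .enable = 1,            // 1 grants access, 0 revokes it
 *      };
 *      int rv = sysarch(I386_SET_IOPERM, &ia); // 0 on success, -1 + errno
 */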
  379 
  380 int
  381 i386_get_ioperm(td, uap)
  382         struct thread *td;
  383         struct i386_ioperm_args *uap;
  384 {
  385         int i, state;
  386         char *iomap;
  387 
  388         if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
  389                 return (EINVAL);
  390 
  391         if (td->td_pcb->pcb_ext == 0) {
  392                 uap->length = 0;
  393                 goto done;
  394         }
  395 
  396         iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;
  397 
  398         i = uap->start;
  399         state = (iomap[i >> 3] >> (i & 7)) & 1;
  400         uap->enable = !state;
  401         uap->length = 1;
  402 
  403         for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
  404                 if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
  405                         break;
  406                 uap->length++;
  407         }
  408 
  409 done:
  410         return (0);
  411 }
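/*
 * A matching userland sketch for the query above.  Starting from a given
 * port, the kernel reports one run of ports that share the same permission
 * bit: enable is the state of the starting port and length the size of the
 * run.  Field names match this file; the header location is an assumption:
 *
 *      #include <machine/sysarch.h>
 *
 *      struct i386_ioperm_args ia = { .start = 0x378, .length = 0, .enable = 0 };
 *      sysarch(I386_GET_IOPERM, &ia);
 *      // ia.enable: nonzero if port 0x378 is currently accessible
 *      // ia.length: number of consecutive ports sharing that state
 */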
  412 
  413 /*
  414  * Update the GDT entry pointing to the LDT to point to the LDT of the
  415  * current process.  dt_lock is acquired and released here unless the caller already holds it.
  416  */
  417 void
  418 set_user_ldt(struct mdproc *mdp)
  419 {
  420         struct proc_ldt *pldt;
  421         int dtlocked;
  422 
  423         dtlocked = 0;
  424         if (!mtx_owned(&dt_lock)) {
  425                 mtx_lock_spin(&dt_lock);
  426                 dtlocked = 1;
  427         }
  428 
  429         pldt = mdp->md_ldt;
  430 #ifdef XEN
  431         i386_reset_ldt(pldt);
  432         PCPU_SET(currentldt, (int)pldt);
  433 #else   
  434 #ifdef SMP
  435         gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
  436 #else
  437         gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
  438 #endif
  439         lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
  440         PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
  441 #endif /* XEN */ 
  442         if (dtlocked)
  443                 mtx_unlock_spin(&dt_lock);
  444 }
  445 
  446 #ifdef SMP
  447 static void
  448 set_user_ldt_rv(struct vmspace *vmsp)
  449 {
  450         struct thread *td;
  451 
  452         td = curthread;
  453         if (vmsp != td->td_proc->p_vmspace)
  454                 return;
  455 
  456         set_user_ldt(&td->td_proc->p_md);
  457 }
  458 #endif
  459 
  460 #ifdef XEN
  461 
  462 /* 
  463  * dt_lock must be held. Returns with dt_lock held. 
  464  */ 
  465 struct proc_ldt * 
  466 user_ldt_alloc(struct mdproc *mdp, int len) 
  467 { 
  468         struct proc_ldt *pldt, *new_ldt; 
  469  
  470         mtx_assert(&dt_lock, MA_OWNED); 
  471         mtx_unlock_spin(&dt_lock); 
  472         new_ldt = malloc(sizeof(struct proc_ldt), 
  473                 M_SUBPROC, M_WAITOK); 
  474  
  475         new_ldt->ldt_len = len = NEW_MAX_LD(len); 
  476         new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena, 
  477             round_page(len * sizeof(union descriptor)), M_WAITOK);
  478         new_ldt->ldt_refcnt = 1; 
  479         new_ldt->ldt_active = 0; 
  480  
  481         mtx_lock_spin(&dt_lock);
  482         if ((pldt = mdp->md_ldt)) { 
  483                 if (len > pldt->ldt_len) 
  484                         len = pldt->ldt_len; 
  485                 bcopy(pldt->ldt_base, new_ldt->ldt_base, 
  486                     len * sizeof(union descriptor)); 
  487         } else { 
  488                 bcopy(ldt, new_ldt->ldt_base, PAGE_SIZE); 
  489         } 
  490         mtx_unlock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
  491         pmap_map_readonly(kernel_pmap, (vm_offset_t)new_ldt->ldt_base, 
  492                           new_ldt->ldt_len*sizeof(union descriptor)); 
  493         mtx_lock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
  494         return (new_ldt);
  495 } 
  496 #else
  497 /*
  498  * dt_lock must be held. Returns with dt_lock held.
  499  */
  500 struct proc_ldt *
  501 user_ldt_alloc(struct mdproc *mdp, int len)
  502 {
  503         struct proc_ldt *pldt, *new_ldt;
  504 
  505         mtx_assert(&dt_lock, MA_OWNED);
  506         mtx_unlock_spin(&dt_lock);
  507         new_ldt = malloc(sizeof(struct proc_ldt),
  508                 M_SUBPROC, M_WAITOK);
  509 
  510         new_ldt->ldt_len = len = NEW_MAX_LD(len);
  511         new_ldt->ldt_base = (caddr_t)kmem_malloc(kernel_arena,
  512             len * sizeof(union descriptor), M_WAITOK | M_ZERO);
  513         new_ldt->ldt_refcnt = 1;
  514         new_ldt->ldt_active = 0;
  515 
  516         mtx_lock_spin(&dt_lock);
  517         gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
  518         gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
  519         ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);
  520 
  521         if ((pldt = mdp->md_ldt) != NULL) {
  522                 if (len > pldt->ldt_len)
  523                         len = pldt->ldt_len;
  524                 bcopy(pldt->ldt_base, new_ldt->ldt_base,
  525                     len * sizeof(union descriptor));
  526         } else
  527                 bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));
  528         
  529         return (new_ldt);
  530 }
  531 #endif /* !XEN */
  532 
  533 /*
  534  * Must be called with dt_lock held.  Returns with dt_lock released.
  535  */
  536 void
  537 user_ldt_free(struct thread *td)
  538 {
  539         struct mdproc *mdp = &td->td_proc->p_md;
  540         struct proc_ldt *pldt;
  541 
  542         mtx_assert(&dt_lock, MA_OWNED);
  543         if ((pldt = mdp->md_ldt) == NULL) {
  544                 mtx_unlock_spin(&dt_lock);
  545                 return;
  546         }
  547 
  548         if (td == curthread) {
  549 #ifdef XEN
  550                 i386_reset_ldt(&default_proc_ldt);
  551                 PCPU_SET(currentldt, (int)&default_proc_ldt);
  552 #else
  553                 lldt(_default_ldt);
  554                 PCPU_SET(currentldt, _default_ldt);
  555 #endif
  556         }
  557 
  558         mdp->md_ldt = NULL;
  559         user_ldt_deref(pldt);
  560 }
  561 
  562 void
  563 user_ldt_deref(struct proc_ldt *pldt)
  564 {
  565 
  566         mtx_assert(&dt_lock, MA_OWNED);
  567         if (--pldt->ldt_refcnt == 0) {
  568                 mtx_unlock_spin(&dt_lock);
  569                 kmem_free(kernel_arena, (vm_offset_t)pldt->ldt_base,
  570                         pldt->ldt_len * sizeof(union descriptor));
  571                 free(pldt, M_SUBPROC);
  572         } else
  573                 mtx_unlock_spin(&dt_lock);
  574 }
  575 
  576 /*
  577  * Note for the authors of compat layers (linux, etc): copyout() in
  578  * the function below is not a problem since it presents data in
  579  * arch-specific format (i.e. i386-specific in this case), not in
  580  * the OS-specific one.
  581  */
  582 int
  583 i386_get_ldt(td, uap)
  584         struct thread *td;
  585         struct i386_ldt_args *uap;
  586 {
  587         int error = 0;
  588         struct proc_ldt *pldt;
  589         int nldt, num;
  590         union descriptor *lp;
  591 
  592 #ifdef  DEBUG
  593         printf("i386_get_ldt: start=%d num=%d descs=%p\n",
  594             uap->start, uap->num, (void *)uap->descs);
  595 #endif
  596 
  597         mtx_lock_spin(&dt_lock);
  598         if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
  599                 nldt = pldt->ldt_len;
  600                 lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
  601                 mtx_unlock_spin(&dt_lock);
  602                 num = min(uap->num, nldt);
  603         } else {
  604                 mtx_unlock_spin(&dt_lock);
  605                 nldt = sizeof(ldt)/sizeof(ldt[0]);
  606                 num = min(uap->num, nldt);
  607                 lp = &ldt[uap->start];
  608         }
  609 
  610         if ((uap->start > (unsigned int)nldt) ||
  611             ((unsigned int)num > (unsigned int)nldt) ||
  612             ((unsigned int)(uap->start + num) > (unsigned int)nldt))
  613                 return(EINVAL);
  614 
  615         error = copyout(lp, uap->descs, num * sizeof(union descriptor));
  616         if (!error)
  617                 td->td_retval[0] = num;
  618 
  619         return(error);
  620 }
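/*
 * A minimal userland sketch of reading descriptors back through the path
 * above.  Field names are those used in this file; the header location is
 * an assumption.  The syscall return value is the number of descriptors
 * copied out (td_retval[0] above):
 *
 *      #include <machine/sysarch.h>
 *
 *      union descriptor d;
 *      struct i386_ldt_args la = {
 *              .start = 0,             // first LDT slot to read
 *              .num = 1,               // how many descriptors
 *              .descs = &d,
 *      };
 *      int n = sysarch(I386_GET_LDT, &la);     // count copied, or -1
 */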
  621 
  622 int
  623 i386_set_ldt(td, uap, descs)
  624         struct thread *td;
  625         struct i386_ldt_args *uap;
  626         union descriptor *descs;
  627 {
  628         int error = 0, i;
  629         int largest_ld;
  630         struct mdproc *mdp = &td->td_proc->p_md;
  631         struct proc_ldt *pldt;
  632         union descriptor *dp;
  633 
  634 #ifdef  DEBUG
  635         printf("i386_set_ldt: start=%d num=%d descs=%p\n",
  636             uap->start, uap->num, (void *)uap->descs);
  637 #endif
  638 
  639         if (descs == NULL) {
  640                 /* Free descriptors */
  641                 if (uap->start == 0 && uap->num == 0) {
  642                         /*
  643                          * Treat this as a special case, so userland needn't
  644                          * know the magic number NLDT.
  645                          */
  646                         uap->start = NLDT;
  647                         uap->num = MAX_LD - NLDT;
  648                 }
  649                 if (uap->num == 0)
  650                         return (EINVAL);
  651                 mtx_lock_spin(&dt_lock);
  652                 if ((pldt = mdp->md_ldt) == NULL ||
  653                     uap->start >= pldt->ldt_len) {
  654                         mtx_unlock_spin(&dt_lock);
  655                         return (0);
  656                 }
  657                 largest_ld = uap->start + uap->num;
  658                 if (largest_ld > pldt->ldt_len)
  659                         largest_ld = pldt->ldt_len;
  660                 i = largest_ld - uap->start;
  661                 bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
  662                     sizeof(union descriptor) * i);
  663                 mtx_unlock_spin(&dt_lock);
  664                 return (0);
  665         }
  666 
  667         if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
  668                 /* verify range of descriptors to modify */
  669                 largest_ld = uap->start + uap->num;
  670                 if (uap->start >= MAX_LD || largest_ld > MAX_LD) {
  671                         return (EINVAL);
  672                 }
  673         }
  674 
  675         /* Check descriptors for access violations */
  676         for (i = 0; i < uap->num; i++) {
  677                 dp = &descs[i];
  678 
  679                 switch (dp->sd.sd_type) {
  680                 case SDT_SYSNULL:       /* system null */ 
  681                         dp->sd.sd_p = 0;
  682                         break;
  683                 case SDT_SYS286TSS: /* system 286 TSS available */
  684                 case SDT_SYSLDT:    /* system local descriptor table */
  685                 case SDT_SYS286BSY: /* system 286 TSS busy */
  686                 case SDT_SYSTASKGT: /* system task gate */
  687                 case SDT_SYS286IGT: /* system 286 interrupt gate */
  688                 case SDT_SYS286TGT: /* system 286 trap gate */
  689                 case SDT_SYSNULL2:  /* undefined by Intel */ 
  690                 case SDT_SYS386TSS: /* system 386 TSS available */
  691                 case SDT_SYSNULL3:  /* undefined by Intel */
  692                 case SDT_SYS386BSY: /* system 386 TSS busy */
  693                 case SDT_SYSNULL4:  /* undefined by Intel */ 
  694                 case SDT_SYS386IGT: /* system 386 interrupt gate */
  695                 case SDT_SYS386TGT: /* system 386 trap gate */
  696                 case SDT_SYS286CGT: /* system 286 call gate */ 
  697                 case SDT_SYS386CGT: /* system 386 call gate */
  698                         /* I can't think of any reason to allow a user proc
  699                          * to create a segment of these types.  They are
  700                          * for OS use only.
  701                          */
  702                         return (EACCES);
  703                         /*NOTREACHED*/
  704 
  705                 /* memory segment types */
  706                 case SDT_MEMEC:   /* memory execute only conforming */
  707                 case SDT_MEMEAC:  /* memory execute only accessed conforming */
  708                 case SDT_MEMERC:  /* memory execute read conforming */
  709                 case SDT_MEMERAC: /* memory execute read accessed conforming */
  710                          /* Must be "present" if executable and conforming. */
  711                         if (dp->sd.sd_p == 0)
  712                                 return (EACCES);
  713                         break;
  714                 case SDT_MEMRO:   /* memory read only */
  715                 case SDT_MEMROA:  /* memory read only accessed */
  716                 case SDT_MEMRW:   /* memory read write */
  717                 case SDT_MEMRWA:  /* memory read write accessed */
  718                 case SDT_MEMROD:  /* memory read only expand dwn limit */
  719                 case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
  720                 case SDT_MEMRWD:  /* memory read write expand dwn limit */  
  721                 case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
  722                 case SDT_MEME:    /* memory execute only */ 
  723                 case SDT_MEMEA:   /* memory execute only accessed */
  724                 case SDT_MEMER:   /* memory execute read */
  725                 case SDT_MEMERA:  /* memory execute read accessed */
  726                         break;
  727                 default:
  728                         return(EINVAL);
  729                         /*NOTREACHED*/
  730                 }
  731 
  732                 /* Only user (ring-3) descriptors may be present. */
  733                 if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
  734                         return (EACCES);
  735         }
  736 
  737         if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
  738                 /* Allocate a free slot */
  739                 mtx_lock_spin(&dt_lock);
  740                 if ((pldt = mdp->md_ldt) == NULL) {
  741                         if ((error = i386_ldt_grow(td, NLDT + 1))) {
  742                                 mtx_unlock_spin(&dt_lock);
  743                                 return (error);
  744                         }
  745                         pldt = mdp->md_ldt;
  746                 }
  747 again:
  748                 /*
  749                  * Start scanning a bit up to leave room for NVidia and
  750                  * Wine, which still use the "Blat" method of allocation.
  751                  */
  752                 dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
  753                 for (i = NLDT; i < pldt->ldt_len; ++i) {
  754                         if (dp->sd.sd_type == SDT_SYSNULL)
  755                                 break;
  756                         dp++;
  757                 }
  758                 if (i >= pldt->ldt_len) {
  759                         if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
  760                                 mtx_unlock_spin(&dt_lock);
  761                                 return (error);
  762                         }
  763                         goto again;
  764                 }
  765                 uap->start = i;
  766                 error = i386_set_ldt_data(td, i, 1, descs);
  767                 mtx_unlock_spin(&dt_lock);
  768         } else {
  769                 largest_ld = uap->start + uap->num;
  770                 mtx_lock_spin(&dt_lock);
  771                 if (!(error = i386_ldt_grow(td, largest_ld))) {
  772                         error = i386_set_ldt_data(td, uap->start, uap->num,
  773                             descs);
  774                 }
  775                 mtx_unlock_spin(&dt_lock);
  776         }
  777         if (error == 0)
  778                 td->td_retval[0] = uap->start;
  779         return (error);
  780 }
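/*
 * A minimal userland sketch of the LDT_AUTO_ALLOC path above: build one
 * ring-3 descriptor, let the kernel pick a free slot, and form a selector
 * from the slot it returns.  Field and constant names are the ones used in
 * this file; the header locations and the selector arithmetic at the end
 * (slot << 3, LDT table bit, RPL 3) are general i386 facts, not taken from
 * this file:
 *
 *      #include <string.h>
 *      #include <machine/sysarch.h>
 *
 *      union descriptor d;
 *      memset(&d, 0, sizeof(d));
 *      d.sd.sd_type = SDT_MEMRWA;      // read/write data, accessed
 *      d.sd.sd_dpl = SEL_UPL;          // ring 3, required by the checks above
 *      d.sd.sd_p = 1;                  // present
 *      d.sd.sd_def32 = 1;
 *      d.sd.sd_gran = 1;
 *      d.sd.sd_lolimit = 0xffff;       // 4GB limit together with sd_hilimit
 *      d.sd.sd_hilimit = 0xf;
 *
 *      struct i386_ldt_args la = {
 *              .start = LDT_AUTO_ALLOC,
 *              .num = 1,
 *              .descs = &d,
 *      };
 *      int slot = sysarch(I386_SET_LDT, &la);          // slot used, or -1
 *      unsigned short sel = (slot << 3) | 4 | 3;       // LDT selector, RPL 3
 */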
  781 #ifdef XEN
  782 static int
  783 i386_set_ldt_data(struct thread *td, int start, int num,
  784         union descriptor *descs)
  785 {
  786         struct mdproc *mdp = &td->td_proc->p_md;
  787         struct proc_ldt *pldt = mdp->md_ldt;
  788 
  789         mtx_assert(&dt_lock, MA_OWNED);
  790 
  791         while (num) {
  792                 xen_update_descriptor(
  793                     &((union descriptor *)(pldt->ldt_base))[start],
  794                     descs);
  795                 num--;
  796                 start++;
  797                 descs++;
  798         }
  799         return (0);
  800 }
  801 #else
  802 static int
  803 i386_set_ldt_data(struct thread *td, int start, int num,
  804         union descriptor *descs)
  805 {
  806         struct mdproc *mdp = &td->td_proc->p_md;
  807         struct proc_ldt *pldt = mdp->md_ldt;
  808 
  809         mtx_assert(&dt_lock, MA_OWNED);
  810 
  811         /* Fill in range */
  812         bcopy(descs,
  813             &((union descriptor *)(pldt->ldt_base))[start],
  814             num * sizeof(union descriptor));
  815         return (0);
  816 }
  817 #endif /* !XEN */
  818 
  819 static int
  820 i386_ldt_grow(struct thread *td, int len) 
  821 {
  822         struct mdproc *mdp = &td->td_proc->p_md;
  823         struct proc_ldt *new_ldt, *pldt;
  824         caddr_t old_ldt_base = NULL_LDT_BASE;
  825         int old_ldt_len = 0;
  826 
  827         mtx_assert(&dt_lock, MA_OWNED);
  828 
  829         if (len > MAX_LD)
  830                 return (ENOMEM);
  831         if (len < NLDT + 1)
  832                 len = NLDT + 1;
  833 
  834         /* Allocate a user ldt. */
  835         if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
  836                 new_ldt = user_ldt_alloc(mdp, len);
  837                 if (new_ldt == NULL)
  838                         return (ENOMEM);
  839                 pldt = mdp->md_ldt;
  840 
  841                 if (pldt != NULL) {
  842                         if (new_ldt->ldt_len <= pldt->ldt_len) {
  843                                 /*
  844                                  * We just lost the race for allocation, so
  845                                  * free the new object and return.
  846                                  */
  847                                 mtx_unlock_spin(&dt_lock);
  848                                 kmem_free(kernel_arena,
  849                                    (vm_offset_t)new_ldt->ldt_base,
  850                                    new_ldt->ldt_len * sizeof(union descriptor));
  851                                 free(new_ldt, M_SUBPROC);
  852                                 mtx_lock_spin(&dt_lock);
  853                                 return (0);
  854                         }
  855 
  856                         /*
  857                          * We have to substitute the current LDT entry for
  858                          * curproc with the new one since its size grew.
  859                          */
  860                         old_ldt_base = pldt->ldt_base;
  861                         old_ldt_len = pldt->ldt_len;
  862                         pldt->ldt_sd = new_ldt->ldt_sd;
  863                         pldt->ldt_base = new_ldt->ldt_base;
  864                         pldt->ldt_len = new_ldt->ldt_len;
  865                 } else
  866                         mdp->md_ldt = pldt = new_ldt;
  867 #ifdef SMP
  868                 /*
  869                  * Signal other CPUs to reload the LDT.  We need to
  870                  * unlock dt_lock here because the other CPUs will
  871                  * contend for it: their curthreads do not hold the
  872                  * lock and would block when trying to acquire it.
  873                  */
  874                 mtx_unlock_spin(&dt_lock);
  875                 smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
  876                     NULL, td->td_proc->p_vmspace);
  877 #else
  878                 set_user_ldt(&td->td_proc->p_md);
  879                 mtx_unlock_spin(&dt_lock);
  880 #endif
  881                 if (old_ldt_base != NULL_LDT_BASE) {
  882                         kmem_free(kernel_arena, (vm_offset_t)old_ldt_base,
  883                             old_ldt_len * sizeof(union descriptor));
  884                         free(new_ldt, M_SUBPROC);
  885                 }
  886                 mtx_lock_spin(&dt_lock);
  887         }
  888         return (0);
  889 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.