FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/sys_machdep.c


/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.1/sys/i386/i386/sys_machdep.c 199583 2009-11-20 15:27:52Z jhb $");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#ifdef XEN
#include <machine/xen/xenfunc.h>

void i386_reset_ldt(struct proc_ldt *pldt);

void
i386_reset_ldt(struct proc_ldt *pldt)
{
        xen_set_ldt((vm_offset_t)pldt->ldt_base, pldt->ldt_len);
}
#else
#define i386_reset_ldt(x)
#endif

#include <vm/vm_kern.h>         /* for kernel_map */

#define MAX_LD 8192
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num)  ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define NULL_LDT_BASE   ((caddr_t)NULL)

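/*
 * Sizing arithmetic, for reference: NEW_MAX_LD() rounds an entry count
 * up to a multiple of LD_PER_PAGE (512 eight-byte descriptors fill one
 * 4K page); e.g. NEW_MAX_LD(5) == 512, and an exact multiple is bumped
 * by a full page, so NEW_MAX_LD(512) == 1024.  SIZE_FROM_LARGEST_LD()
 * converts the entry count to bytes by shifting left by 3, since
 * sizeof(union descriptor) == 8.
 */
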
#ifdef SMP
static void set_user_ldt_rv(struct vmspace *vmsp);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
        union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
        int op;
        char *parms;
};
#endif

int
sysarch(struct thread *td, struct sysarch_args *uap)
{
        int error;
        union descriptor *lp;
        union {
                struct i386_ldt_args largs;
                struct i386_ioperm_args iargs;
        } kargs;
        uint32_t base;
        struct segment_descriptor sd, *sdp;

        AUDIT_ARG_CMD(uap->op);
        switch (uap->op) {
        case I386_GET_IOPERM:
        case I386_SET_IOPERM:
                if ((error = copyin(uap->parms, &kargs.iargs,
                    sizeof(struct i386_ioperm_args))) != 0)
                        return (error);
                break;
        case I386_GET_LDT:
        case I386_SET_LDT:
                if ((error = copyin(uap->parms, &kargs.largs,
                    sizeof(struct i386_ldt_args))) != 0)
                        return (error);
                if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
                        return (EINVAL);
                break;
        default:
                break;
        }

        switch (uap->op) {
        case I386_GET_LDT:
                error = i386_get_ldt(td, &kargs.largs);
                break;
        case I386_SET_LDT:
                if (kargs.largs.descs != NULL) {
                        lp = (union descriptor *)kmem_alloc(kernel_map,
                            kargs.largs.num * sizeof(union descriptor));
                        if (lp == NULL) {
                                error = ENOMEM;
                                break;
                        }
                        error = copyin(kargs.largs.descs, lp,
                            kargs.largs.num * sizeof(union descriptor));
                        if (error == 0)
                                error = i386_set_ldt(td, &kargs.largs, lp);
                        kmem_free(kernel_map, (vm_offset_t)lp,
                            kargs.largs.num * sizeof(union descriptor));
                } else {
                        error = i386_set_ldt(td, &kargs.largs, NULL);
                }
                break;
        case I386_GET_IOPERM:
                error = i386_get_ioperm(td, &kargs.iargs);
                if (error == 0)
                        error = copyout(&kargs.iargs, uap->parms,
                            sizeof(struct i386_ioperm_args));
                break;
        case I386_SET_IOPERM:
                error = i386_set_ioperm(td, &kargs.iargs);
                break;
        case I386_VM86:
                error = vm86_sysarch(td, uap->parms);
                break;
        case I386_GET_FSBASE:
                sdp = &td->td_pcb->pcb_fsd;
                base = sdp->sd_hibase << 24 | sdp->sd_lobase;
                error = copyout(&base, uap->parms, sizeof(base));
                break;
        case I386_SET_FSBASE:
                error = copyin(uap->parms, &base, sizeof(base));
                if (!error) {
                        /*
                         * Construct a descriptor and store it in the pcb for
                         * the next context switch.  Also store it in the gdt
                         * so that the load of tf_fs into %fs will activate it
                         * at return to userland.
                         */
                        sd.sd_lobase = base & 0xffffff;
                        sd.sd_hibase = (base >> 24) & 0xff;
#ifdef XEN
                        /* need to do nosegneg like Linux */
                        sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
                        sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
                        sd.sd_hilimit = 0xf;
                        sd.sd_type  = SDT_MEMRWA;
                        sd.sd_dpl   = SEL_UPL;
                        sd.sd_p     = 1;
                        sd.sd_xx    = 0;
                        sd.sd_def32 = 1;
                        sd.sd_gran  = 1;
                        critical_enter();
                        td->td_pcb->pcb_fsd = sd;
#ifdef XEN
                        HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[0]),
                            *(uint64_t *)&sd);
#else
                        PCPU_GET(fsgs_gdt)[0] = sd;
#endif
                        critical_exit();
                        td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
                }
                break;
        case I386_GET_GSBASE:
                sdp = &td->td_pcb->pcb_gsd;
                base = sdp->sd_hibase << 24 | sdp->sd_lobase;
                error = copyout(&base, uap->parms, sizeof(base));
                break;
        case I386_SET_GSBASE:
                error = copyin(uap->parms, &base, sizeof(base));
                if (!error) {
                        /*
                         * Construct a descriptor and store it in the pcb for
                         * the next context switch.  Also store it in the gdt
                         * because we have to do a load_gs() right now.
                         */
                        sd.sd_lobase = base & 0xffffff;
                        sd.sd_hibase = (base >> 24) & 0xff;

#ifdef XEN
                        /* need to do nosegneg like Linux */
                        sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
                        sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
                        sd.sd_hilimit = 0xf;
                        sd.sd_type  = SDT_MEMRWA;
                        sd.sd_dpl   = SEL_UPL;
                        sd.sd_p     = 1;
                        sd.sd_xx    = 0;
                        sd.sd_def32 = 1;
                        sd.sd_gran  = 1;
                        critical_enter();
                        td->td_pcb->pcb_gsd = sd;
#ifdef XEN
                        HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[1]),
                            *(uint64_t *)&sd);
#else
                        PCPU_GET(fsgs_gdt)[1] = sd;
#endif
                        critical_exit();
                        load_gs(GSEL(GUGS_SEL, SEL_UPL));
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

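The I386_GET_FSBASE and I386_SET_FSBASE cases above are reachable from
userland through sysarch(2), which copies a 32-bit base address through
uap->parms.  A minimal userland sketch (i386 only; round-tripping the
current base is purely illustrative):

#include <machine/sysarch.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint32_t base;

        /* I386_GET_FSBASE copies the pcb descriptor's base out to parms. */
        if (sysarch(I386_GET_FSBASE, &base) != 0)
                err(1, "sysarch(I386_GET_FSBASE)");
        printf("%%fs base: 0x%08x\n", base);

        /* Writing a base back installs a fresh descriptor in pcb and GDT. */
        if (sysarch(I386_SET_FSBASE, &base) != 0)
                err(1, "sysarch(I386_SET_FSBASE)");
        return (0);
}

libc also provides the thin wrappers i386_get_fsbase(3) and
i386_set_fsbase(3) over the same two operations.
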
int
i386_extend_pcb(struct thread *td)
{
        int i, offset;
        u_long *addr;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                ctob(IOPAGES + 1) - 1,  /* length */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 32 size */
                0                       /* granularity */
        };

        ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
        if (ext == NULL)
                return (ENOMEM);
        bzero(ext, sizeof(struct pcb_ext));
        /* -16 is so we can convert a trapframe into a vm86trapframe in place */
        ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
            sizeof(struct pcb) - 16;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        /*
         * The last byte of the i/o map must be followed by an 0xff byte.
         * We arbitrarily allocate 16 bytes here, to keep the starting
         * address on a doubleword boundary.
         */
        offset = PAGE_SIZE - 16;
        ext->ext_tss.tss_ioopt =
            (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
        ext->ext_iomap = (caddr_t)ext + offset;
        ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

        addr = (u_long *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
                *addr++ = ~0;

        ssd.ssd_base = (unsigned)&ext->ext_tss;
        ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
        ssdtosd(&ssd, &ext->ext_tssd);

        KASSERT(td == curthread, ("giving TSS to !curthread"));
        KASSERT(td->td_pcb->pcb_ext == NULL, ("already have a TSS!"));

        /* Switch to the new TSS. */
        critical_enter();
        td->td_pcb->pcb_ext = ext;
        PCPU_SET(private_tss, 1);
        *PCPU_GET(tss_gdt) = ext->ext_tssd;
        ltr(GSEL(GPROC0_SEL, SEL_KPL));
        critical_exit();

        return (0);
}

int
i386_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
        int i, error;
        char *iomap;

        if ((error = priv_check(td, PRIV_IO)) != 0)
                return (error);
        if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
                return (error);
        /*
         * XXX
         * While this is restricted to root, we should probably figure out
         * whether any other driver is using this i/o address, so as not to
         * cause confusion.  This probably requires a global 'usage registry'.
         */

        if (td->td_pcb->pcb_ext == NULL)
                if ((error = i386_extend_pcb(td)) != 0)
                        return (error);
        iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

        if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
                return (EINVAL);

        for (i = uap->start; i < uap->start + uap->length; i++) {
                if (uap->enable)
                        iomap[i >> 3] &= ~(1 << (i & 7));
                else
                        iomap[i >> 3] |= (1 << (i & 7));
        }
        return (error);
}

int
i386_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
        int i, state;
        char *iomap;

        if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
                return (EINVAL);

        if (td->td_pcb->pcb_ext == NULL) {
                uap->length = 0;
                goto done;
        }

        iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

        i = uap->start;
        state = (iomap[i >> 3] >> (i & 7)) & 1;
        uap->enable = !state;
        uap->length = 1;

        for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
                if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
                        break;
                uap->length++;
        }

done:
        return (0);
}

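From userland these two operations are normally reached through the libc
wrappers i386_get_ioperm(3) and i386_set_ioperm(3).  A minimal sketch
that opens up a single port (requires root and securelevel <= 0, per the
priv_check() and securelevel_gt() tests above; the choice of port 0x80
is illustrative):

#include <machine/sysarch.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
        unsigned int length;
        int enable;

        /* Clear the bitmap bit for port 0x80 (bit clear == access allowed). */
        if (i386_set_ioperm(0x80, 1, 1) != 0)
                err(1, "i386_set_ioperm");

        /* Read back the state of the run of ports starting at 0x80. */
        if (i386_get_ioperm(0x80, &length, &enable) != 0)
                err(1, "i386_get_ioperm");
        printf("port 0x80: enable=%d run length=%u\n", enable, length);
        return (0);
}
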
/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  dt_lock is acquired and released here if the caller
 * does not already hold it.
 */
void
set_user_ldt(struct mdproc *mdp)
{
        struct proc_ldt *pldt;
        int dtlocked;

        dtlocked = 0;
        if (!mtx_owned(&dt_lock)) {
                mtx_lock_spin(&dt_lock);
                dtlocked = 1;
        }

        pldt = mdp->md_ldt;
#ifdef XEN
        i386_reset_ldt(pldt);
        PCPU_SET(currentldt, (int)pldt);
#else
#ifdef SMP
        gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
        gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
#endif
        lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
        PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
#endif /* XEN */
        if (dtlocked)
                mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
        struct thread *td;

        td = curthread;
        if (vmsp != td->td_proc->p_vmspace)
                return;

        set_user_ldt(&td->td_proc->p_md);
}
#endif

#ifdef XEN

/*
 * dt_lock must be held.  Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
        struct proc_ldt *pldt, *new_ldt;

        mtx_assert(&dt_lock, MA_OWNED);
        mtx_unlock_spin(&dt_lock);
        new_ldt = malloc(sizeof(struct proc_ldt),
                M_SUBPROC, M_WAITOK);

        new_ldt->ldt_len = len = NEW_MAX_LD(len);
        new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
                round_page(len * sizeof(union descriptor)));
        if (new_ldt->ldt_base == NULL) {
                free(new_ldt, M_SUBPROC);
                mtx_lock_spin(&dt_lock);
                return (NULL);
        }
        new_ldt->ldt_refcnt = 1;
        new_ldt->ldt_active = 0;

        if ((pldt = mdp->md_ldt) != NULL) {
                if (len > pldt->ldt_len)
                        len = pldt->ldt_len;
                bcopy(pldt->ldt_base, new_ldt->ldt_base,
                    len * sizeof(union descriptor));
        } else {
                bcopy(ldt, new_ldt->ldt_base, PAGE_SIZE);
        }
        pmap_map_readonly(kernel_pmap, (vm_offset_t)new_ldt->ldt_base,
                          new_ldt->ldt_len * sizeof(union descriptor));
        return (new_ldt);
}
#else
/*
 * dt_lock must be held.  Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
        struct proc_ldt *pldt, *new_ldt;

        mtx_assert(&dt_lock, MA_OWNED);
        mtx_unlock_spin(&dt_lock);
        new_ldt = malloc(sizeof(struct proc_ldt),
                M_SUBPROC, M_WAITOK);

        new_ldt->ldt_len = len = NEW_MAX_LD(len);
        new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
                len * sizeof(union descriptor));
        if (new_ldt->ldt_base == NULL) {
                free(new_ldt, M_SUBPROC);
                mtx_lock_spin(&dt_lock);
                return (NULL);
        }
        new_ldt->ldt_refcnt = 1;
        new_ldt->ldt_active = 0;

        mtx_lock_spin(&dt_lock);
        gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
        gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
        ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

        if ((pldt = mdp->md_ldt) != NULL) {
                if (len > pldt->ldt_len)
                        len = pldt->ldt_len;
                bcopy(pldt->ldt_base, new_ldt->ldt_base,
                    len * sizeof(union descriptor));
        } else
                bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

        return (new_ldt);
}
#endif /* !XEN */

/*
 * Must be called with dt_lock held.  Returns with dt_lock released.
 */
void
user_ldt_free(struct thread *td)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt;

        mtx_assert(&dt_lock, MA_OWNED);
        if ((pldt = mdp->md_ldt) == NULL) {
                mtx_unlock_spin(&dt_lock);
                return;
        }

        if (td == PCPU_GET(curthread)) {
                lldt(_default_ldt);
                PCPU_SET(currentldt, _default_ldt);
        }

        mdp->md_ldt = NULL;
        user_ldt_deref(pldt);
}

void
user_ldt_deref(struct proc_ldt *pldt)
{

        mtx_assert(&dt_lock, MA_OWNED);
        if (--pldt->ldt_refcnt == 0) {
                mtx_unlock_spin(&dt_lock);
                kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
                        pldt->ldt_len * sizeof(union descriptor));
                free(pldt, M_SUBPROC);
        } else
                mtx_unlock_spin(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in an
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
        int error = 0;
        struct proc_ldt *pldt;
        int nldt, num;
        union descriptor *lp;

#ifdef  DEBUG
        printf("i386_get_ldt: start=%d num=%d descs=%p\n",
            uap->start, uap->num, (void *)uap->descs);
#endif

        mtx_lock_spin(&dt_lock);
        if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
                nldt = pldt->ldt_len;
                lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
                mtx_unlock_spin(&dt_lock);
                num = min(uap->num, nldt);
        } else {
                mtx_unlock_spin(&dt_lock);
                nldt = sizeof(ldt) / sizeof(ldt[0]);
                num = min(uap->num, nldt);
                lp = &ldt[uap->start];
        }

        if ((uap->start > (unsigned int)nldt) ||
            ((unsigned int)num > (unsigned int)nldt) ||
            ((unsigned int)(uap->start + num) > (unsigned int)nldt))
                return (EINVAL);

        error = copyout(lp, uap->descs, num * sizeof(union descriptor));
        if (!error)
                td->td_retval[0] = num;

        return (error);
}

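The corresponding libc wrapper is i386_get_ldt(3), which returns the
number of descriptors copied out.  A minimal sketch that dumps the first
few LDT slots (the slot count of 4 is arbitrary):

#include <machine/segments.h>
#include <machine/sysarch.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
        union descriptor descs[4];
        int i, n;

        n = i386_get_ldt(0, descs, 4);
        if (n < 0)
                err(1, "i386_get_ldt");
        for (i = 0; i < n; i++)
                printf("slot %d: type=%d dpl=%d present=%d\n", i,
                    descs[i].sd.sd_type, descs[i].sd.sd_dpl,
                    descs[i].sd.sd_p);
        return (0);
}
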
int
i386_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    union descriptor *descs)
{
        int error = 0, i;
        int largest_ld;
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt;
        union descriptor *dp;

#ifdef  DEBUG
        printf("i386_set_ldt: start=%d num=%d descs=%p\n",
            uap->start, uap->num, (void *)uap->descs);
#endif

        if (descs == NULL) {
                /* Free descriptors */
                if (uap->start == 0 && uap->num == 0) {
                        /*
                         * Treat this as a special case, so userland needn't
                         * know the magic number NLDT.
                         */
                        uap->start = NLDT;
                        uap->num = MAX_LD - NLDT;
                }
                if (uap->num <= 0)
                        return (EINVAL);
                mtx_lock_spin(&dt_lock);
                if ((pldt = mdp->md_ldt) == NULL ||
                    uap->start >= pldt->ldt_len) {
                        mtx_unlock_spin(&dt_lock);
                        return (0);
                }
                largest_ld = uap->start + uap->num;
                if (largest_ld > pldt->ldt_len)
                        largest_ld = pldt->ldt_len;
                i = largest_ld - uap->start;
                bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
                    sizeof(union descriptor) * i);
                mtx_unlock_spin(&dt_lock);
                return (0);
        }

        if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
                /* verify range of descriptors to modify */
                largest_ld = uap->start + uap->num;
                if (uap->start >= MAX_LD ||
                    uap->num < 0 || largest_ld > MAX_LD) {
                        return (EINVAL);
                }
        }

        /* Check descriptors for access violations */
        for (i = 0; i < uap->num; i++) {
                dp = &descs[i];

                switch (dp->sd.sd_type) {
                case SDT_SYSNULL:       /* system null */
                        dp->sd.sd_p = 0;
                        break;
                case SDT_SYS286TSS: /* system 286 TSS available */
                case SDT_SYSLDT:    /* system local descriptor table */
                case SDT_SYS286BSY: /* system 286 TSS busy */
                case SDT_SYSTASKGT: /* system task gate */
                case SDT_SYS286IGT: /* system 286 interrupt gate */
                case SDT_SYS286TGT: /* system 286 trap gate */
                case SDT_SYSNULL2:  /* undefined by Intel */
                case SDT_SYS386TSS: /* system 386 TSS available */
                case SDT_SYSNULL3:  /* undefined by Intel */
                case SDT_SYS386BSY: /* system 386 TSS busy */
                case SDT_SYSNULL4:  /* undefined by Intel */
                case SDT_SYS386IGT: /* system 386 interrupt gate */
                case SDT_SYS386TGT: /* system 386 trap gate */
                case SDT_SYS286CGT: /* system 286 call gate */
                case SDT_SYS386CGT: /* system 386 call gate */
                        /*
                         * I can't think of any reason to allow a user proc
                         * to create a segment of these types.  They are
                         * for OS use only.
                         */
                        return (EACCES);
                        /*NOTREACHED*/

                /* memory segment types */
                case SDT_MEMEC:   /* memory execute only conforming */
                case SDT_MEMEAC:  /* memory execute only accessed conforming */
                case SDT_MEMERC:  /* memory execute read conforming */
                case SDT_MEMERAC: /* memory execute read accessed conforming */
                         /* Must be "present" if executable and conforming. */
                        if (dp->sd.sd_p == 0)
                                return (EACCES);
                        break;
                case SDT_MEMRO:   /* memory read only */
                case SDT_MEMROA:  /* memory read only accessed */
                case SDT_MEMRW:   /* memory read write */
                case SDT_MEMRWA:  /* memory read write accessed */
                case SDT_MEMROD:  /* memory read only expand dwn limit */
                case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
                case SDT_MEMRWD:  /* memory read write expand dwn limit */
                case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
                case SDT_MEME:    /* memory execute only */
                case SDT_MEMEA:   /* memory execute only accessed */
                case SDT_MEMER:   /* memory execute read */
                case SDT_MEMERA:  /* memory execute read accessed */
                        break;
                default:
                        return (EINVAL);
                        /*NOTREACHED*/
                }

                /* Only user (ring-3) descriptors may be present. */
                if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
                        return (EACCES);
        }

        if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
                /* Allocate a free slot */
                mtx_lock_spin(&dt_lock);
                if ((pldt = mdp->md_ldt) == NULL) {
                        if ((error = i386_ldt_grow(td, NLDT + 1))) {
                                mtx_unlock_spin(&dt_lock);
                                return (error);
                        }
                        pldt = mdp->md_ldt;
                }
again:
                /*
                 * Start scanning a bit up to leave room for NVidia and
                 * Wine, which still use the "Blat" method of allocation.
                 */
                dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
                for (i = NLDT; i < pldt->ldt_len; ++i) {
                        if (dp->sd.sd_type == SDT_SYSNULL)
                                break;
                        dp++;
                }
                if (i >= pldt->ldt_len) {
                        if ((error = i386_ldt_grow(td, pldt->ldt_len+1))) {
                                mtx_unlock_spin(&dt_lock);
                                return (error);
                        }
                        goto again;
                }
                uap->start = i;
                error = i386_set_ldt_data(td, i, 1, descs);
                mtx_unlock_spin(&dt_lock);
        } else {
                largest_ld = uap->start + uap->num;
                mtx_lock_spin(&dt_lock);
                if (!(error = i386_ldt_grow(td, largest_ld))) {
                        error = i386_set_ldt_data(td, uap->start, uap->num,
                            descs);
                }
                mtx_unlock_spin(&dt_lock);
        }
        if (error == 0)
                td->td_retval[0] = uap->start;
        return (error);
}
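
From userland, the LDT_AUTO_ALLOC path above is typically driven through
the libc wrapper i386_set_ldt(3).  A minimal sketch that installs one
flat ring-3 data descriptor and prints the slot the kernel picked (the
descriptor fields mirror what this function validates; forming a
selector with LSEL() is illustrative):

#include <machine/segments.h>
#include <machine/sysarch.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        union descriptor d;
        int slot;

        memset(&d, 0, sizeof(d));
        d.sd.sd_lolimit = 0xffff;       /* with sd_gran, a 4GB limit */
        d.sd.sd_hilimit = 0xf;
        d.sd.sd_type = SDT_MEMRWA;      /* read/write data, accessed */
        d.sd.sd_dpl = SEL_UPL;          /* ring 3, as enforced above */
        d.sd.sd_p = 1;
        d.sd.sd_def32 = 1;
        d.sd.sd_gran = 1;

        slot = i386_set_ldt(LDT_AUTO_ALLOC, &d, 1);
        if (slot < 0)
                err(1, "i386_set_ldt");
        printf("descriptor installed in LDT slot %d (selector 0x%x)\n",
            slot, LSEL(slot, SEL_UPL));
        return (0);
}
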
#ifdef XEN
static int
i386_set_ldt_data(struct thread *td, int start, int num,
        union descriptor *descs)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt = mdp->md_ldt;

        mtx_assert(&dt_lock, MA_OWNED);

        /* Fill in range */
        bcopy(descs,
            &((union descriptor *)(pldt->ldt_base))[start],
            num * sizeof(union descriptor));
        return (0);
}
#else
static int
i386_set_ldt_data(struct thread *td, int start, int num,
        union descriptor *descs)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt = mdp->md_ldt;

        mtx_assert(&dt_lock, MA_OWNED);

        /* Fill in range */
        bcopy(descs,
            &((union descriptor *)(pldt->ldt_base))[start],
            num * sizeof(union descriptor));
        return (0);
}
#endif /* !XEN */

static int
i386_ldt_grow(struct thread *td, int len)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *new_ldt, *pldt;
        caddr_t old_ldt_base = NULL_LDT_BASE;
        int old_ldt_len = 0;

        mtx_assert(&dt_lock, MA_OWNED);

        if (len > MAX_LD)
                return (ENOMEM);
        if (len < NLDT + 1)
                len = NLDT + 1;

        /* Allocate a user ldt. */
        if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
                new_ldt = user_ldt_alloc(mdp, len);
                if (new_ldt == NULL)
                        return (ENOMEM);
                pldt = mdp->md_ldt;

                if (pldt != NULL) {
                        if (new_ldt->ldt_len <= pldt->ldt_len) {
                                /*
                                 * We just lost the race for allocation, so
                                 * free the new object and return.
                                 */
                                mtx_unlock_spin(&dt_lock);
                                kmem_free(kernel_map,
                                   (vm_offset_t)new_ldt->ldt_base,
                                   new_ldt->ldt_len * sizeof(union descriptor));
                                free(new_ldt, M_SUBPROC);
                                mtx_lock_spin(&dt_lock);
                                return (0);
                        }

                        /*
                         * We have to substitute the current LDT entry for
                         * curproc with the new one since its size grew.
                         */
                        old_ldt_base = pldt->ldt_base;
                        old_ldt_len = pldt->ldt_len;
                        pldt->ldt_sd = new_ldt->ldt_sd;
                        pldt->ldt_base = new_ldt->ldt_base;
                        pldt->ldt_len = new_ldt->ldt_len;
                } else
                        mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
                /*
                 * Signal other cpus to reload their LDT.  We must release
                 * dt_lock here because the other CPUs' curthreads do not
                 * hold it and would block trying to acquire it during the
                 * rendezvous.
                 */
                mtx_unlock_spin(&dt_lock);
                smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
                    NULL, td->td_proc->p_vmspace);
#else
                set_user_ldt(&td->td_proc->p_md);
                mtx_unlock_spin(&dt_lock);
#endif
                if (old_ldt_base != NULL_LDT_BASE) {
                        kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
                            old_ldt_len * sizeof(union descriptor));
                        free(new_ldt, M_SUBPROC);
                }
                mtx_lock_spin(&dt_lock);
        }
        return (0);
}
