FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/sys_machdep.c


/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/8.3/sys/i386/i386/sys_machdep.c 231151 2012-02-07 19:23:31Z jhb $");

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysproto.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/pcb_ext.h>
#include <machine/proc.h>
#include <machine/sysarch.h>

#include <security/audit/audit.h>

#ifdef XEN
#include <machine/xen/xenfunc.h>

void i386_reset_ldt(struct proc_ldt *pldt);

void
i386_reset_ldt(struct proc_ldt *pldt)
{
        xen_set_ldt((vm_offset_t)pldt->ldt_base, pldt->ldt_len);
}
#else
#define i386_reset_ldt(x)
#endif

#include <vm/vm_kern.h>         /* for kernel_map */

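/*
 * Sizing notes (editorial): each union descriptor is 8 bytes, so a 4 KB
 * page holds 512 of them (LD_PER_PAGE) and the 8192-entry architectural
 * maximum (MAX_LD) spans 16 pages.  NEW_MAX_LD() rounds a requested
 * descriptor count up to a multiple of LD_PER_PAGE, and
 * SIZE_FROM_LARGEST_LD() converts that count to a byte size by shifting
 * left by 3 (i.e. multiplying by 8).
 */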
#define MAX_LD 8192
#define LD_PER_PAGE 512
#define NEW_MAX_LD(num)  ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
#define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
#define NULL_LDT_BASE   ((caddr_t)NULL)

#ifdef SMP
static void set_user_ldt_rv(struct vmspace *vmsp);
#endif
static int i386_set_ldt_data(struct thread *, int start, int num,
        union descriptor *descs);
static int i386_ldt_grow(struct thread *td, int len);

#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
        int op;
        char *parms;
};
#endif

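/*
 * sysarch(2): the i386 machine-dependent syscall multiplexer.  For the
 * LDT and I/O-permission ops the op-specific argument structure is
 * copied in up front, so each handler below works on a kernel-space copy.
 */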
int
sysarch(struct thread *td, struct sysarch_args *uap)
{
        int error;
        union descriptor *lp;
        union {
                struct i386_ldt_args largs;
                struct i386_ioperm_args iargs;
        } kargs;
        uint32_t base;
        struct segment_descriptor sd, *sdp;

        AUDIT_ARG_CMD(uap->op);
        switch (uap->op) {
        case I386_GET_IOPERM:
        case I386_SET_IOPERM:
                if ((error = copyin(uap->parms, &kargs.iargs,
                    sizeof(struct i386_ioperm_args))) != 0)
                        return (error);
                break;
        case I386_GET_LDT:
        case I386_SET_LDT:
                if ((error = copyin(uap->parms, &kargs.largs,
                    sizeof(struct i386_ldt_args))) != 0)
                        return (error);
                if (kargs.largs.num > MAX_LD || kargs.largs.num <= 0)
                        return (EINVAL);
                break;
        default:
                break;
        }

        switch (uap->op) {
        case I386_GET_LDT:
                error = i386_get_ldt(td, &kargs.largs);
                break;
        case I386_SET_LDT:
                if (kargs.largs.descs != NULL) {
                        lp = (union descriptor *)kmem_alloc(kernel_map,
                            kargs.largs.num * sizeof(union descriptor));
                        if (lp == NULL) {
                                error = ENOMEM;
                                break;
                        }
                        error = copyin(kargs.largs.descs, lp,
                            kargs.largs.num * sizeof(union descriptor));
                        if (error == 0)
                                error = i386_set_ldt(td, &kargs.largs, lp);
                        kmem_free(kernel_map, (vm_offset_t)lp,
                            kargs.largs.num * sizeof(union descriptor));
                } else {
                        error = i386_set_ldt(td, &kargs.largs, NULL);
                }
                break;
        case I386_GET_IOPERM:
                error = i386_get_ioperm(td, &kargs.iargs);
                if (error == 0)
                        error = copyout(&kargs.iargs, uap->parms,
                            sizeof(struct i386_ioperm_args));
                break;
        case I386_SET_IOPERM:
                error = i386_set_ioperm(td, &kargs.iargs);
                break;
        case I386_VM86:
                error = vm86_sysarch(td, uap->parms);
                break;
        case I386_GET_FSBASE:
                sdp = &td->td_pcb->pcb_fsd;
                base = sdp->sd_hibase << 24 | sdp->sd_lobase;
                error = copyout(&base, uap->parms, sizeof(base));
                break;
        case I386_SET_FSBASE:
                error = copyin(uap->parms, &base, sizeof(base));
                if (!error) {
                        /*
                         * Construct a descriptor and store it in the pcb for
                         * the next context switch.  Also store it in the gdt
                         * so that the load of tf_fs into %fs will activate it
                         * at return to userland.
                         */
                        sd.sd_lobase = base & 0xffffff;
                        sd.sd_hibase = (base >> 24) & 0xff;
#ifdef XEN
                        /* need to do nosegneg like Linux */
                        sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
                        sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
                        sd.sd_hilimit = 0xf;
                        sd.sd_type  = SDT_MEMRWA;
                        sd.sd_dpl   = SEL_UPL;
                        sd.sd_p     = 1;
                        sd.sd_xx    = 0;
                        sd.sd_def32 = 1;
                        sd.sd_gran  = 1;
                        critical_enter();
                        td->td_pcb->pcb_fsd = sd;
#ifdef XEN
                        HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[0]),
                            *(uint64_t *)&sd);
#else
                        PCPU_GET(fsgs_gdt)[0] = sd;
#endif
                        critical_exit();
                        td->td_frame->tf_fs = GSEL(GUFS_SEL, SEL_UPL);
                }
                break;
        case I386_GET_GSBASE:
                sdp = &td->td_pcb->pcb_gsd;
                base = sdp->sd_hibase << 24 | sdp->sd_lobase;
                error = copyout(&base, uap->parms, sizeof(base));
                break;
        case I386_SET_GSBASE:
                error = copyin(uap->parms, &base, sizeof(base));
                if (!error) {
                        /*
                         * Construct a descriptor and store it in the pcb for
                         * the next context switch.  Also store it in the gdt
                         * because we have to do a load_gs() right now.
                         */
                        sd.sd_lobase = base & 0xffffff;
                        sd.sd_hibase = (base >> 24) & 0xff;

#ifdef XEN
                        /* need to do nosegneg like Linux */
                        sd.sd_lolimit = (HYPERVISOR_VIRT_START >> 12) & 0xffff;
#else
                        sd.sd_lolimit = 0xffff; /* 4GB limit, wraps around */
#endif
                        sd.sd_hilimit = 0xf;
                        sd.sd_type  = SDT_MEMRWA;
                        sd.sd_dpl   = SEL_UPL;
                        sd.sd_p     = 1;
                        sd.sd_xx    = 0;
                        sd.sd_def32 = 1;
                        sd.sd_gran  = 1;
                        critical_enter();
                        td->td_pcb->pcb_gsd = sd;
#ifdef XEN
                        HYPERVISOR_update_descriptor(vtomach(&PCPU_GET(fsgs_gdt)[1]),
                            *(uint64_t *)&sd);
#else
                        PCPU_GET(fsgs_gdt)[1] = sd;
#endif
                        critical_exit();
                        load_gs(GSEL(GUGS_SEL, SEL_UPL));
                }
                break;
        default:
                error = EINVAL;
                break;
        }
        return (error);
}

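/*
 * Allocate and install an extended PCB holding a private TSS whose I/O
 * permission bitmap and vm86 interrupt redirection map back the ioperm
 * and vm86 facilities.  Both maps are initialized to all ones (for the
 * I/O map that means all ports denied), and the new TSS descriptor is
 * loaded on the current CPU.
 */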
int
i386_extend_pcb(struct thread *td)
{
        int i, offset;
        u_long *addr;
        struct pcb_ext *ext;
        struct soft_segment_descriptor ssd = {
                0,                      /* segment base address (overwritten) */
                ctob(IOPAGES + 1) - 1,  /* length */
                SDT_SYS386TSS,          /* segment type */
                0,                      /* priority level */
                1,                      /* descriptor present */
                0, 0,
                0,                      /* default 32 size */
                0                       /* granularity */
        };

        ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
        if (ext == NULL)
                return (ENOMEM);
        bzero(ext, sizeof(struct pcb_ext));
        /* -16 is so we can convert a trapframe into a vm86trapframe in place */
        ext->ext_tss.tss_esp0 = td->td_kstack + ctob(KSTACK_PAGES) -
            sizeof(struct pcb) - 16;
        ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
        /*
         * The last byte of the i/o map must be followed by an 0xff byte.
         * We arbitrarily allocate 16 bytes here, to keep the starting
         * address on a doubleword boundary.
         */
        offset = PAGE_SIZE - 16;
        ext->ext_tss.tss_ioopt =
            (offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
        ext->ext_iomap = (caddr_t)ext + offset;
        ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;

        addr = (u_long *)ext->ext_vm86.vm86_intmap;
        for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
                *addr++ = ~0;

        ssd.ssd_base = (unsigned)&ext->ext_tss;
        ssd.ssd_limit -= ((unsigned)&ext->ext_tss - (unsigned)ext);
        ssdtosd(&ssd, &ext->ext_tssd);

        KASSERT(td == curthread, ("giving TSS to !curthread"));
        KASSERT(td->td_pcb->pcb_ext == 0, ("already have a TSS!"));

        /* Switch to the new TSS. */
        critical_enter();
        td->td_pcb->pcb_ext = ext;
        PCPU_SET(private_tss, 1);
        *PCPU_GET(tss_gdt) = ext->ext_tssd;
        ltr(GSEL(GPROC0_SEL, SEL_KPL));
        critical_exit();

        return (0);
}

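/*
 * In the TSS I/O permission bitmap a clear bit grants userland access to
 * the corresponding port and a set bit denies it, so "enable" below
 * clears bits and "disable" sets them.
 */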
int
i386_set_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
        int i, error;
        char *iomap;

        if ((error = priv_check(td, PRIV_IO)) != 0)
                return (error);
        if ((error = securelevel_gt(td->td_ucred, 0)) != 0)
                return (error);
        /*
         * XXX
         * While this is restricted to root, we should probably figure out
         * whether any other driver is using this i/o address, so as not to
         * cause confusion.  This probably requires a global 'usage registry'.
         */

        if (td->td_pcb->pcb_ext == 0)
                if ((error = i386_extend_pcb(td)) != 0)
                        return (error);
        iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

        if (uap->start + uap->length > IOPAGES * PAGE_SIZE * NBBY)
                return (EINVAL);

        for (i = uap->start; i < uap->start + uap->length; i++) {
                if (uap->enable)
                        iomap[i >> 3] &= ~(1 << (i & 7));
                else
                        iomap[i >> 3] |= (1 << (i & 7));
        }
        return (error);
}

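/*
 * Report in uap->enable whether the port at uap->start is accessible, and
 * in uap->length the size of the contiguous run of ports sharing that
 * state.
 */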
int
i386_get_ioperm(struct thread *td, struct i386_ioperm_args *uap)
{
        int i, state;
        char *iomap;

        if (uap->start >= IOPAGES * PAGE_SIZE * NBBY)
                return (EINVAL);

        if (td->td_pcb->pcb_ext == 0) {
                uap->length = 0;
                goto done;
        }

        iomap = (char *)td->td_pcb->pcb_ext->ext_iomap;

        i = uap->start;
        state = (iomap[i >> 3] >> (i & 7)) & 1;
        uap->enable = !state;
        uap->length = 1;

        for (i = uap->start + 1; i < IOPAGES * PAGE_SIZE * NBBY; i++) {
                if (state != ((iomap[i >> 3] >> (i & 7)) & 1))
                        break;
                uap->length++;
        }

done:
        return (0);
}

/*
 * Update the GDT entry pointing to the LDT to point to the LDT of the
 * current process.  The dt_lock is acquired and released internally as
 * needed.
 */
void
set_user_ldt(struct mdproc *mdp)
{
        struct proc_ldt *pldt;
        int dtlocked;

        dtlocked = 0;
        if (!mtx_owned(&dt_lock)) {
                mtx_lock_spin(&dt_lock);
                dtlocked = 1;
        }

        pldt = mdp->md_ldt;
#ifdef XEN
        i386_reset_ldt(pldt);
        PCPU_SET(currentldt, (int)pldt);
#else
#ifdef SMP
        gdt[PCPU_GET(cpuid) * NGDT + GUSERLDT_SEL].sd = pldt->ldt_sd;
#else
        gdt[GUSERLDT_SEL].sd = pldt->ldt_sd;
#endif
        lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
        PCPU_SET(currentldt, GSEL(GUSERLDT_SEL, SEL_KPL));
#endif /* XEN */
        if (dtlocked)
                mtx_unlock_spin(&dt_lock);
}

#ifdef SMP
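/*
 * smp_rendezvous() callback: reload the LDT on this CPU, but only if the
 * thread running here shares the vmspace whose LDT just changed.
 */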
static void
set_user_ldt_rv(struct vmspace *vmsp)
{
        struct thread *td;

        td = curthread;
        if (vmsp != td->td_proc->p_vmspace)
                return;

        set_user_ldt(&td->td_proc->p_md);
}
#endif

#ifdef XEN

/*
 * dt_lock must be held. Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
        struct proc_ldt *pldt, *new_ldt;

        mtx_assert(&dt_lock, MA_OWNED);
        mtx_unlock_spin(&dt_lock);
        new_ldt = malloc(sizeof(struct proc_ldt),
                M_SUBPROC, M_WAITOK);

        new_ldt->ldt_len = len = NEW_MAX_LD(len);
        new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
                round_page(len * sizeof(union descriptor)));
        if (new_ldt->ldt_base == NULL) {
                free(new_ldt, M_SUBPROC);
                mtx_lock_spin(&dt_lock);
                return (NULL);
        }
        new_ldt->ldt_refcnt = 1;
        new_ldt->ldt_active = 0;

        mtx_lock_spin(&dt_lock);
        if ((pldt = mdp->md_ldt)) {
                if (len > pldt->ldt_len)
                        len = pldt->ldt_len;
                bcopy(pldt->ldt_base, new_ldt->ldt_base,
                    len * sizeof(union descriptor));
        } else {
                bcopy(ldt, new_ldt->ldt_base, PAGE_SIZE);
        }
        mtx_unlock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
        pmap_map_readonly(kernel_pmap, (vm_offset_t)new_ldt->ldt_base,
                          new_ldt->ldt_len * sizeof(union descriptor));
        mtx_lock_spin(&dt_lock);  /* XXX kill once pmap locking fixed. */
        return (new_ldt);
}
#else
/*
 * dt_lock must be held. Returns with dt_lock held.
 */
struct proc_ldt *
user_ldt_alloc(struct mdproc *mdp, int len)
{
        struct proc_ldt *pldt, *new_ldt;

        mtx_assert(&dt_lock, MA_OWNED);
        mtx_unlock_spin(&dt_lock);
        new_ldt = malloc(sizeof(struct proc_ldt),
                M_SUBPROC, M_WAITOK);

        new_ldt->ldt_len = len = NEW_MAX_LD(len);
        new_ldt->ldt_base = (caddr_t)kmem_alloc(kernel_map,
                len * sizeof(union descriptor));
        if (new_ldt->ldt_base == NULL) {
                free(new_ldt, M_SUBPROC);
                mtx_lock_spin(&dt_lock);
                return (NULL);
        }
        new_ldt->ldt_refcnt = 1;
        new_ldt->ldt_active = 0;

        mtx_lock_spin(&dt_lock);
        gdt_segs[GUSERLDT_SEL].ssd_base = (unsigned)new_ldt->ldt_base;
        gdt_segs[GUSERLDT_SEL].ssd_limit = len * sizeof(union descriptor) - 1;
        ssdtosd(&gdt_segs[GUSERLDT_SEL], &new_ldt->ldt_sd);

        if ((pldt = mdp->md_ldt) != NULL) {
                if (len > pldt->ldt_len)
                        len = pldt->ldt_len;
                bcopy(pldt->ldt_base, new_ldt->ldt_base,
                    len * sizeof(union descriptor));
        } else
                bcopy(ldt, new_ldt->ldt_base, sizeof(ldt));

        return (new_ldt);
}
#endif /* !XEN */

/*
 * Must be called with dt_lock held.  Returns with dt_lock released.
 */
void
user_ldt_free(struct thread *td)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt;

        mtx_assert(&dt_lock, MA_OWNED);
        if ((pldt = mdp->md_ldt) == NULL) {
                mtx_unlock_spin(&dt_lock);
                return;
        }

        if (td == curthread) {
#ifdef XEN
                i386_reset_ldt(&default_proc_ldt);
                PCPU_SET(currentldt, (int)&default_proc_ldt);
#else
                lldt(_default_ldt);
                PCPU_SET(currentldt, _default_ldt);
#endif
        }

        mdp->md_ldt = NULL;
        user_ldt_deref(pldt);
}

void
user_ldt_deref(struct proc_ldt *pldt)
{

        mtx_assert(&dt_lock, MA_OWNED);
        if (--pldt->ldt_refcnt == 0) {
                mtx_unlock_spin(&dt_lock);
                kmem_free(kernel_map, (vm_offset_t)pldt->ldt_base,
                        pldt->ldt_len * sizeof(union descriptor));
                free(pldt, M_SUBPROC);
        } else
                mtx_unlock_spin(&dt_lock);
}

/*
 * Note for the authors of compat layers (linux, etc): copyout() in
 * the function below is not a problem since it presents data in
 * arch-specific format (i.e. i386-specific in this case), not in
 * the OS-specific one.
 */
int
i386_get_ldt(struct thread *td, struct i386_ldt_args *uap)
{
        int error = 0;
        struct proc_ldt *pldt;
        int nldt, num;
        union descriptor *lp;

#ifdef  DEBUG
        printf("i386_get_ldt: start=%d num=%d descs=%p\n",
            uap->start, uap->num, (void *)uap->descs);
#endif

        mtx_lock_spin(&dt_lock);
        if ((pldt = td->td_proc->p_md.md_ldt) != NULL) {
                nldt = pldt->ldt_len;
                lp = &((union descriptor *)(pldt->ldt_base))[uap->start];
                mtx_unlock_spin(&dt_lock);
                num = min(uap->num, nldt);
        } else {
                mtx_unlock_spin(&dt_lock);
                nldt = sizeof(ldt) / sizeof(ldt[0]);
                num = min(uap->num, nldt);
                lp = &ldt[uap->start];
        }

        if ((uap->start > (unsigned int)nldt) ||
            ((unsigned int)num > (unsigned int)nldt) ||
            ((unsigned int)(uap->start + num) > (unsigned int)nldt))
                return (EINVAL);

        error = copyout(lp, uap->descs, num * sizeof(union descriptor));
        if (!error)
                td->td_retval[0] = num;

        return (error);
}

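/*
 * Install user LDT descriptors.  A NULL descs pointer frees the requested
 * range instead (start == num == 0 frees everything above the default
 * NLDT entries), and start == LDT_AUTO_ALLOC with num == 1 asks the
 * kernel to pick a free slot, which is returned in td_retval[0].
 */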
int
i386_set_ldt(struct thread *td, struct i386_ldt_args *uap,
    union descriptor *descs)
{
        int error = 0, i;
        int largest_ld;
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt;
        union descriptor *dp;

#ifdef  DEBUG
        printf("i386_set_ldt: start=%d num=%d descs=%p\n",
            uap->start, uap->num, (void *)uap->descs);
#endif

        if (descs == NULL) {
                /* Free descriptors */
                if (uap->start == 0 && uap->num == 0) {
                        /*
                         * Treat this as a special case, so userland needn't
                         * know the magic number NLDT.
                         */
                        uap->start = NLDT;
                        uap->num = MAX_LD - NLDT;
                }
                if (uap->num <= 0)
                        return (EINVAL);
                mtx_lock_spin(&dt_lock);
                if ((pldt = mdp->md_ldt) == NULL ||
                    uap->start >= pldt->ldt_len) {
                        mtx_unlock_spin(&dt_lock);
                        return (0);
                }
                largest_ld = uap->start + uap->num;
                if (largest_ld > pldt->ldt_len)
                        largest_ld = pldt->ldt_len;
                i = largest_ld - uap->start;
                bzero(&((union descriptor *)(pldt->ldt_base))[uap->start],
                    sizeof(union descriptor) * i);
                mtx_unlock_spin(&dt_lock);
                return (0);
        }

        if (!(uap->start == LDT_AUTO_ALLOC && uap->num == 1)) {
                /* verify range of descriptors to modify */
                largest_ld = uap->start + uap->num;
                if (uap->start >= MAX_LD ||
                    uap->num < 0 || largest_ld > MAX_LD) {
                        return (EINVAL);
                }
        }

        /* Check descriptors for access violations */
        for (i = 0; i < uap->num; i++) {
                dp = &descs[i];

                switch (dp->sd.sd_type) {
                case SDT_SYSNULL:       /* system null */
                        dp->sd.sd_p = 0;
                        break;
                case SDT_SYS286TSS: /* system 286 TSS available */
                case SDT_SYSLDT:    /* system local descriptor table */
                case SDT_SYS286BSY: /* system 286 TSS busy */
                case SDT_SYSTASKGT: /* system task gate */
                case SDT_SYS286IGT: /* system 286 interrupt gate */
                case SDT_SYS286TGT: /* system 286 trap gate */
                case SDT_SYSNULL2:  /* undefined by Intel */
                case SDT_SYS386TSS: /* system 386 TSS available */
                case SDT_SYSNULL3:  /* undefined by Intel */
                case SDT_SYS386BSY: /* system 386 TSS busy */
                case SDT_SYSNULL4:  /* undefined by Intel */
                case SDT_SYS386IGT: /* system 386 interrupt gate */
                case SDT_SYS386TGT: /* system 386 trap gate */
                case SDT_SYS286CGT: /* system 286 call gate */
                case SDT_SYS386CGT: /* system 386 call gate */
                        /*
                         * I can't think of any reason to allow a user proc
                         * to create a segment of these types.  They are
                         * for OS use only.
                         */
                        return (EACCES);
                        /*NOTREACHED*/

                /* memory segment types */
                case SDT_MEMEC:   /* memory execute only conforming */
                case SDT_MEMEAC:  /* memory execute only accessed conforming */
                case SDT_MEMERC:  /* memory execute read conforming */
                case SDT_MEMERAC: /* memory execute read accessed conforming */
                        /* Must be "present" if executable and conforming. */
                        if (dp->sd.sd_p == 0)
                                return (EACCES);
                        break;
                case SDT_MEMRO:   /* memory read only */
                case SDT_MEMROA:  /* memory read only accessed */
                case SDT_MEMRW:   /* memory read write */
                case SDT_MEMRWA:  /* memory read write accessed */
                case SDT_MEMROD:  /* memory read only expand dwn limit */
                case SDT_MEMRODA: /* memory read only expand dwn lim accessed */
                case SDT_MEMRWD:  /* memory read write expand dwn limit */
                case SDT_MEMRWDA: /* memory read write expand dwn lim accessed */
                case SDT_MEME:    /* memory execute only */
                case SDT_MEMEA:   /* memory execute only accessed */
                case SDT_MEMER:   /* memory execute read */
                case SDT_MEMERA:  /* memory execute read accessed */
                        break;
                default:
                        return (EINVAL);
                        /*NOTREACHED*/
                }

                /* Only user (ring-3) descriptors may be present. */
                if ((dp->sd.sd_p != 0) && (dp->sd.sd_dpl != SEL_UPL))
                        return (EACCES);
        }

        if (uap->start == LDT_AUTO_ALLOC && uap->num == 1) {
                /* Allocate a free slot */
                mtx_lock_spin(&dt_lock);
                if ((pldt = mdp->md_ldt) == NULL) {
                        if ((error = i386_ldt_grow(td, NLDT + 1))) {
                                mtx_unlock_spin(&dt_lock);
                                return (error);
                        }
                        pldt = mdp->md_ldt;
                }
again:
                /*
                 * Start scanning a bit up to leave room for NVidia and
                 * Wine, which still use the "Blat" method of allocation.
                 */
                dp = &((union descriptor *)(pldt->ldt_base))[NLDT];
                for (i = NLDT; i < pldt->ldt_len; ++i) {
                        if (dp->sd.sd_type == SDT_SYSNULL)
                                break;
                        dp++;
                }
                if (i >= pldt->ldt_len) {
                        if ((error = i386_ldt_grow(td, pldt->ldt_len + 1))) {
                                mtx_unlock_spin(&dt_lock);
                                return (error);
                        }
                        goto again;
                }
                uap->start = i;
                error = i386_set_ldt_data(td, i, 1, descs);
                mtx_unlock_spin(&dt_lock);
        } else {
                largest_ld = uap->start + uap->num;
                mtx_lock_spin(&dt_lock);
                if (!(error = i386_ldt_grow(td, largest_ld))) {
                        error = i386_set_ldt_data(td, uap->start, uap->num,
                            descs);
                }
                mtx_unlock_spin(&dt_lock);
        }
        if (error == 0)
                td->td_retval[0] = uap->start;
        return (error);
}
#ifdef XEN
static int
i386_set_ldt_data(struct thread *td, int start, int num,
        union descriptor *descs)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt = mdp->md_ldt;

        mtx_assert(&dt_lock, MA_OWNED);

        while (num) {
                xen_update_descriptor(
                    &((union descriptor *)(pldt->ldt_base))[start],
                    descs);
                num--;
                start++;
                descs++;
        }
        return (0);
}
#else
static int
i386_set_ldt_data(struct thread *td, int start, int num,
        union descriptor *descs)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *pldt = mdp->md_ldt;

        mtx_assert(&dt_lock, MA_OWNED);

        /* Fill in range */
        bcopy(descs,
            &((union descriptor *)(pldt->ldt_base))[start],
            num * sizeof(union descriptor));
        return (0);
}
#endif /* !XEN */

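/*
 * Grow the per-process LDT to hold at least "len" descriptors, swapping
 * the larger copy in for the old one and signalling other CPUs to reload.
 * Called (and returns) with dt_lock held, though the lock is dropped
 * internally around the blocking allocation and the reload rendezvous.
 */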
static int
i386_ldt_grow(struct thread *td, int len)
{
        struct mdproc *mdp = &td->td_proc->p_md;
        struct proc_ldt *new_ldt, *pldt;
        caddr_t old_ldt_base = NULL_LDT_BASE;
        int old_ldt_len = 0;

        mtx_assert(&dt_lock, MA_OWNED);

        if (len > MAX_LD)
                return (ENOMEM);
        if (len < NLDT + 1)
                len = NLDT + 1;

        /* Allocate a user ldt. */
        if ((pldt = mdp->md_ldt) == NULL || len > pldt->ldt_len) {
                new_ldt = user_ldt_alloc(mdp, len);
                if (new_ldt == NULL)
                        return (ENOMEM);
                pldt = mdp->md_ldt;

                if (pldt != NULL) {
                        if (new_ldt->ldt_len <= pldt->ldt_len) {
                                /*
                                 * We just lost the race for allocation, so
                                 * free the new object and return.
                                 */
                                mtx_unlock_spin(&dt_lock);
                                kmem_free(kernel_map,
                                   (vm_offset_t)new_ldt->ldt_base,
                                   new_ldt->ldt_len * sizeof(union descriptor));
                                free(new_ldt, M_SUBPROC);
                                mtx_lock_spin(&dt_lock);
                                return (0);
                        }

                        /*
                         * We have to substitute the current LDT entry for
                         * curproc with the new one since its size grew.
                         */
                        old_ldt_base = pldt->ldt_base;
                        old_ldt_len = pldt->ldt_len;
                        pldt->ldt_sd = new_ldt->ldt_sd;
                        pldt->ldt_base = new_ldt->ldt_base;
                        pldt->ldt_len = new_ldt->ldt_len;
                } else
                        mdp->md_ldt = pldt = new_ldt;
#ifdef SMP
                /*
                 * Signal other cpus to reload the LDT.  We need to unlock
                 * dt_lock here because other CPUs will contend for it: their
                 * curthreads won't hold the lock and would block trying to
                 * acquire it.
                 */
                mtx_unlock_spin(&dt_lock);
                smp_rendezvous(NULL, (void (*)(void *))set_user_ldt_rv,
                    NULL, td->td_proc->p_vmspace);
#else
                set_user_ldt(&td->td_proc->p_md);
                mtx_unlock_spin(&dt_lock);
#endif
                if (old_ldt_base != NULL_LDT_BASE) {
                        kmem_free(kernel_map, (vm_offset_t)old_ldt_base,
                            old_ldt_len * sizeof(union descriptor));
                        free(new_ldt, M_SUBPROC);
                }
                mtx_lock_spin(&dt_lock);
        }
        return (0);
}
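
For context, a minimal userland sketch of driving this syscall through the standard <machine/sysarch.h> interface. This example is editorial, not part of the source file above, and keeps error handling to the bare minimum:

#include <stdio.h>
#include <stdint.h>
#include <machine/sysarch.h>

int
main(void)
{
        uint32_t base;

        /*
         * Ask the kernel for the current %fs segment base; I386_GET_FSBASE
         * is handled by the sysarch() switch shown above via copyout().
         */
        if (sysarch(I386_GET_FSBASE, &base) != 0) {
                perror("sysarch");
                return (1);
        }
        printf("%%fs base: 0x%08x\n", base);
        return (0);
}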
