FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/bsd_i386.c

    1 /*
    2  * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 #ifdef  MACH_BSD
   26 #include <cpus.h>
   27 #include <mach_rt.h>
   28 #include <mach_debug.h>
   29 #include <mach_ldebug.h>
   30 
   31 #include <mach/kern_return.h>
   32 #include <mach/thread_status.h>
   33 #include <mach/vm_param.h>
   34 
   35 #include <kern/counters.h>
   36 #include <kern/cpu_data.h>
   37 #include <kern/mach_param.h>
   38 #include <kern/task.h>
   39 #include <kern/thread.h>
   40 #include <kern/thread_swap.h>
   41 #include <kern/sched_prim.h>
   42 #include <kern/misc_protos.h>
   43 #include <kern/assert.h>
   44 #include <kern/spl.h>
   45 #include <kern/syscall_sw.h>
   46 #include <ipc/ipc_port.h>
   47 #include <vm/vm_kern.h>
   48 #include <vm/pmap.h>
   49 
   50 #include <i386/thread.h>
   51 #include <i386/eflags.h>
   52 #include <i386/proc_reg.h>
   53 #include <i386/seg.h>
   54 #include <i386/tss.h>
   55 #include <i386/user_ldt.h>
   56 #include <i386/fpu.h>
   57 #include <i386/iopb_entries.h>
   58 #include <i386/machdep_call.h>
   59 
   60 #include <sys/syscall.h>
   61 #include <sys/ktrace.h>
   62 struct proc;
   63 
   64 kern_return_t
   65 thread_userstack(
   66     thread_t,
   67     int,
   68     thread_state_t,
   69     unsigned int,
   70     vm_offset_t *,
   71         int *
   72 );
   73 
   74 kern_return_t
   75 thread_entrypoint(
   76     thread_t,
   77     int,
   78     thread_state_t,
   79     unsigned int,
   80     vm_offset_t *
   81 ); 
   82 
   83 struct i386_saved_state *
   84 get_user_regs(
   85         thread_act_t);
   86 
   87 unsigned int get_msr_exportmask(void);
   88 
   89 unsigned int get_msr_nbits(void);
   90 
   91 unsigned int get_msr_rbits(void);
   92 
   93 kern_return_t
   94 thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);
   95 
   96 /*
   97  * thread_userstack:
   98  *
   99  * Return the user stack pointer from the machine
  100  * dependent thread state info.
  101  */
  102 kern_return_t
  103 thread_userstack(
  104     thread_t            thread,
  105     int                 flavor,
  106     thread_state_t      tstate,
  107     unsigned int        count,
  108     vm_offset_t         *user_stack,
  109         int                                     *customstack
  110 )
  111 {
  112         struct i386_saved_state *state;
  113         i386_thread_state_t *state25;
  114         vm_offset_t     uesp;
  115 
  116         if (customstack)
  117                         *customstack = 0;
  118 
  119         switch (flavor) {
  120         case i386_THREAD_STATE: /* FIXME */
  121                 state25 = (i386_thread_state_t *) tstate;
  122                 if (state25->esp)
  123                         *user_stack = state25->esp;
  124                 if (customstack && state25->esp)
  125                         *customstack = 1;
  126                 else if (customstack)  /* don't write through a NULL pointer */
  127                         *customstack = 0;
  128                 break;
  129 
  130         case i386_NEW_THREAD_STATE:
  131                 if (count < i386_NEW_THREAD_STATE_COUNT)
  132                         return (KERN_INVALID_ARGUMENT);
  133                 else {
  134                         state = (struct i386_saved_state *) tstate;
  135                         uesp = state->uesp;
  136                 }
  137 
  138                 /* If a valid user stack is specified, use it. */
  139                 if (uesp)
  140                         *user_stack = uesp;
  141                 if (customstack && uesp)
  142                         *customstack = 1;
  143                 else if (customstack)  /* don't write through a NULL pointer */
  144                         *customstack = 0;
  145                 break;
  146         default:
  147                 return (KERN_INVALID_ARGUMENT);
  148         }
  149                 
  150         return (KERN_SUCCESS);
  151 }    
  152 
  153 kern_return_t
  154 thread_entrypoint(
  155     thread_t            thread,
  156     int                 flavor,
  157     thread_state_t      tstate,
  158     unsigned int        count,
  159     vm_offset_t         *entry_point
  160 )
  161 { 
  162     struct i386_saved_state     *state;
  163     i386_thread_state_t *state25;
  164 
  165     /*
  166      * Set a default.
  167      */
  168     if (*entry_point == 0)
  169         *entry_point = VM_MIN_ADDRESS;
  170                 
  171     switch (flavor) {
  172     case i386_THREAD_STATE:
  173         state25 = (i386_thread_state_t *) tstate;
  174         *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
  175         break;
  176 
  177     case i386_NEW_THREAD_STATE:
  178         if (count < i386_NEW_THREAD_STATE_COUNT)
  179             return (KERN_INVALID_ARGUMENT);
  180         else {
  181                 state = (struct i386_saved_state *) tstate;
  182 
  183                 /*
  184                 * If a valid entry point is specified, use it.
  185                 */
  186                 *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS;
  187         }
  188         break;
  189     }
  190 
  191     return (KERN_SUCCESS);
  192 }   
  193 
  194 struct i386_saved_state *
  195 get_user_regs(thread_act_t th)
  196 {
  197         if (th->mact.pcb)
  198                 return(USER_REGS(th));
  199         else {
  200                 printf("[get_user_regs: thread does not have pcb]");
  201                 return NULL;
  202         }
  203 }
  204 
  205 /*
  206  * Duplicate parent state in child
  207  * for U**X fork.
  208  */
  209 kern_return_t
  210 machine_thread_dup(
  211     thread_act_t                parent,
  212     thread_act_t                child
  213 )
  214 {
  215         struct i386_saved_state *parent_state, *child_state;
  216         struct i386_machine_state       *ims;
  217         struct i386_float_state         floatregs;
  218 
  219 #ifdef  XXX
  220         /* Save the FPU state */
  221         if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
  222                 fp_state_save(parent);
  223         }
  224 #endif
  225 
  226         if (child->mact.pcb == NULL || parent->mact.pcb == NULL)
  227                 return (KERN_FAILURE);
  228 
  229         /* Copy over the i386_saved_state registers */
  230         child->mact.pcb->iss = parent->mact.pcb->iss;
  231 
  232         /* Check to see if parent is using floating point
  233          * and if so, copy the registers to the child
  234          * FIXME - make sure this works.
  235          */
  236 
  237         if (parent->mact.pcb->ims.ifps)  {
  238                 if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
  239                         fpu_set_state(child, &floatregs);
  240         }
  241         
  242         /* FIXME - should user-specified LDT, TSS and V86 info
  243          * be duplicated as well? - probably not.
  244          */
  245 
  246         return (KERN_SUCCESS);
  247 }
  248 
  249 /* 
  250  * FIXME - thread_set_child
  251  */
  252 
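      /*
       * thread_set_child/thread_set_parent fix up the saved user registers
       * so that fork() can return distinct values in the two processes:
       * both sides get the pid in eax, edx is 1 in the child and 0 in the
       * parent, and the carry flag is cleared to indicate success.
       * Presumably the user-level fork stub tests edx to decide whether to
       * return 0 (child) or the pid (parent); that convention lives in
       * libc, not here.
       */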
  253 void thread_set_child(thread_act_t child, int pid);
  254 void
  255 thread_set_child(thread_act_t child, int pid)
  256 {
  257         child->mact.pcb->iss.eax = pid;
  258         child->mact.pcb->iss.edx = 1;
  259         child->mact.pcb->iss.efl &= ~EFL_CF;
  260 }
  261 void thread_set_parent(thread_act_t parent, int pid);
  262 void
  263 thread_set_parent(thread_act_t parent, int pid)
  264 {
  265         parent->mact.pcb->iss.eax = pid;
  266         parent->mact.pcb->iss.edx = 0;
  267         parent->mact.pcb->iss.efl &= ~EFL_CF;
  268 }
  269 
  270 
  271 
  272 /*
  273  * Move pages from one kernel virtual address to another.
  274  * Both addresses are assumed to reside in the Sysmap,
  275  * and size must be a multiple of the page size.
  276  */
  277 void
  278 pagemove(
  279         register caddr_t from,
  280         register caddr_t to,
  281         int size)
  282 {
  283         pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size);
  284 }
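
      /*
       * A minimal usage sketch, assuming the constraints stated above hold
       * (both addresses are kernel virtual addresses and the size is a
       * multiple of the page size):
       *
       *         pagemove(old_kva, new_kva, round_page(nbytes));
       *
       * Nothing in this wrapper validates alignment or size; it simply
       * forwards the request to pmap_movepage().
       */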
  285 
  286 /*
  287  * System Call handling code
  288  */
  289 
  290 #define ERESTART        -1              /* restart syscall */
  291 #define EJUSTRETURN     -2              /* don't modify regs, just return */
  292 
  293 struct sysent {         /* system call table */
  294         unsigned short          sy_narg;                /* number of args */
  295         char                    sy_parallel;    /* can execute in parallel */
  296         char                    sy_funnel;      /* funnel type */
  297         unsigned long           (*sy_call)(void *, void *, int *);      /* implementing function */
  298 };
  299 
  300 #define NO_FUNNEL 0
  301 #define KERNEL_FUNNEL 1
  302 #define NETWORK_FUNNEL 2
  303 
  304 extern funnel_t * kernel_flock;
  305 extern funnel_t * network_flock;
  306 
  307 extern struct sysent sysent[];
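
      /*
       * A hypothetical sysent entry (a sketch for illustration, not copied
       * from the real system call table): a three-argument call that must
       * run under the kernel funnel might be described as
       *
       *         { 3, 0, KERNEL_FUNNEL,
       *           (unsigned long (*)(void *, void *, int *)) read },
       *
       * sy_narg is counted in ints, so unix_syscall() below copies
       * sy_narg * sizeof(int) bytes of arguments from the user stack.
       */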
  308 
  309 int set_bsduthreadargs (thread_act_t, struct i386_saved_state *, void *);
  310 
  311 void * get_bsduthreadarg(thread_act_t);
  312 
  313 void unix_syscall(struct i386_saved_state *);
  314 
  315 void
  316 unix_syscall_return(int error)
  317 {
  318     thread_act_t                thread;
  319         volatile int *rval;
  320         struct i386_saved_state *regs;
  321         struct proc *p;
  322         struct proc *current_proc();
  323         unsigned short code;
  324         vm_offset_t params;
  325         struct sysent *callp;
  326         extern int nsysent;
  327 
  328     thread = current_act();
  329     rval = (int *)get_bsduthreadrval(thread);
  330         p = current_proc();
  331 
  332         regs = USER_REGS(thread);
  333 
  334         /* reconstruct code for tracing before blasting eax */
  335         code = regs->eax;
  336         params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
  337         callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
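              /*
               * callp == sysent means code 0, the indirect syscall(2) entry,
               * so the real syscall number is read from the first user
               * argument; out-of-range numbers were redirected to slot 63
               * above, presumably an invalid-syscall entry.
               */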
  338         if (callp == sysent) {
  339           code = fuword(params);
  340         }
  341 
  342         if (error == ERESTART) {
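                      /*
                       * Back the saved eip up over the trapping instruction
                       * so the system call is re-issued when the process
                       * resumes; the 7 bytes presumably cover the lcall
                       * sequence used by the user-level syscall stubs.
                       */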
  343                 regs->eip -= 7;
  344         }
  345         else if (error != EJUSTRETURN) {
  346                 if (error) {
  347                     regs->eax = error;
  348                     regs->efl |= EFL_CF;        /* carry bit */
  349                 } else { /* (not error) */
  350                     regs->eax = rval[0];
  351                     regs->edx = rval[1];
  352                     regs->efl &= ~EFL_CF;
  353                 } 
  354         }
  355 
  356         ktrsysret(p, code, error, rval[0], callp->sy_funnel);
  357 
  358         KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
  359                 error, rval[0], rval[1], 0, 0);
  360 
  361         if (callp->sy_funnel != NO_FUNNEL)
  362                 (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
  363 
  364     thread_exception_return();
  365     /* NOTREACHED */
  366 }
  367 
  368 
  369 void
  370 unix_syscall(struct i386_saved_state *regs)
  371 {
  372     thread_act_t                thread;
  373     void        *vt; 
  374     unsigned short      code;
  375     struct sysent               *callp;
  376         int     nargs, error;
  377         volatile int *rval;
  378         int funnel_type;
  379     vm_offset_t         params;
  380     extern int nsysent;
  381         struct proc *p;
  382         struct proc *current_proc();
  383 
  384     thread = current_act();
  385     p = current_proc();
  386     rval = (int *)get_bsduthreadrval(thread);
  387 
  388     //printf("[scall : eax %x]",  regs->eax);
  389     code = regs->eax;
  390     params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
  391     callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
  392     if (callp == sysent) {
  393         code = fuword(params);
  394         params += sizeof (int);
  395         callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
  396     }
  397     
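          /*
           * vt is the per-thread staging area returned by
           * get_bsduthreadarg(); copyin() below fills it with sy_narg ints
           * taken from the user stack, where params was set to point just
           * past the return address.
           */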
  398     vt = get_bsduthreadarg(thread);
  399 
  400     if ((nargs = (callp->sy_narg * sizeof (int))) &&
  401             (error = copyin((char *) params, (char *)vt , nargs)) != 0) {
  402         regs->eax = error;
  403         regs->efl |= EFL_CF;
  404         thread_exception_return();
  405         /* NOTREACHED */
  406     }
  407     
  408     rval[0] = 0;
  409     rval[1] = regs->edx;
  410 
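              /*
               * Take whichever funnel the call is tagged with before
               * entering BSD code; the kernel and network funnels are the
               * coarse locks serializing those subsystems in this kernel,
               * and they are dropped again before returning to user mode.
               */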
  411         funnel_type = callp->sy_funnel;
  412         if(funnel_type == KERNEL_FUNNEL)
  413                 (void) thread_funnel_set(kernel_flock, TRUE);
  414         else if (funnel_type == NETWORK_FUNNEL)
  415                 (void) thread_funnel_set(network_flock, TRUE);
  416         
  417    set_bsduthreadargs(thread, regs, NULL);
  418 
  419     if (callp->sy_narg > 8)
  420         panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);
  421 
  422         ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);
  423 
  424         { 
  425           int *ip = (int *)vt;
  426           KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
  427               *ip, *(ip+1), *(ip+2), *(ip+3), 0);
  428         }
  429 
  430     error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);
  431         
  432 #if 0
  433         /* May be needed with vfork changes */
  434         regs = USER_REGS(thread);
  435 #endif
  436         if (error == ERESTART) {
  437                 regs->eip -= 7;
  438         }
  439         else if (error != EJUSTRETURN) {
  440                 if (error) {
  441                     regs->eax = error;
  442                     regs->efl |= EFL_CF;        /* carry bit */
  443                 } else { /* (not error) */
  444                     regs->eax = rval[0];
  445                     regs->edx = rval[1];
  446                     regs->efl &= ~EFL_CF;
  447                 } 
  448         }
  449 
  450         ktrsysret(p, code, error, rval[0], funnel_type);
  451 
  452         KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
  453                 error, rval[0], rval[1], 0, 0);
  454 
  455         if(funnel_type != NO_FUNNEL)
  456                 (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
  457 
  458     thread_exception_return();
  459     /* NOTREACHED */
  460 }
  461 
  462 
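      /*
       * machdep_syscall dispatches the machine-dependent call gate: the
       * call number arrives in eax and is range-checked against
       * machdep_call_table, then up to four int-sized arguments are copied
       * in from the user stack before the table routine is invoked; a
       * table entry claiming more than four arguments is treated as a bug
       * and panics.
       */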
  463 void
  464 machdep_syscall( struct i386_saved_state *regs)
  465 {
  466     int                         trapno, nargs;
  467     machdep_call_t              *entry;
  468     thread_t                    thread;
  469         struct proc *p;
  470         struct proc *current_proc();
  471     
  472     trapno = regs->eax;
  473     if (trapno < 0 || trapno >= machdep_call_count) {
  474         regs->eax = (unsigned int)kern_invalid();
  475 
  476         thread_exception_return();
  477         /* NOTREACHED */
  478     }
  479     
  480     entry = &machdep_call_table[trapno];
  481     nargs = entry->nargs;
  482 
  483     if (nargs > 0) {
  484         int                     args[nargs];
  485 
  486         if (copyin((char *) regs->uesp + sizeof (int),
  487                     (char *) args,
  488                     nargs * sizeof (int))) {
  489 
  490             regs->eax = KERN_INVALID_ADDRESS;
  491 
  492             thread_exception_return();
  493             /* NOTREACHED */
  494         }
  495 
  496         switch (nargs) {
  497             case 1:
  498                 regs->eax = (*entry->routine)(args[0]);
  499                 break;
  500             case 2:
  501                 regs->eax = (*entry->routine)(args[0],args[1]);
  502                 break;
  503             case 3:
  504                 regs->eax = (*entry->routine)(args[0],args[1],args[2]);
  505                 break;
  506             case 4:
  507                 regs->eax = (*entry->routine)(args[0],args[1],args[2],args[3]);
  508                 break;
  509             default:
  510                 panic("machdep_syscall(): too many args");
  511         }
  512     }
  513     else
  514         regs->eax = (*entry->routine)();
  515 
  516     if (current_thread()->funnel_lock)
  517         (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
  518 
  519     thread_exception_return();
  520     /* NOTREACHED */
  521 }
  522 
  523 
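      /*
       * thread_compose_cthread_desc builds a small user-mode data segment
       * descriptor based at 'addr' and installs it both in the pcb and in
       * this CPU's LDT slot for USER_CTHREAD, so user code can reach its
       * cthread self data through that segment (conventionally loaded into
       * a segment register such as %gs by user-level code).
       */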
  524 kern_return_t
  525 thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
  526 {
  527   struct real_descriptor desc;
  528   extern struct fake_descriptor *mp_ldt[];
  529   struct real_descriptor  *ldtp;
  530   int mycpu = cpu_number();
  531 
  532   ldtp = (struct real_descriptor *)mp_ldt[mycpu];
  533   desc.limit_low = 1;
  534   desc.limit_high = 0;
  535   desc.base_low = addr & 0xffff;
  536   desc.base_med = (addr >> 16) & 0xff;
  537   desc.base_high = (addr >> 24) & 0xff;
  538   desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
  539   desc.granularity = SZ_32|SZ_G;
  540   pcb->cthread_desc = desc;
  541   ldtp[sel_idx(USER_CTHREAD)] = desc;
  542   return(KERN_SUCCESS);
  543 }
  544 
  545 kern_return_t
  546 thread_set_cthread_self(int self)
  547 {
  548    current_act()->mact.pcb->cthread_self = (unsigned int)self;
  549    
  550    return (KERN_SUCCESS);
  551 }
  552 
  553 kern_return_t
  554 thread_get_cthread_self(void)
  555 {
  556     return ((kern_return_t)current_act()->mact.pcb->cthread_self);
  557 }
  558 
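      /*
       * Fast path for setting the cthread self value: rebuild the
       * USER_CTHREAD descriptor around the new value and hand back the
       * USER_CTHREAD selector (rather than a kern_return_t), presumably so
       * the user-level stub can load it into a segment register directly.
       */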
  559 kern_return_t
  560 thread_fast_set_cthread_self(int self)
  561 {
  562   pcb_t pcb;
  563   pcb = (pcb_t)current_act()->mact.pcb;
  564   thread_compose_cthread_desc((unsigned int)self, pcb);
  565   pcb->cthread_self = (unsigned int)self; /* preserve old func too */
  566   return (USER_CTHREAD);
  567 }
  568 
  569 void
  570 mach25_syscall(struct i386_saved_state *regs)
  571 {
  572         printf("*** Attempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
  573                         regs->eip, regs->eax, -regs->eax);
  574         panic("FIXME!");
  575 }
  576 #endif  /* MACH_BSD */
  577 
  578 
  579 /* This routine is called from assembly before each and every mach trap.
  580  */
  581 
  582 extern unsigned int mach_call_start(unsigned int, unsigned int *);
  583 
  584 __private_extern__
  585 unsigned int
  586 mach_call_start(unsigned int call_number, unsigned int *args)
  587 {
  588         int i, argc;
  589         unsigned int kdarg[3];
  590 
  591 /* Always prepare to trace mach system calls */
  592 
  593         kdarg[0]=0;
  594         kdarg[1]=0;
  595         kdarg[2]=0;
  596 
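              /*
               * call_number appears to arrive from the assembly stub
               * already scaled as an offset into mach_trap_table (hence
               * the >>4 to recover the index); at most the first three
               * arguments are captured for the trace record.
               */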
  597         argc = mach_trap_table[call_number>>4].mach_trap_arg_count;
  598         
  599         if (argc > 3)
  600                 argc = 3;
  601         
  602         for (i=0; i < argc; i++)
  603           kdarg[i] = (int)*(args + i);
  604         
  605         KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START,
  606                               kdarg[0], kdarg[1], kdarg[2], 0, 0);
  607 
  608         return call_number; /* pass this back thru */
  609 }
  610 
  611 /* This routine is called from assembly after each mach system call
  612  */
  613 
  614 extern unsigned int mach_call_end(unsigned int, unsigned int);
  615 
  616 __private_extern__
  617 unsigned int
  618 mach_call_end(unsigned int call_number, unsigned int retval)
  619 {
  620   KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number>>4)) | DBG_FUNC_END,
  621                 retval, 0, 0, 0, 0);
  622         return retval;  /* pass this back thru */
  623 }
  624 

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.