The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/osfmk/i386/locore.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
    3  *
    4  * @APPLE_LICENSE_HEADER_START@
    5  * 
    6  * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
    7  * 
    8  * This file contains Original Code and/or Modifications of Original Code
    9  * as defined in and that are subject to the Apple Public Source License
   10  * Version 2.0 (the 'License'). You may not use this file except in
   11  * compliance with the License. Please obtain a copy of the License at
   12  * http://www.opensource.apple.com/apsl/ and read it before using this
   13  * file.
   14  * 
   15  * The Original Code and all software distributed under the License are
   16  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
   17  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
   18  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
   19  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
   20  * Please see the License for the specific language governing rights and
   21  * limitations under the License.
   22  * 
   23  * @APPLE_LICENSE_HEADER_END@
   24  */
   25 /*
   26  * @OSF_COPYRIGHT@
   27  */
   28 /* 
   29  * Mach Operating System
   30  * Copyright (c) 1991,1990 Carnegie Mellon University
   31  * All Rights Reserved.
   32  * 
   33  * Permission to use, copy, modify and distribute this software and its
   34  * documentation is hereby granted, provided that both the copyright
   35  * notice and this permission notice appear in all copies of the
   36  * software, derivative works or modified versions, and any portions
   37  * thereof, and that both notices appear in supporting documentation.
   38  * 
   39  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
   40  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
   41  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   42  * 
   43  * Carnegie Mellon requests users of this software to return to
   44  * 
   45  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   46  *  School of Computer Science
   47  *  Carnegie Mellon University
   48  *  Pittsburgh PA 15213-3890
   49  * 
   50  * any improvements or extensions that they make and grant Carnegie Mellon
   51  * the rights to redistribute these changes.
   52  */
   53 
   54 #include <cpus.h>
   55 #include <etap.h>
   56 #include <etap_event_monitor.h>
   57 #include <mach_rt.h>
   58 #include <platforms.h>
   59 #include <mach_kdb.h>
   60 #include <mach_kgdb.h>
   61 #include <mach_kdp.h>
   62 #include <stat_time.h>
   63 #include <mach_assert.h>
   64 
   65 #include <sys/errno.h>
   66 #include <i386/asm.h>
   67 #include <i386/cpuid.h>
   68 #include <i386/eflags.h>
   69 #include <i386/proc_reg.h>
   70 #include <i386/trap.h>
   71 #include <assym.s>
   72 #include <mach/exception_types.h>
   73 
   74 #include <i386/mp.h>
   75 
    76 #define PREEMPT_DEBUG_LOG 0
    77 
    78 #if __MACHO__
    79 /* Under Mach-O, etext is a variable which contains
    80  * the last text address
    81  */
    82 #define ETEXT_ADDR      (EXT(etext))
    83 #else
    84 /* Under ELF and other non-Mach-O formats, the address of
    85  * etext represents the last text address
    86  */
    87 #define ETEXT_ADDR      $ EXT(etext)
    88 #endif
    89 
/*
 * CX(addr,reg) indexes a per-CPU array of 4-byte slots: on MP builds it
 * expands to addr(,reg,4), scaling the CPU number held in `reg' by 4.
 * On UP builds there is only one slot, so CX collapses to a plain
 * reference and CPU_NUMBER becomes a no-op.  (The MP definition of
 * CPU_NUMBER is presumably supplied by an included header such as
 * i386/mp.h -- not visible in this file; confirm.)
 */
    90 #if     NCPUS > 1
    91 
    92 #define CX(addr,reg)    addr(,reg,4)
    93 
    94 #else
    95 #define CPU_NUMBER(reg)
    96 #define CX(addr,reg)    addr
    97 
    98 #endif  /* NCPUS > 1 */
    99 
   100         .text
   101 locore_start:
  102 
  103 /*
  104  * Fault recovery.
  105  */
  106 
  107 #ifdef  __MACHO__
  108 #define RECOVERY_SECTION        .section        __VECTORS, __recover 
  109 #define RETRY_SECTION   .section        __VECTORS, __retries
  110 #else
  111 #define RECOVERY_SECTION        .text
  112 #define RECOVERY_SECTION        .text
  113 #endif
  114 
/*
 * RECOVER_TABLE_START / RECOVER / RECOVER_TABLE_END build the fault
 * recovery table.  Each RECOVER(addr) emits a pair of longwords into
 * the recovery section: the address of the instruction that follows
 * the macro in .text (local label 9:) and the address `addr' at which
 * execution should resume if that instruction faults.
 */
   115 #define RECOVER_TABLE_START     \
   116         .align 2                ; \
   117         .globl  EXT(recover_table) ;\
   118 LEXT(recover_table)             ;\
   119         .text
   120 
   121 #define RECOVER(addr)           \
   122         .align  2;              \
   123         .long   9f              ;\
   124         .long   addr            ;\
   125         .text                   ;\
   126 9:
   127 
   128 #define RECOVER_TABLE_END               \
   129         .align  2                       ;\
   130         .globl  EXT(recover_table_end)  ;\
   131 LEXT(recover_table_end)                 ;\
   132         .text
   133 
   134 /*
   135  * Retry table for certain successful faults.
   136  */
/*
 * RETRY(addr) has the same (instruction, resume-address) pair layout as
 * RECOVER, but lives in its own table and uses .align 3.
 */
   137 #define RETRY_TABLE_START       \
   138         .align  3;              \
   139         .globl  EXT(retry_table) ;\
   140 LEXT(retry_table)               ;\
   141         .text
   142 
   143 #define RETRY(addr)             \
   144         .align 3                ;\
   145         .long   9f              ;\
   146         .long   addr            ;\
   147         .text                   ;\
   148 9:
   149 
   150 #define RETRY_TABLE_END                 \
   151         .align 3;                       \
   152         .globl  EXT(retry_table_end)    ;\
   153 LEXT(retry_table_end)                   ;\
   154         .text
   155 
   156 /*
   157  * Allocate recovery and retry tables.
   158  */
   159         RECOVERY_SECTION
   160         RECOVER_TABLE_START
   161         RETRY_SECTION
   162         RETRY_TABLE_START
  163 
  164 /*
  165  * Timing routines.
  166  */
   167 #if     STAT_TIME
   168 
/* Under statistical timing the fine-grained timer-update macros expand
 * to nothing; time is accounted elsewhere.
 */
   169 #define TIME_TRAP_UENTRY
   170 #define TIME_TRAP_UEXIT
   171 #define TIME_INT_ENTRY
   172 #define TIME_INT_EXIT
   173 
   174 #else   /* microsecond timing */
   175 
   176 /*
   177  * Microsecond timing.
   178  * Assumes a free-running microsecond counter.
   179  * no TIMER_MAX check needed.
   180  */
   181 
   182 /*
   183  * There is only one current time-stamp per CPU, since only
   184  * the time-stamp in the current timer is used.
   185  * To save time, we allocate the current time-stamps here.
   186  */
/* One 4-byte time stamp per CPU. */
   187         .comm   EXT(current_tstamp), 4*NCPUS
  188 
  189 /*
  190  * Update time on user trap entry.
  191  * 11 instructions (including cli on entry)
  192  * Assumes CPU number in %edx.
  193  * Uses %ebx, %ecx.
  194  */
/*
 * On entry from user space: accumulate the elapsed time
 * (VA_ETC - previous stamp) into the current timer's low bits,
 * normalizing on overflow, then rebase current_timer from the thread's
 * user timer to its system timer via the TH_SYS_TIMER-TH_USER_TIMER
 * offset.  Interrupts are blocked while the timer state is
 * inconsistent and re-enabled at the end.
 */
   195 #define TIME_TRAP_UENTRY \
   196         cli                                     /* block interrupts */  ;\
   197         movl    VA_ETC,%ebx                     /* get timer value */   ;\
   198         movl    CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
   199         movl    %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
   200         subl    %ecx,%ebx                       /* elapsed = new-old */ ;\
   201         movl    CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
   202         addl    %ebx,LOW_BITS(%ecx)             /* add to low bits */   ;\
   203         jns     0f                              /* if overflow, */      ;\
   204         call    timer_normalize                 /* normalize timer */   ;\
   205 0:      addl    $(TH_SYS_TIMER-TH_USER_TIMER),%ecx                      ;\
   206                                                 /* switch to sys timer */;\
   207         movl    %ecx,CX(EXT(current_timer),%edx) /* make it current */  ;\
   208         sti                                     /* allow interrupts */
  209 
  210 /*
  211  * update time on user trap exit.
  212  * 10 instructions.
  213  * Assumes CPU number in %edx.
  214  * Uses %ebx, %ecx.
  215  */
/*
 * Mirror of TIME_TRAP_UENTRY for the return-to-user path: accumulate
 * elapsed system time, then rebase current_timer back to the thread's
 * user timer.  Note there is no trailing sti here -- interrupts remain
 * blocked (presumably until the subsequent iret restores EFLAGS;
 * confirm against the callers).
 */
   216 #define TIME_TRAP_UEXIT \
   217         cli                                     /* block interrupts */  ;\
   218         movl    VA_ETC,%ebx                     /* get timer */         ;\
   219         movl    CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
   220         movl    %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
   221         subl    %ecx,%ebx                       /* elapsed = new-old */ ;\
   222         movl    CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
   223         addl    %ebx,LOW_BITS(%ecx)             /* add to low bits */   ;\
   224         jns     0f                              /* if overflow, */      ;\
   225         call    timer_normalize                 /* normalize timer */   ;\
   226 0:      addl    $(TH_USER_TIMER-TH_SYS_TIMER),%ecx                      ;\
   227                                                 /* switch to user timer */;\
   228         movl    %ecx,CX(EXT(current_timer),%edx) /* make it current */
  229 
  230 /*
  231  * update time on interrupt entry.
  232  * 9 instructions.
  233  * Assumes CPU number in %edx.
  234  * Leaves old timer in %ebx.
  235  * Uses %ecx.
  236  */
/*
 * Interrupt entry: charge elapsed time to the interrupted timer, leave
 * a pointer to that timer in %ebx (for TIME_INT_EXIT), and make the
 * per-CPU kernel_timer current.  No overflow check/normalize here,
 * unlike the trap-entry macros.
 */
   237 #define TIME_INT_ENTRY \
   238         movl    VA_ETC,%ecx                     /* get timer */         ;\
   239         movl    CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
   240         movl    %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
   241         subl    %ebx,%ecx                       /* elapsed = new-old */ ;\
   242         movl    CX(EXT(current_timer),%edx),%ebx /* get current timer */;\
   243         addl    %ecx,LOW_BITS(%ebx)             /* add to low bits */   ;\
   244         leal    CX(0,%edx),%ecx                 /* NOTE(review): %ecx is overwritten by the next lea -- looks dead; confirm */ ;\
   245         lea     CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
   246         movl    %ecx,CX(EXT(current_timer),%edx) /* set timer */
  247 
  248 /*
  249  * update time on interrupt exit.
  250  * 11 instructions
  251  * Assumes CPU number in %edx, old timer in %ebx.
  252  * Uses %eax, %ecx.
  253  */
/*
 * Interrupt exit: charge elapsed interrupt time to the current
 * (kernel) timer, normalize either timer whose low bits have
 * overflowed (the old timer's overflow is detected by testing the top
 * bit of its LOW_BITS word), then reinstall the pre-interrupt timer
 * saved in %ebx by TIME_INT_ENTRY as current.
 */
   254 #define TIME_INT_EXIT \
   255         movl    VA_ETC,%eax                     /* get timer */         ;\
   256         movl    CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
   257         movl    %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
   258         subl    %ecx,%eax                       /* elapsed = new-old */ ;\
   259         movl    CX(EXT(current_timer),%edx),%ecx /* get current timer */;\
   260         addl    %eax,LOW_BITS(%ecx)             /* add to low bits */   ;\
   261         jns     0f                              /* if overflow, */      ;\
   262         call    timer_normalize                 /* normalize timer */   ;\
   263 0:      testb   $0x80,LOW_BITS+3(%ebx)          /* old timer overflow? */;\
   264         jz      0f                              /* if overflow, */      ;\
   265         movl    %ebx,%ecx                       /* get old timer */     ;\
   266         call    timer_normalize                 /* normalize timer */   ;\
   267 0:      movl    %ebx,CX(EXT(current_timer),%edx) /* set timer */
  268 
  269 
  270 /*
  271  * Normalize timer in ecx.
  272  * Preserves edx; clobbers eax.
  273  */
   274         .align  ALIGN
/* Memory operand for divl below -- div has no immediate form. */
   275 timer_high_unit:
   276         .long   TIMER_HIGH_UNIT                 /* div has no immediate opnd */
   277 
/*
 * timer_normalize -- fold overflowed low bits of the timer pointed to
 * by %ecx into its high words (HIGH_BITS and HIGH_BITS_CHECK).
 * Preserves %edx and %eax via push/pop; %ecx still points at the same
 * timer on return.
 */
   278 timer_normalize:
   279         pushl   %edx                            /* save registers */
   280         pushl   %eax
   281         xorl    %edx,%edx                       /* clear divisor high */
   282         movl    LOW_BITS(%ecx),%eax             /* get divisor low */
   283         divl    timer_high_unit,%eax            /* quotient in eax */
   284                                                 /* remainder in edx */
   285         addl    %eax,HIGH_BITS_CHECK(%ecx)      /* add high_inc to check */
   286         movl    %edx,LOW_BITS(%ecx)             /* remainder to low_bits */
   287         addl    %eax,HIGH_BITS(%ecx)            /* add high_inc to high bits */
   288         popl    %eax                            /* restore register */
   289         popl    %edx
   290         ret
  291 
  292 /*
  293  * Switch to a new timer.
  294  */
/*
 * timer_switch(new_timer): accumulate the time elapsed since the last
 * stamp into the current timer, then install the timer whose pointer
 * is passed on the stack (S_ARG0) as this CPU's current timer.
 */
   295 Entry(timer_switch)
   296         CPU_NUMBER(%edx)                        /* get this CPU */
   297         movl    VA_ETC,%ecx                     /* get timer */
   298         movl    CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
   299         movl    %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
   300         subl    %ecx,%eax                       /* NOTE(review): %eax = old - new here, opposite of the "new - old" computed by the TIME_* macros -- verify */
   301         movl    CX(EXT(current_timer),%edx),%ecx /* get current timer */
   302         addl    %eax,LOW_BITS(%ecx)             /* add to low bits */
   303         jns     0f                              /* if overflow, */
   304         call    timer_normalize                 /* normalize timer */
   305 0:
   306         movl    S_ARG0,%ecx                     /* get new timer */
   307         movl    %ecx,CX(EXT(current_timer),%edx) /* set timer */
   308         ret
  309 
  310 /*
  311  * Initialize the first timer for a CPU.
  312  */
/*
 * start_timer(timer): record the current counter value as this CPU's
 * initial time stamp and make `timer' (S_ARG0) the current timer.
 * Nothing is accumulated -- this initializes timing for a CPU.
 */
   313 Entry(start_timer)
   314         CPU_NUMBER(%edx)                        /* get this CPU */
   315         movl    VA_ETC,%ecx                     /* get timer */
   316         movl    %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
   317         movl    S_ARG0,%ecx                     /* get timer */
   318         movl    %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
   319         ret
   320 
   321 #endif  /* accurate timing */
  322 
  323 /*
  324  * Encapsulate the transfer of exception stack frames between a PCB
  325  * and a thread stack.  Since the whole point of these is to emulate
  326  * a call or exception that changes privilege level, both macros
  327  * assume that there is no user esp or ss stored in the source
  328  * frame (because there was no change of privilege to generate them).
  329  */
  330 
  331 /*
  332  * Transfer a stack frame from a thread's user stack to its PCB.
  333  * We assume the thread and stack addresses have been loaded into
  334  * registers (our arguments).
  335  *
  336  * The macro overwrites edi, esi, ecx and whatever registers hold the
  337  * thread and stack addresses (which can't be one of the above three).
  338  * The thread address is overwritten with the address of its saved state
  339  * (where the frame winds up).
  340  *
  341  * Must be called on kernel stack.
  342  */
/*
 * Copy R_UESP/4 longwords of exception frame from `stkp' into the
 * PCB's saved-state area, then synthesize the missing user esp/ss:
 * uesp = stkp + R_UESP (the frame had no ring transition, so none was
 * pushed) and ss = current %ss.  On exit, `thread' holds the address
 * of the saved state.  Clobbers %edi, %esi, %ecx and both argument
 * registers.  Must run on the kernel stack.
 */
   343 #define FRAME_STACK_TO_PCB(thread, stkp)                                ;\
   344         movl    ACT_PCB(thread),thread  /* get act`s PCB */             ;\
   345         leal    PCB_ISS(thread),%edi    /* point to PCB`s saved state */;\
   346         movl    %edi,thread             /* save for later */            ;\
   347         movl    stkp,%esi               /* point to start of frame */   ;\
   348         movl    $ R_UESP,%ecx                                           ;\
   349         sarl    $2,%ecx                 /* word count for transfer */   ;\
   350         cld                             /* we`re incrementing */        ;\
   351         rep                                                             ;\
   352         movsl                           /* transfer the frame */        ;\
   353         addl    $ R_UESP,stkp           /* derive true "user" esp */    ;\
   354         movl    stkp,R_UESP(thread)     /* store in PCB */              ;\
   355         movl    $0,%ecx                                                 ;\
   356         mov     %ss,%cx                 /* get current ss */            ;\
   357         movl    %ecx,R_SS(thread)       /* store in PCB */
  358 
  359 /*
  360  * Transfer a stack frame from a thread's PCB to the stack pointed
  361  * to by the PCB.  We assume the thread address has been loaded into
  362  * a register (our argument).
  363  *
  364  * The macro overwrites edi, esi, ecx and whatever register holds the
  365  * thread address (which can't be one of the above three).  The
  366  * thread address is overwritten with the address of its saved state
  367  * (where the frame winds up).
  368  *
  369  * Must be called on kernel stack.
  370  */
/*
 * Inverse of FRAME_STACK_TO_PCB: copy R_UESP/4 longwords from the
 * PCB's saved state to the stack addressed by its saved uesp (the
 * destination start is uesp - R_UESP).  When the destination is user
 * memory (act's pmap != kernel_pmap), %es is temporarily loaded with
 * USER_DS for the copy and restored from %ss afterwards.  On exit,
 * `thread' holds the start of the destination frame.  Clobbers %edi,
 * %esi, %ecx and the argument register.  Must run on the kernel stack.
 */
   371 #define FRAME_PCB_TO_STACK(thread)                                      ;\
   372         movl    ACT_PCB(thread),%esi    /* get act`s PCB */             ;\
   373         leal    PCB_ISS(%esi),%esi      /* point to PCB`s saved state */;\
   374         movl    R_UESP(%esi),%edi       /* point to end of dest frame */;\
   375         movl    ACT_MAP(thread),%ecx    /* get act's map */             ;\
   376         movl    MAP_PMAP(%ecx),%ecx     /* get map's pmap */            ;\
   377         cmpl    EXT(kernel_pmap), %ecx  /* If kernel loaded task */     ;\
   378         jz      1f                      /* use kernel data segment */   ;\
   379         movl    $ USER_DS,%cx           /* else use user data segment */;\
   380         mov     %cx,%es                                                 ;\
   381 1:                                                                      ;\
   382         movl    $ R_UESP,%ecx                                           ;\
   383         subl    %ecx,%edi               /* derive start of frame */     ;\
   384         movl    %edi,thread             /* save for later */            ;\
   385         sarl    $2,%ecx                 /* word count for transfer */   ;\
   386         cld                             /* we`re incrementing */        ;\
   387         rep                                                             ;\
   388         movsl                           /* transfer the frame */        ;\
   389         mov     %ss,%cx                 /* restore kernel segments */   ;\
   390         mov     %cx,%es                                                 
  391 
   392 #undef PDEBUG
   393 
/*
 * CAH(label) is a debug hook: it counts how many times the site is
 * reached (label##count) and, if label##limit is nonzero, spins while
 * count == limit so a debugger can attach.  PDEBUG is forcibly
 * undefined above, so CAH currently expands to nothing.
 */
   394 #ifdef PDEBUG
   395 
   396 /*
   397  * Traditional, not ANSI.
   398  */
   399 #define CAH(label) \
   400         .data ;\
   401         .globl label/**/count ;\
   402 label/**/count: ;\
   403         .long   0 ;\
   404         .globl label/**/limit ;\
   405 label/**/limit: ;\
   406         .long   0 ;\
   407         .text ;\
   408         addl    $1,%ss:label/**/count ;\
   409         cmpl    $0,label/**/limit ;\
   410         jz      label/**/exit ;\
   411         pushl   %eax ;\
   412 label/**/loop: ;\
   413         movl    %ss:label/**/count,%eax ;\
   414         cmpl    %eax,%ss:label/**/limit ;\
   415         je      label/**/loop ;\
   416         popl    %eax ;\
   417 label/**/exit:
   418 
   419 #else   /* PDEBUG */
   420 
   421 #define CAH(label)
   422 
   423 #endif  /* PDEBUG */
  424 
  425 #if     MACH_KDB
  426 /*
  427  * Last-ditch debug code to handle faults that might result
  428  * from entering kernel (from collocated server) on an invalid
  429  * stack.  On collocated entry, there's no hardware-initiated
  430  * stack switch, so a valid stack must be in place when an
  431  * exception occurs, or we may double-fault.
  432  *
  433  * In case of a double-fault, our only recourse is to switch
  434  * hardware "tasks", so that we avoid using the current stack.
  435  *
  436  * The idea here is just to get the processor into the debugger,
  437  * post-haste.  No attempt is made to fix up whatever error got
  438  * us here, so presumably continuing from the debugger will
  439  * simply land us here again -- at best.
  440  */
   441 #if     0
   442 /*
   443  * Note that the per-fault entry points are not currently
   444  * functional.  The only way to make them work would be to
   445  * set up separate TSS's for each fault type, which doesn't
   446  * currently seem worthwhile.  (The offset part of a task
   447  * gate is always ignored.)  So all faults that task switch
   448  * currently resume at db_task_start.
   449  */
   450 /*
   451  * Double fault (Murphy's point) - error code (0) on stack
   452  */
   453 Entry(db_task_dbl_fault)
   454         popl    %eax
   455         movl    $(T_DOUBLE_FAULT),%ebx
   456         jmp     db_task_start
   457 /*
   458  * Segment not present - error code on stack
   459  */
   460 Entry(db_task_seg_np)
   461         popl    %eax
   462         movl    $(T_SEGMENT_NOT_PRESENT),%ebx
   463         jmp     db_task_start
   464 /*
   465  * Stack fault - error code on (current) stack
   466  */
   467 Entry(db_task_stk_fault)
   468         popl    %eax
   469         movl    $(T_STACK_FAULT),%ebx
   470         jmp     db_task_start
   471 /*
   472  * General protection fault - error code on stack
   473  */
   474 Entry(db_task_gen_prot)
   475         popl    %eax
   476         movl    $(T_GENERAL_PROTECTION),%ebx
   477         jmp     db_task_start
   478 #endif  /* 0 */
   479 /*
   480  * The entry point where execution resumes after last-ditch debugger task
   481  * switch.
   482  */
/*
 * Carve an i386_saved_state area out of the current stack, record the
 * error code (%eax) and trap number (%ebx) in it, then call
 * db_tss_to_frame with the selector of the previous TSS (read from the
 * debug TSS's back-link) so the pre-fault register state can be
 * reconstructed, and finally enter the debugger via db_trap_from_asm.
 */
   483 Entry(db_task_start)
   484         movl    %esp,%edx
   485         subl    $ISS_SIZE,%edx
   486         movl    %edx,%esp               /* allocate i386_saved_state on stack */
   487         movl    %eax,R_ERR(%esp)
   488         movl    %ebx,R_TRAPNO(%esp)
   489         pushl   %edx
   490 #if     NCPUS > 1
   491         CPU_NUMBER(%edx)
   492         movl    CX(EXT(mp_dbtss),%edx),%edx
   493         movl    TSS_LINK(%edx),%eax
   494 #else
   495         movl    EXT(dbtss)+TSS_LINK,%eax
   496 #endif
   497         pushl   %eax                    /* pass along selector of previous TSS */
   498         call    EXT(db_tss_to_frame)
   499         popl    %eax                    /* get rid of TSS selector */
   500         call    EXT(db_trap_from_asm)
   501         addl    $0x4,%esp
   502         /*
   503          * And now...?
   504          */
   505         iret                            /* ha, ha, ha... */
   506 #endif  /* MACH_KDB */
  507 
  508 /*
  509  * Trap/interrupt entry points.
  510  *
  511  * All traps must create the following save area on the PCB "stack":
  512  *
  513  *      gs
  514  *      fs
  515  *      es
  516  *      ds
  517  *      edi
  518  *      esi
  519  *      ebp
  520  *      cr2 if page fault - otherwise unused
  521  *      ebx
  522  *      edx
  523  *      ecx
  524  *      eax
  525  *      trap number
  526  *      error code
  527  *      eip
  528  *      cs
  529  *      eflags
  530  *      user esp - if from user
  531  *      user ss  - if from user
  532  *      es       - if from V86 thread
  533  *      ds       - if from V86 thread
  534  *      fs       - if from V86 thread
  535  *      gs       - if from V86 thread
  536  *
  537  */
  538 
  539 /*
  540  * General protection or segment-not-present fault.
  541  * Check for a GP/NP fault in the kernel_return
  542  * sequence; if there, report it as a GP/NP fault on the user's instruction.
  543  *
  544  * esp->     0: trap code (NP or GP)
  545  *           4: segment number in error
  546  *           8  eip
  547  *          12  cs
  548  *          16  eflags
  549  *          20  old registers (trap is from kernel)
  550  */
   551 Entry(t_gen_prot)
   552         pushl   $(T_GENERAL_PROTECTION) /* indicate fault type */
   553         jmp     trap_check_kernel_exit  /* check for kernel exit sequence */
   554 
   555 Entry(t_segnp)
   556         pushl   $(T_SEGMENT_NOT_PRESENT)
   557                                         /* indicate fault type */
   558 
/*
 * Decide whether this GP/NP fault hit one of the known kernel-exit
 * instructions (iret or the segment-register pops).  12(%esp) is the
 * saved CS: a nonzero RPL in its low two bits means the trap came from
 * user mode.  8(%esp) is the faulting EIP, compared against the
 * labelled kret_* exit instructions; anything else is a normal trap.
 */
   559 trap_check_kernel_exit:
   560         testl   $(EFL_VM),16(%esp)      /* is trap from V86 mode? */
   561         jnz     EXT(alltraps)           /* isn`t kernel trap if so */
   562         testl   $3,12(%esp)             /* is trap from kernel mode? */
   563         jne     EXT(alltraps)           /* if so: */
   564                                         /* check for the kernel exit sequence */
   565         cmpl    $ EXT(kret_iret),8(%esp)        /* on IRET? */
   566         je      fault_iret
   567         cmpl    $ EXT(kret_popl_ds),8(%esp) /* popping DS? */
   568         je      fault_popl_ds
   569         cmpl    $ EXT(kret_popl_es),8(%esp) /* popping ES? */
   570         je      fault_popl_es
   571         cmpl    $ EXT(kret_popl_fs),8(%esp) /* popping FS? */
   572         je      fault_popl_fs
   573         cmpl    $ EXT(kret_popl_gs),8(%esp) /* popping GS? */
   574         je      fault_popl_gs
   575 take_fault:                             /* if none of the above: */
   576         jmp     EXT(alltraps)           /* treat as normal trap. */
  577 
  578 /*
  579  * GP/NP fault on IRET: CS or SS is in error.
  580  * All registers contain the user's values.
  581  *
  582  * on SP is
  583  *  0   trap number
  584  *  4   errcode
  585  *  8   eip
  586  * 12   cs              --> trapno
  587  * 16   efl             --> errcode
  588  * 20   user eip
  589  * 24   user cs
  590  * 28   user eflags
  591  * 32   user esp
  592  * 36   user ss
  593  */
/*
 * Collapse the kernel fault frame onto the user frame beneath it:
 * the kernel EIP slot is reused as a scratch save for %eax, and the
 * trap number and error code are shifted down into the slots that held
 * the kernel CS and EFLAGS, leaving a user-mode trap frame for
 * alltraps.
 */
   594 fault_iret:
   595         movl    %eax,8(%esp)            /* save eax (we don`t need saved eip) */
   596         popl    %eax                    /* get trap number */
   597         movl    %eax,12-4(%esp)         /* put in user trap number */
   598         popl    %eax                    /* get error code */
   599         movl    %eax,16-8(%esp)         /* put in user errcode */
   600         popl    %eax                    /* restore eax */
   601         CAH(fltir)
   602         jmp     EXT(alltraps)           /* take fault */
  603 
  604 /*
  605  * Fault restoring a segment register.  The user's registers are still
  606  * saved on the stack.  The offending segment register has not been
  607  * popped.
  608  */
/*
 * A kret_popl_* instruction faulted: discard the kernel fault frame
 * (trapno, errcode, and the 12-byte eip/cs/eflags image) so that the
 * partially-restored user register area is back on top of the stack,
 * then jump into the push_* chain below to re-push the segment
 * registers that had already been popped.  The chain falls through so
 * each entry pushes its own register and all the ones after it.
 */
   609 fault_popl_ds:
   610         popl    %eax                    /* get trap number */
   611         popl    %edx                    /* get error code */
   612         addl    $12,%esp                /* pop stack to user regs */
   613         jmp     push_es                 /* (DS on top of stack) */
   614 fault_popl_es:
   615         popl    %eax                    /* get trap number */
   616         popl    %edx                    /* get error code */
   617         addl    $12,%esp                /* pop stack to user regs */
   618         jmp     push_fs                 /* (ES on top of stack) */
   619 fault_popl_fs:
   620         popl    %eax                    /* get trap number */
   621         popl    %edx                    /* get error code */
   622         addl    $12,%esp                /* pop stack to user regs */
   623         jmp     push_gs                 /* (FS on top of stack) */
   624 fault_popl_gs:
   625         popl    %eax                    /* get trap number */
   626         popl    %edx                    /* get error code */
   627         addl    $12,%esp                /* pop stack to user regs */
   628         jmp     push_segregs            /* (GS on top of stack) */
   629 
   630 push_es:
   631         pushl   %es                     /* restore es, */
   632 push_fs:
   633         pushl   %fs                     /* restore fs, */
   634 push_gs:
   635         pushl   %gs                     /* restore gs. */
   636 push_segregs:
   637         movl    %eax,R_TRAPNO(%esp)     /* set trap number */
   638         movl    %edx,R_ERR(%esp)        /* set error code */
   639         CAH(fltpp)
   640         jmp     trap_set_segs           /* take trap */
  641 
  642 /*
  643  * Debug trap.  Check for single-stepping across system call into
  644  * kernel.  If this is the case, taking the debug trap has turned
  645  * off single-stepping - save the flags register with the trace
  646  * bit set.
  647  */
/*
 * Debug (single-step) trap.  If the trap landed exactly on the first
 * instruction of syscall_entry or trap_unix_addr (i.e. the user
 * single-stepped into a system call before flags were saved), discard
 * the pushed eip/cs and resume the corresponding entry path with the
 * trace-flagged EFLAGS image still on the stack; otherwise treat it as
 * an ordinary T_DEBUG trap with a zero error code.
 */
   648 Entry(t_debug)
   649         testl   $(EFL_VM),8(%esp)       /* is trap from V86 mode? */
   650         jnz     0f                      /* isn`t kernel trap if so */
   651         testl   $3,4(%esp)              /* is trap from kernel mode? */
   652         jnz     0f                      /* if so: */
   653         cmpl    $syscall_entry,(%esp)   /* system call entry? */
   654         jne     1f                      /* if so: */
   655                                         /* flags are sitting where syscall */
   656                                         /* wants them */
   657         addl    $8,%esp                 /* remove eip/cs */
   658         jmp     syscall_entry_2         /* continue system call entry */
   659 
   660 1:      cmpl    $trap_unix_addr,(%esp)
   661         jne     0f
   662         addl    $8,%esp
   663         jmp     trap_unix_2
   664 
   665 0:      pushl   $0                      /* otherwise: */
   666         pushl   $(T_DEBUG)              /* handle as normal */
   667         jmp     EXT(alltraps)           /* debug fault */
  668 
  669 /*
  670  * Page fault traps save cr2.
  671  */
/*
 * Page fault entry: after pusha, 12(%esp) is the (kernel) esp slot of
 * the pushed register image, which is otherwise unused, so the faulting
 * address from %cr2 is stashed there before joining the common path.
 */
   672 Entry(t_page_fault)
   673         pushl   $(T_PAGE_FAULT)         /* mark a page fault trap */
   674         pusha                           /* save the general registers */
   675         movl    %cr2,%eax               /* get the faulting address */
   676         movl    %eax,12(%esp)           /* save in esp save slot */
   677         jmp     trap_push_segs          /* continue fault */
  678 
  679 /*
  680  * All 'exceptions' enter here with:
  681  *      esp->   trap number
  682  *              error code
  683  *              old eip
  684  *              old cs
  685  *              old eflags
  686  *              old esp         if trapped from user
  687  *              old ss          if trapped from user
  688  *
  689  * NB: below use of CPU_NUMBER assumes that macro will use correct
  690  * segment register for any kernel data accesses.
  691  */
      /*
       * alltraps: common trap entry.  The per-vector stubs have already
       * pushed the trap number and error code; the CPU pushed
       * eip/cs/eflags (+esp/ss if coming from ring 3).  Save the rest,
       * switch to kernel data segments, then dispatch on originating mode:
       * user, kernel-loaded task, or true kernel.
       */
  692 Entry(alltraps)
  693         pusha                           /* save the general registers */
  694 trap_push_segs:
  695         pushl   %ds                     /* save the segment registers */
  696         pushl   %es
  697         pushl   %fs
  698         pushl   %gs
  699 
  700 trap_set_segs:
      /* %ss is already a kernel selector here; copy it (16-bit move) into
       * %ds/%es so C code sees kernel data segments. */
  701         movl    %ss,%ax
  702         movl    %ax,%ds
  703         movl    %ax,%es                 /* switch to kernel data seg */
  704         cld                             /* clear direction flag */
  705         testl   $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
  706         jnz     trap_from_user          /* user mode trap if so */
  707         testb   $3,R_CS(%esp)           /* user mode trap? */
  708         jnz     trap_from_user
      /* Ring 0, but possibly a kernel-loaded task: check the per-CPU
       * active_kloaded pointer. */
  709         CPU_NUMBER(%edx)
  710         cmpl    $0,CX(EXT(active_kloaded),%edx)
  711         je      trap_from_kernel        /* if clear, truly in kernel */
  712 #ifdef FIXME
  713         cmpl    ETEXT_ADDR,R_EIP(%esp)  /* pc within kernel? */
  714         jb      trap_from_kernel
  715 #endif
  716 trap_from_kloaded:
  717         /*
  718          * We didn't enter here "through" PCB (i.e., using ring 0 stack),
  719          * so transfer the stack frame into the PCB explicitly, then
  720          * start running on resulting "PCB stack".  We have to set
  721          * up a simulated "uesp" manually, since there's none in the
  722          * frame.
  723          */
  724         mov     $ CPU_DATA,%dx
  725         mov     %dx,%gs
  726         CAH(atstart)
  727         CPU_NUMBER(%edx)
  728         movl    CX(EXT(active_kloaded),%edx),%ebx
  729         movl    CX(EXT(kernel_stack),%edx),%eax
  730         xchgl   %esp,%eax
      /* Copy the saved frame from the old stack (%eax) into the PCB (%ebx),
       * then continue as if we had trapped from user mode. */
  731         FRAME_STACK_TO_PCB(%ebx,%eax)
  732         CAH(atend)
  733         jmp     EXT(take_trap)
  734 
      /*
       * trap_from_user: trap originated in ring 3 (or V86 mode).  Load the
       * per-CPU data segment into %gs, account trap entry time, and move to
       * the kernel stack before calling the C handler.
       */
  735 trap_from_user:
  736         mov     $ CPU_DATA,%ax
  737         mov     %ax,%gs
  738 
  739         CPU_NUMBER(%edx)
  740         TIME_TRAP_UENTRY
  741 
  742         movl    CX(EXT(kernel_stack),%edx),%ebx
  743         xchgl   %ebx,%esp               /* switch to kernel stack */
  744                                         /* user regs pointer already set */
      /* take_trap: %ebx = pointer to the saved user register area.
       * The first push is kept so the handler's stack slot 4(%esp) still
       * holds the save-area pointer after the argument is popped. */
  745 LEXT(take_trap)
  746         pushl   %ebx                    /* record register save area */
  747         pushl   %ebx                    /* pass register save area to trap */
  748         call    EXT(user_trap)          /* call user trap routine */
  749         movl    4(%esp),%esp            /* switch back to PCB stack */
  750 
  751 /*
  752  * Return from trap or system call, checking for ASTs.
  753  * On PCB stack.
  754  */
 
  756 LEXT(return_from_trap)
  757         CPU_NUMBER(%edx)
  758         cmpl    $0,CX(EXT(need_ast),%edx)
  759         je      EXT(return_to_user)     /* if we need an AST: */
  760 
      /* An AST is pending: service it on the kernel stack with interrupts
       * deliverable, then re-check, since i386_astintr may block and more
       * ASTs may have been posted meanwhile. */
  761         movl    CX(EXT(kernel_stack),%edx),%esp
  762                                         /* switch to kernel stack */
  763         pushl   $0                      /* push preemption flag */
  764         call    EXT(i386_astintr)       /* take the AST */
  765         addl    $4,%esp                 /* pop preemption flag */
  766         popl    %esp                    /* switch back to PCB stack (w/exc link) */
  767         jmp     EXT(return_from_trap)   /* and check again (rare) */
  768                                         /* ASTs after this point will */
  769                                         /* have to wait */
  770 
  771 /*
  772  * Arrange the checks needed for kernel-loaded (or kernel-loading)
  773  * threads so that branch is taken in kernel-loaded case.
  774  */
  775 LEXT(return_to_user)
  776         TIME_TRAP_UEXIT
  777         CPU_NUMBER(%eax)
      /* Kernel-loaded thread: its state must be copied back out of the PCB
       * by hand (hardware won't switch stacks for a ring-0 -> ring-0 iret). */
  778         cmpl    $0,CX(EXT(active_kloaded),%eax)
  779         jnz     EXT(return_xfer_stack)
  780         movl    $ CPD_ACTIVE_THREAD,%ebx
  781         movl    %gs:(%ebx),%ebx                 /* get active thread */
  782 
  783 #if     MACH_RT
  784 #if     MACH_ASSERT
      /* Sanity check: must not return to user with a non-zero preemption
       * level; break into the debugger (int $3) if we would. */
  785         movl    $ CPD_PREEMPTION_LEVEL,%ebx
  786         cmpl    $0,%gs:(%ebx)
  787         je      EXT(return_from_kernel)
  788         int     $3
  789 #endif  /* MACH_ASSERT */
  790 #endif  /* MACH_RT */
  791 
  792 /*
  793  * Return from kernel mode to interrupted thread.
  794  */
 
      /*
       * Restore state in the reverse order alltraps saved it.  The kret_*
       * labels mark each restore instruction individually so the fault
       * recovery code can identify exactly which pop/iret faulted
       * (e.g. on a bad segment selector) and back out safely.
       */
  796 LEXT(return_from_kernel)
  797 LEXT(kret_popl_gs)
  798         popl    %gs                     /* restore segment registers */
  799 LEXT(kret_popl_fs)
  800         popl    %fs
  801 LEXT(kret_popl_es)
  802         popl    %es
  803 LEXT(kret_popl_ds)
  804         popl    %ds
  805         popa                            /* restore general registers */
  806         addl    $8,%esp                 /* discard trap number and error code */
  807 
  808 LEXT(kret_iret)
  809         iret                            /* return from interrupt */
  811 
  812 LEXT(return_xfer_stack)
  813         /*
  814          * If we're on PCB stack in a kernel-loaded task, we have
  815          * to transfer saved state back to thread stack and swap
  816          * stack pointers here, because the hardware's not going
  817          * to do so for us.
  818          */
  819         CAH(rxsstart)
  820         CPU_NUMBER(%eax)
  821         movl    CX(EXT(kernel_stack),%eax),%esp
  822         movl    CX(EXT(active_kloaded),%eax),%eax
      /* FRAME_PCB_TO_STACK leaves the new stack pointer in %eax. */
  823         FRAME_PCB_TO_STACK(%eax)
  824         movl    %eax,%esp
  825         CAH(rxsend)
  826         jmp     EXT(return_from_kernel)
  827 
  828 /*
  829  * Hate to put this here, but setting up a separate swap_func for
  830  * kernel-loaded threads no longer works, since thread executes
  831  * "for a while" (i.e., until it reaches glue code) when first
  832  * created, even if it's nominally suspended.  Hence we can't
  833  * transfer the PCB when the thread first resumes, because we
  834  * haven't initialized it yet.
  835  */
  836 /*
  837  * Have to force transfer to new stack "manually".  Use a string
  838  * move to transfer all of our saved state to the stack pointed
  839  * to by iss.uesp, then install a pointer to it as our current
  840  * stack pointer.
  841  */
  842 LEXT(return_kernel_loading)
  843         CPU_NUMBER(%eax)
  844         movl    CX(EXT(kernel_stack),%eax),%esp
  845         movl    $ CPD_ACTIVE_THREAD,%ebx
  846         movl    %gs:(%ebx),%ebx                 /* get active thread */
  847         movl    %ebx,%edx                       /* save for later */
      /* FRAME_PCB_TO_STACK leaves the new stack pointer in %ebx. */
  848         FRAME_PCB_TO_STACK(%ebx)
  849         movl    %ebx,%esp                       /* start running on new stack */
      /* Clear the per-CPU kernel-loaded indicator: this thread now runs
       * like a normal (non-kloaded) thread. */
  850         movl    $0,CX(EXT(active_kloaded),%eax) /* set cached indicator */
  851         jmp     EXT(return_from_kernel)
  852 
  853 /*
  854  * Trap from kernel mode.  No need to switch stacks or load segment registers.
  855  */
      /*
       * With MACH_KDB/MACH_KGDB configured this path must decide which
       * stack the trap belongs on (interrupt stack, debugger stack, or
       * kernel stack) before calling kernel_trap.  %ebx preserves the
       * original stack pointer across the C calls (callee-saved).
       */
  856 trap_from_kernel:
  857 #if     MACH_KDB || MACH_KGDB
  858         mov     $ CPU_DATA,%ax
  859         mov     %ax,%gs
  860         movl    %esp,%ebx               /* save current stack */
  861 
  862         cmpl    EXT(int_stack_high),%esp /* on an interrupt stack? */
  863         jb      6f                      /* OK if so */
  864 
  865 #if     MACH_KGDB 
  866         cmpl    $0,EXT(kgdb_active)     /* Unexpected trap in kgdb */
  867         je      0f                      /* no */
  868 
  869         pushl   %esp                    /* Already on kgdb stack */
  870         cli
  871         call    EXT(kgdb_trap)
  872         addl    $4,%esp
  873         jmp     EXT(return_from_kernel)
  874 0:                                      /* should kgdb handle this exception? */
  875         cmpl $(T_NO_FPU),R_TRAPNO(%esp) /* FPU disabled? */
  876         je      2f                      /* yes */
  877         cmpl $(T_PAGE_FAULT),R_TRAPNO(%esp)     /* page fault? */
  878         je      2f                      /* yes */
  879 1:
      /* Any other kernel trap goes straight to kgdb on its own stack. */
  880         cli                             /* disable interrupts */
  881         CPU_NUMBER(%edx)                /* get CPU number */
  882         movl    CX(EXT(kgdb_stacks),%edx),%ebx
  883         xchgl   %ebx,%esp               /* switch to kgdb stack */
  884         pushl   %ebx                    /* pass old sp as an arg */
  885         call    EXT(kgdb_from_kernel)
  886         popl    %esp                    /* switch back to kernel stack */
  887         jmp     EXT(return_from_kernel)
  888 2:
  889 #endif  /* MACH_KGDB */
  890 
  891 #if     MACH_KDB
  892         cmpl    $0,EXT(db_active)       /* could trap be from ddb? */
  893         je      3f                      /* no */
  894 #if     NCPUS > 1
  895         CPU_NUMBER(%edx)                /* see if this CPU is in ddb */
  896         cmpl    $0,CX(EXT(kdb_active),%edx)
  897         je      3f                      /* no */
  898 #endif  /* NCPUS > 1 */
      /* Trap occurred while this CPU was inside ddb: re-enter ddb. */
  899         pushl   %esp
  900         call    EXT(db_trap_from_asm)
  901         addl    $0x4,%esp
  902         jmp     EXT(return_from_kernel)
  903 
  904 3:
  905         /*
  906          * Dilemma:  don't want to switch to kernel_stack if trap
  907          * "belongs" to ddb; don't want to switch to db_stack if
  908          * trap "belongs" to kernel.  So have to duplicate here the
  909          * set of trap types that kernel_trap() handles.  Note that
  910          * "unexpected" page faults will not be handled by kernel_trap().
  911          * In this panic-worthy case, we fall into the debugger with
  912          * kernel_stack containing the call chain that led to the
  913          * bogus fault.
  914          */
  915         movl    R_TRAPNO(%esp),%edx
  916         cmpl    $(T_PAGE_FAULT),%edx
  917         je      4f
  918         cmpl    $(T_NO_FPU),%edx
  919         je      4f
  920         cmpl    $(T_FPU_FAULT),%edx
  921         je      4f
  922         cmpl    $(T_FLOATING_POINT_ERROR),%edx
  923         je      4f
  924         cmpl    $(T_PREEMPT),%edx
  925         jne     7f
  926 4:
  927 #endif  /* MACH_KDB */
  928 
      /* Normal kernel trap: make sure we run kernel_trap on the kernel
       * stack (switch at 5:), unless already on it or on the active
       * thread's stack (fall to 6:). */
  929         CPU_NUMBER(%edx)                /* get CPU number */
  930         cmpl    CX(EXT(kernel_stack),%edx),%esp
  931                                         /* if not already on kernel stack, */
  932         ja      5f                      /*   check some more */
  933         cmpl    CX(EXT(active_stacks),%edx),%esp
  934         ja      6f                      /* on kernel stack: no switch */
  935 5:
  936         movl    CX(EXT(kernel_stack),%edx),%esp
  937 6:
  938         pushl   %ebx                    /* save old stack */
  939         pushl   %ebx                    /* pass as parameter */
  940         call    EXT(kernel_trap)        /* to kernel trap routine */
  941         addl    $4,%esp                 /* pop parameter */
  942         testl   %eax,%eax
  943         jne     8f
  944         /*
  945          * If kernel_trap returns false, trap wasn't handled.
  946          */
  947 7:
  948 #if     MACH_KDB
  949         CPU_NUMBER(%edx)
  950         movl    CX(EXT(db_stacks),%edx),%esp
  951         pushl   %ebx                    /* pass old stack as parameter */
  952         call    EXT(db_trap_from_asm)
  953 #endif  /* MACH_KDB */
  954 #if     MACH_KGDB
  955         cli                             /* disable interrupts */
  956         CPU_NUMBER(%edx)                /* get CPU number */
  957         movl    CX(EXT(kgdb_stacks),%edx),%esp
  958         pushl   %ebx                    /* pass old stack as parameter */
  959         call    EXT(kgdb_from_kernel)
  960 #endif  /* MACH_KGDB */
  961         addl    $4,%esp                 /* pop parameter */
  962         testl   %eax,%eax
  963         jne     8f
  964         /*
  965          * Likewise, if kdb_trap/kgdb_from_kernel returns false, trap
  966          * wasn't handled.
  967          */
  968         pushl   %ebx                    /* pass old stack as parameter */
  969         call    EXT(panic_trap)
  970         addl    $4,%esp                 /* pop parameter */
  971 8:
  972         movl    %ebx,%esp               /* get old stack (from callee-saves reg) */
  973 #else   /* MACH_KDB || MACH_KGDB */
  974         pushl   %esp                    /* pass parameter */
  975         call    EXT(kernel_trap)        /* to kernel trap routine */
  976         addl    $4,%esp                 /* pop parameter */
  977 #endif  /* MACH_KDB || MACH_KGDB */
  978 
  979 #if     MACH_RT
      /* Kernel preemption: only honor an urgent AST for a T_PREEMPT trap,
       * and only when preemption is enabled and we are actually on the
       * kernel stack (the XOR/AND test checks same-stack membership).
       * NOTE(review): 48(%esp) is assumed to be the saved trap number in
       * this frame layout -- confirm against R_TRAPNO offset. */
  980         CPU_NUMBER(%edx)
  981 
  982         movl    CX(EXT(need_ast),%edx),%eax /* get pending asts */
  983         testl   $ AST_URGENT,%eax       /* any urgent preemption? */
  984         je      EXT(return_from_kernel) /* no, nothing to do */
  985         cmpl    $0,EXT(preemptable)     /* kernel-mode, preemption enabled? */
  986         je      EXT(return_from_kernel) /* no, skip it */
  987         cmpl    $ T_PREEMPT,48(%esp)    /* preempt request? */
  988         jne     EXT(return_from_kernel) /* no, nothing to do */
  989         movl    CX(EXT(kernel_stack),%edx),%eax
  990         movl    %esp,%ecx
  991         xorl    %eax,%ecx
  992         andl    $(-KERNEL_STACK_SIZE),%ecx
  993         testl   %ecx,%ecx               /* are we on the kernel stack? */
  994         jne     EXT(return_from_kernel) /* no, skip it */
  995 
  996 #if     PREEMPT_DEBUG_LOG
  997         pushl   28(%esp)                /* stack pointer */
  998         pushl   24+4(%esp)              /* frame pointer */
  999         pushl   56+8(%esp)              /* stack pointer */
 1000         pushl   $0f
 1001         call    EXT(log_thread_action)
 1002         addl    $16, %esp
 1003         .data
 1004 0:      String  "trap preempt eip"
 1005         .text
 1006 #endif  /* PREEMPT_DEBUG_LOG */
 1007 
 1008         pushl   $1                      /* push preemption flag */
 1009         call    EXT(i386_astintr)       /* take the AST */
 1010         addl    $4,%esp                 /* pop preemption flag */
 1011 #endif  /* MACH_RT */
 1012 
 1013         jmp     EXT(return_from_kernel)
 1014 
 1015 /*
 1016  *      Called as a function, makes the current thread
 1017  *      return from the kernel as if from an exception.
 1018  */
 
 1020         .globl  EXT(thread_exception_return)
 1021         .globl  EXT(thread_bootstrap_return)
 1022 LEXT(thread_exception_return)
 1023 LEXT(thread_bootstrap_return)
      /* Round %esp up to the top of the current kernel stack, then fetch
       * the saved PCB stack pointer stored just below the i386_kernel_state
       * area at the stack top, and resume the normal trap-exit path. */
 1024         movl    %esp,%ecx                       /* get kernel stack */
 1025         or      $(KERNEL_STACK_SIZE-1),%ecx
 1026         movl    -3-IKS_SIZE(%ecx),%esp          /* switch back to PCB stack */
 1027         jmp     EXT(return_from_trap)
 1028 
      /*
       * call_continuation(continuation): discard the current kernel stack
       * contents (reset %esp to just below the i386_kernel_state area at
       * the stack top) and jump to the continuation function, which never
       * returns here.  Frame pointer is zeroed to terminate backtraces.
       */
 1029 Entry(call_continuation)
 1030         movl    S_ARG0,%eax                     /* get continuation */
 1031         movl    %esp,%ecx                       /* get kernel stack */
 1032         or      $(KERNEL_STACK_SIZE-1),%ecx
 1033         addl    $(-3-IKS_SIZE),%ecx
 1034         movl    %ecx,%esp                       /* pop the stack */
 1035         xorl    %ebp,%ebp                       /* zero frame pointer */
 1036         jmp     *%eax                           /* goto continuation */
 1037 
      /*
       * Debug-only interrupt logging/timing macros; compiled out (#if 0).
       * When enabled they preserve all general registers around the C
       * calls via pushal/popal.
       */
 1038 #if 0
 1039 #define LOG_INTERRUPT(info,msg)                 \
 1040         pushal                          ;       \
 1041         pushl   msg                     ;       \
 1042         pushl   info                    ;       \
 1043         call    EXT(log_thread_action)  ;       \
 1044         add     $8,%esp                 ;       \
 1045         popal
 1046 #define CHECK_INTERRUPT_TIME(n)                 \
 1047         pushal                          ;       \
 1048         pushl   $n                      ;       \
 1049         call    EXT(check_thread_time)  ;       \
 1050         add     $4,%esp                 ;       \
 1051         popal
 1052 #else
 1053 #define LOG_INTERRUPT(info,msg)
 1054 #define CHECK_INTERRUPT_TIME(n)
 1055 #endif
 1056          
      /* Message strings used by the (disabled) interrupt logging above. */
 1057 .data
 1058 imsg_start:
 1059         String  "interrupt start"
 1060 imsg_end:
 1061         String  "interrupt end"
 1062 
 1063 .text
 1064 /*
 1065  * All interrupts enter here.
 1066  * old %eax on stack; interrupt number in %eax.
 1067  */
 1068 Entry(all_intrs)
 1069         pushl   %ecx                    /* save registers */
 1070         pushl   %edx
 1071         cld                             /* clear direction flag */
 1072 
 1073         cmpl    %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
 1074         jb      int_from_intstack       /* if not: */
 1075 
      /* Not already on an interrupt stack: save segments, load kernel
       * segments, and switch to this CPU's interrupt stack.  After the
       * xchgl, %ecx = old stack pointer, %edx = &i386_interrupt_state. */
 1076         pushl   %ds                     /* save segment registers */
 1077         pushl   %es
 1078         pushl   %fs
 1079         pushl   %gs
 1080         mov     %ss,%dx                 /* switch to kernel segments */
 1081         mov     %dx,%ds
 1082         mov     %dx,%es
 1083         mov     $ CPU_DATA,%dx
 1084         mov     %dx,%gs
 1085 
 1086         CPU_NUMBER(%edx)
 1087 
 1088         movl    CX(EXT(int_stack_top),%edx),%ecx
 1089         movl    %esp,%edx               /* & i386_interrupt_state */
 1090         xchgl   %ecx,%esp               /* switch to interrupt stack */
 1091 
 1092 #if     STAT_TIME
 1093         pushl   %ecx                    /* save pointer to old stack */
 1094 #else
 1095         pushl   %ebx                    /* save %ebx - out of the way */
 1096                                         /* so stack looks the same */
 1097         pushl   %ecx                    /* save pointer to old stack */
 1098         TIME_INT_ENTRY                  /* do timing */
 1099 #endif
 1100 
 1101         pushl   %edx                    /* pass &i386_interrupt_state to pe_incoming_interrupt */
 1102         
 1103 #if     MACH_RT
 1104         movl    $ CPD_PREEMPTION_LEVEL,%edx
 1105         incl    %gs:(%edx)
 1106 #endif  /* MACH_RT */
 1107 
 1108         movl    $ CPD_INTERRUPT_LEVEL,%edx
 1109         incl    %gs:(%edx)
 1110 
 1111         pushl   %eax                            /* Push trap number */
 1112         call    EXT(PE_incoming_interrupt)              /* call generic interrupt routine */
 1113         addl    $8,%esp                 /* pop trap number and &i386_interrupt_state */
 1114 
 1115         .globl  EXT(return_to_iret)
 1116 LEXT(return_to_iret)                    /* (label for kdb_kintr and hardclock) */
 1117 
 1118         movl    $ CPD_INTERRUPT_LEVEL,%edx
 1119         decl    %gs:(%edx)
 1120 
 1121 #if     MACH_RT
 1122         movl    $ CPD_PREEMPTION_LEVEL,%edx
 1123         decl    %gs:(%edx)
 1124 #endif  /* MACH_RT */
 1125 
 1126 #if     STAT_TIME
 1127 #else
 1128         TIME_INT_EXIT                   /* do timing */
 1129         movl    4(%esp),%ebx            /* restore the extra reg we saved */
 1130 #endif
 1131 
 1132         popl    %esp                    /* switch back to old stack */
 1133 
      /* Interrupt handled; decide whether a pending AST should be taken
       * now (interrupted user mode / V86 / kernel-loaded task), or, with
       * MACH_RT, whether an urgent kernel preemption is allowed. */
 1134         CPU_NUMBER(%edx)
 1135         movl    CX(EXT(need_ast),%edx),%eax
 1136         testl   %eax,%eax               /* any pending asts? */
 1137         je      1f                      /* no, nothing to do */
 1138         testl   $(EFL_VM),I_EFL(%esp)   /* if in V86 */
 1139         jnz     ast_from_interrupt      /* take it */
 1140         testb   $3,I_CS(%esp)           /* user mode, */
 1141         jnz     ast_from_interrupt      /* take it */
 1142 #ifdef FIXME
 1143         cmpl    ETEXT_ADDR,I_EIP(%esp) /* if within kernel-loaded task, */
 1144         jnb     ast_from_interrupt      /* take it */
 1145 #endif
 1146 
 1147 #if     MACH_RT
 1148         cmpl    $0,EXT(preemptable)     /* kernel-mode, preemption enabled? */
 1149         je      1f                      /* no, skip it */
 1150         movl    $ CPD_PREEMPTION_LEVEL,%ecx
 1151         cmpl    $0,%gs:(%ecx)           /* preemption masked? */
 1152         jne     1f                      /* yes, skip it */
 1153         testl   $ AST_URGENT,%eax       /* any urgent requests? */
 1154         je      1f                      /* no, skip it */
 1155         cmpl    $ EXT(locore_end),I_EIP(%esp)   /* are we in locore code? */
 1156         jb      1f                      /* yes, skip it */
 1157         movl    CX(EXT(kernel_stack),%edx),%eax
 1158         movl    %esp,%ecx
 1159         xorl    %eax,%ecx
 1160         andl    $(-KERNEL_STACK_SIZE),%ecx
 1161         testl   %ecx,%ecx               /* are we on the kernel stack? */
 1162         jne     1f                      /* no, skip it */
 1163 
 1164 /*
 1165  * Take an AST from kernel space.  We don't need (and don't want)
 1166  * to do as much as the case where the interrupt came from user
 1167  * space.
 1168  */
 1169 #if     PREEMPT_DEBUG_LOG
 1170         pushl   $0
 1171         pushl   $0
 1172         pushl   I_EIP+8(%esp)
 1173         pushl   $0f
 1174         call    EXT(log_thread_action)
 1175         addl    $16, %esp
 1176         .data
 1177 0:      String  "intr preempt eip"
 1178         .text
 1179 #endif  /* PREEMPT_DEBUG_LOG */
 1180 
 1181         sti
 1182         pushl   $1                      /* push preemption flag */
 1183         call    EXT(i386_astintr)       /* take the AST */
 1184         addl    $4,%esp                 /* pop preemption flag */
 1185 #endif  /* MACH_RT */
 1186 
 1187 1:
 1188         pop     %gs
 1189         pop     %fs
 1190         pop     %es                     /* restore segment regs */
 1191         pop     %ds
 1192         pop     %edx
 1193         pop     %ecx
 1194         pop     %eax
 1195         iret                            /* return to caller */
 1196 
      /*
       * Nested interrupt: we are already on the interrupt stack, so stay
       * on it.  Dummy segment-register slots are reserved so the
       * i386_interrupt_state layout matches the non-nested path.
       * No AST checks on this path.
       */
 1197 int_from_intstack:
 1198 #if     MACH_RT
 1199         movl    $ CPD_PREEMPTION_LEVEL,%edx
 1200         incl    %gs:(%edx)
 1201 #endif  /* MACH_RT */
 1202 
 1203         movl    $ CPD_INTERRUPT_LEVEL,%edx
 1204         incl    %gs:(%edx)
 1205 
 1206         subl    $16, %esp               /* dummy ds, es, fs, gs */
 1207         movl    %esp, %edx              /* &i386_interrupt_state */
 1208         pushl   %edx                    /* pass &i386_interrupt_state to PE_incoming_interrupt */
 1209         
 1210         pushl   %eax                    /* Push trap number */
 1211 
 1212         call    EXT(PE_incoming_interrupt)
 1213         addl    $20,%esp                        /* pop i386_interrupt_state, dummy gs,fs,es,ds */
 1214 
 1215 LEXT(return_to_iret_i)                  /* ( label for kdb_kintr) */
 1216 
 1217         addl    $4,%esp                 /* pop trap number */
 1218 
 1219         movl    $ CPD_INTERRUPT_LEVEL,%edx
 1220         decl    %gs:(%edx)
 1221 
 1222 #if     MACH_RT
 1223         movl    $ CPD_PREEMPTION_LEVEL,%edx
 1224         decl    %gs:(%edx)
 1225 #endif  /* MACH_RT */
 1226 
 1227         pop     %edx                    /* must have been on kernel segs */
 1228         pop     %ecx
 1229         pop     %eax                    /* no ASTs */
 1230         iret
 1231 
 1232 /*
 1233  *      Take an AST from an interrupt.
 1234  *      On PCB stack.
 1235  * sp-> es      -> edx
 1236  *      ds      -> ecx
 1237  *      edx     -> eax
 1238  *      ecx     -> trapno
 1239  *      eax     -> code
 1240  *      eip
 1241  *      cs
 1242  *      efl
 1243  *      esp
 1244  *      ss
 1245  */
      /*
       * Pop the interrupt-style frame, then rebuild a full trap-style
       * frame (zero trapno/code + pusha + segments) so the AST can be
       * delivered through the normal return_from_trap path.
       */
 1246 ast_from_interrupt:
 1247         pop     %gs
 1248         pop     %fs
 1249         pop     %es                     /* restore all registers ... */
 1250         pop     %ds
 1251         popl    %edx
 1252         popl    %ecx
 1253         popl    %eax
 1254         sti                             /* Reenable interrupts */
 1255         pushl   $0                      /* zero code */
 1256         pushl   $0                      /* zero trap number */
 1257         pusha                           /* save general registers */
 1258         push    %ds                     /* save segment registers */
 1259         push    %es
 1260         push    %fs
 1261         push    %gs
 1262         mov     %ss,%dx                 /* switch to kernel segments */
 1263         mov     %dx,%ds
 1264         mov     %dx,%es
 1265         mov     $ CPU_DATA,%dx
 1266         mov     %dx,%gs
 1267 
 1268         /*
 1269          * See if we interrupted a kernel-loaded thread executing
 1270          * in its own task.
 1271          */
 1272         CPU_NUMBER(%edx)
 1273         testl   $(EFL_VM),R_EFLAGS(%esp) /* in V86 mode? */
 1274         jnz     0f                      /* user mode trap if so */
 1275         testb   $3,R_CS(%esp)           
 1276         jnz     0f                      /* user mode, back to normal */
 1277 #ifdef FIXME
 1278         cmpl    ETEXT_ADDR,R_EIP(%esp)
 1279         jb      0f                      /* not kernel-loaded, back to normal */
 1280 #endif
 1281 
 1282         /*
 1283          * Transfer the current stack frame by hand into the PCB.
 1284          */
 1285         CAH(afistart)
 1286         movl    CX(EXT(active_kloaded),%edx),%eax
 1287         movl    CX(EXT(kernel_stack),%edx),%ebx
 1288         xchgl   %ebx,%esp
 1289         FRAME_STACK_TO_PCB(%eax,%ebx)
 1290         CAH(afiend)
 1291         TIME_TRAP_UENTRY
 1292         jmp     3f
 1293 0:
 1294         TIME_TRAP_UENTRY
 1295 
 1296         movl    CX(EXT(kernel_stack),%edx),%eax
 1297                                         /* switch to kernel stack */
 1298         xchgl   %eax,%esp
 1299 3:
      /* %eax = saved PCB stack pointer; keep it on the stack across the
       * i386_astintr call so popl %esp can restore it afterwards. */
 1300         pushl   %eax
 1301         pushl   $0                      /* push preemption flag */
 1302         call    EXT(i386_astintr)       /* take the AST */
 1303         addl    $4,%esp                 /* pop preemption flag */
 1304         popl    %esp                    /* back to PCB stack */
 1305         jmp     EXT(return_from_trap)   /* return */
 1306 
 1307 #if     MACH_KDB || MACH_KGDB 
 1308 /*
 1309  * kdb_kintr:   enter kdb from keyboard interrupt.
 1310  * Chase down the stack frames until we find one whose return
 1311  * address is the interrupt handler.   At that point, we have:
 1312  *
 1313  * frame->      saved %ebp
 1314  *              return address in interrupt handler
 1315  *              ivect
 1316  *              saved SPL
 1317  *              return address == return_to_iret_i
 1318  *              saved %edx
 1319  *              saved %ecx
 1320  *              saved %eax
 1321  *              saved %eip
 1322  *              saved %cs
 1323  *              saved %efl
 1324  *
 1325  * OR:
 1326  * frame->      saved %ebp
 1327  *              return address in interrupt handler
 1328  *              ivect
 1329  *              saved SPL
 1330  *              return address == return_to_iret
 1331  *              pointer to save area on old stack
 1332  *            [ saved %ebx, if accurate timing ]
 1333  *
 1334  * old stack:   saved %es
 1335  *              saved %ds
 1336  *              saved %edx
 1337  *              saved %ecx
 1338  *              saved %eax
 1339  *              saved %eip
 1340  *              saved %cs
 1341  *              saved %efl
 1342  *
 1343  * Call kdb, passing it that register save area.
 1344  */
 
 1346 #if MACH_KGDB
 1347 Entry(kgdb_kintr)
 1348 #endif /* MACH_KGDB */
 1349 #if MACH_KDB
 1350 Entry(kdb_kintr)
 1351 #endif /* MACH_KDB */
      /* Walk saved-%ebp frame links; 16(%eax) is the interrupt return
       * address slot in each frame (see layout above).  When found,
       * patch it to kdb_from_iret / kdb_from_iret_i so the debugger is
       * entered when the interrupt handler returns.  Comparisons against
       * the kdb_from_* values make the walk idempotent. */
 1352         movl    %ebp,%eax               /* save caller`s frame pointer */
 1353         movl    $ EXT(return_to_iret),%ecx /* interrupt return address 1 */
 1354         movl    $ EXT(return_to_iret_i),%edx /* interrupt return address 2 */
 1355 
 1356 0:      cmpl    16(%eax),%ecx           /* does this frame return to */
 1357                                         /* interrupt handler (1)? */
 1358         je      1f
 1359         cmpl    $kdb_from_iret,16(%eax)
 1360         je      1f
 1361         cmpl    16(%eax),%edx           /* interrupt handler (2)? */
 1362         je      2f                      /* if not: */
 1363         cmpl    $kdb_from_iret_i,16(%eax)
 1364         je      2f
 1365         movl    (%eax),%eax             /* try next frame */
 1366         jmp     0b
 1367 
 1368 1:      movl    $kdb_from_iret,16(%eax) /* returns to kernel/user stack */
 1369         ret
 1370 
 1371 2:      movl    $kdb_from_iret_i,16(%eax)
 1372                                         /* returns to interrupt stack */
 1373         ret
 1374 
 1375 /*
 1376  * On return from keyboard interrupt, we will execute
 1377  * kdb_from_iret_i
 1378  *      if returning to an interrupt on the interrupt stack
 1379  * kdb_from_iret
 1380  *      if returning to an interrupt on the user or kernel stack
 1381  */
      /*
       * kdb_from_iret: spliced in at return_to_iret (see kdb_kintr).
       * Extend the save area with the remaining registers at known
       * offsets, enter kdb/kgdb, then undo and resume the normal
       * interrupt return.
       */
 1382 kdb_from_iret:
 1383                                         /* save regs in known locations */
 1384 #if     STAT_TIME
 1385         pushl   %ebx                    /* caller`s %ebx is in reg */
 1386 #else
 1387         movl    4(%esp),%eax            /* get caller`s %ebx */
 1388         pushl   %eax                    /* push on stack */
 1389 #endif
 1390         pushl   %ebp
 1391         pushl   %esi
 1392         pushl   %edi
 1393         push    %fs
 1394         push    %gs
 1395 #if MACH_KGDB
 1396         cli
 1397         pushl   %esp                    /* pass regs */
 1398         call    EXT(kgdb_kentry)        /* to kgdb */
 1399         addl    $4,%esp                 /* pop parameters */
 1400 #endif /* MACH_KGDB */
 1401 #if MACH_KDB
 1402         pushl   %esp                    /* pass regs */
 1403         call    EXT(kdb_kentry)         /* to kdb */
 1404         addl    $4,%esp                 /* pop parameters */
 1405 #endif /* MACH_KDB */
 1406         pop     %gs                     /* restore registers */
 1407         pop     %fs
 1408         popl    %edi
 1409         popl    %esi
 1410         popl    %ebp
 1411 #if     STAT_TIME
 1412         popl    %ebx
 1413 #else
      /* Debugger may have modified %ebx: write it back into the saved
       * slot that return_to_iret will restore from. */
 1414         popl    %eax
 1415         movl    %eax,4(%esp)
 1416 #endif
 1417         jmp     EXT(return_to_iret)     /* normal interrupt return */
 1418 
      /*
       * kdb_from_iret_i: spliced in at return_to_iret_i (interrupt-stack
       * case).  Rebuild a full trap-style frame (zero trapno/err + pusha
       * + segments), enter kgdb and/or kdb, then restore and iret.
       */
 1419 kdb_from_iret_i:                        /* on interrupt stack */
 1420         pop     %edx                    /* restore saved registers */
 1421         pop     %ecx
 1422         pop     %eax
 1423         pushl   $0                      /* zero error code */
 1424         pushl   $0                      /* zero trap number */
 1425         pusha                           /* save general registers */
 1426         push    %ds                     /* save segment registers */
 1427         push    %es
 1428         push    %fs
 1429         push    %gs
 1430 #if MACH_KGDB
 1431         cli                             /* disable interrupts */
 1432         CPU_NUMBER(%edx)                /* get CPU number */
 1433         movl    CX(EXT(kgdb_stacks),%edx),%ebx
 1434         xchgl   %ebx,%esp               /* switch to kgdb stack */
 1435         pushl   %ebx                    /* pass old sp as an arg */
 1436         call    EXT(kgdb_from_kernel)
 1437         popl    %esp                    /* switch back to interrupt stack */
 1438 #endif /* MACH_KGDB */
 1439 #if MACH_KDB
 1440         pushl   %esp                    /* pass regs, */
 1441         pushl   $0                      /* code, */
 1442         pushl   $-1                     /* type to kdb */
 1443         call    EXT(kdb_trap)
 1444         addl    $12,%esp
 1445 #endif /* MACH_KDB */
 1446         pop     %gs                     /* restore segment registers */
 1447         pop     %fs
 1448         pop     %es
 1449         pop     %ds
 1450         popa                            /* restore general registers */
 1451         addl    $8,%esp
 1452         iret
 
 1454 #endif  /* MACH_KDB || MACH_KGDB */
 1455 
 1456 
 1457 /*
 1458  * Mach RPC enters through a call gate, like a system call.
 1459  */
 1460 
/*
 * mach_rpc -- Mach RPC system-call trap entry (call-gate entry).
 * Builds a standard trap save area on the stack (eflags/eip/cs arrive
 * in shuffled positions because the gate does not push eflags where a
 * trap would), switches to the kernel stack, copies the user's syscall
 * arguments in, and dispatches through mach_trap_table.
 * On entry: %eax = (negated) system call number; ring-3 stack frame
 * pushed by the call gate is on the stack.
 */
Entry(mach_rpc)
        pushf                           /* save flags as soon as possible */
        pushl   %eax                    /* save system call number */
        pushl   $0                      /* clear trap number slot */

        pusha                           /* save the general registers */
        pushl   %ds                     /* and the segment registers */
        pushl   %es
        pushl   %fs
        pushl   %gs

        mov     %ss,%dx                 /* switch to kernel data segment */
        mov     %dx,%ds                 /* (%ss is already the kernel's) */
        mov     %dx,%es
        mov     $ CPU_DATA,%dx          /* %gs addresses per-CPU data */
        mov     %dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places: the call gate pushed
 * cs/eip but not eflags, and our pushf above landed eflags in the
 * slot the save-area layout reserves for eip.  Rotate the three
 * values into their R_EIP/R_CS/R_EFLAGS slots.
 */

        movl    R_EIP(%esp),%ebx        /* eflags are in EIP slot */
        movl    R_CS(%esp),%ecx         /* eip is in CS slot */
        movl    R_EFLAGS(%esp),%edx     /* cs is in EFLAGS slot */
        movl    %ecx,R_EIP(%esp)        /* fix eip */
        movl    %edx,R_CS(%esp)         /* fix cs */
        movl    %ebx,R_EFLAGS(%esp)     /* fix eflags */

        CPU_NUMBER(%edx)                /* %edx = this CPU's index */
        TIME_TRAP_UENTRY                /* start charging kernel time */

        negl    %eax                    /* get system call number */
                                        /* (traps enter with it negated) */
        shll    $4,%eax                 /* manual indexing */
                                        /* (each trap-table entry is 16 bytes) */

/*
 * Check here for mach_rpc from kernel-loaded task --
 *  - Note that kernel-loaded task returns via real return.
 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
 * so transfer the stack frame into the PCB explicitly, then
 * start running on resulting "PCB stack".  We have to set
 * up a simulated "uesp" manually, since there's none in the
 * frame.
 */
        cmpl    $0,CX(EXT(active_kloaded),%edx)
        jz      2f                      /* ordinary user task: take fast path */
        CAH(mrstart)
        movl    CX(EXT(active_kloaded),%edx),%ebx
        movl    CX(EXT(kernel_stack),%edx),%edx
        xchgl   %edx,%esp               /* %edx = old SP (frame), now on kstack */

        FRAME_STACK_TO_PCB(%ebx,%edx)   /* copy trap frame into thread's PCB */
        CAH(mrend)

        CPU_NUMBER(%edx)                /* reload cpu number (macro may clobber) */
        jmp     3f

2:
        CPU_NUMBER(%edx)
        movl    CX(EXT(kernel_stack),%edx),%ebx
                                        /* get current kernel stack */
        xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
                                        /* user registers. */

3:

/*
 * Register use on entry:
 *   eax contains syscall number (already scaled by 16)
 *   ebx contains user regs pointer
 *   edx contains cpu number
 */
#undef  RPC_TRAP_REGISTERS
#ifdef  RPC_TRAP_REGISTERS
        pushl   R_ESI(%ebx)
        pushl   R_EDI(%ebx)
        pushl   R_ECX(%ebx)
        pushl   R_EDX(%ebx)
#else
        movl    EXT(mach_trap_table)(%eax),%ecx
                                        /* get number of arguments */
        jecxz   2f                      /* skip argument copy if none */
        movl    R_UESP(%ebx),%esi       /* get user stack pointer */
        lea     4(%esi,%ecx,4),%esi     /* skip user return address, */
                                        /* and point past last argument */
        /* edx holds cpu number from above */
        movl    CX(EXT(active_kloaded),%edx),%edx
                                        /* point to current thread */
        orl     %edx,%edx               /* if ! kernel-loaded, check addr */
        jz      4f                      /* else */
        mov     %ds,%dx                 /* kernel data segment access */
        jmp     5f
4:
        cmpl    $(VM_MAX_ADDRESS),%esi  /* in user space? */
        ja      mach_call_addr          /* address error if not */
        movl    $ USER_DS,%edx          /* user data segment access */
5:
        mov     %dx,%fs                 /* %fs selects where args are read from */
        movl    %esp,%edx               /* save kernel ESP for error recovery */
1:
        subl    $4,%esi
        RECOVERY_SECTION
        RECOVER(mach_call_addr_push)    /* fault here -> mach_call_addr_push */
        pushl   %fs:(%esi)              /* push argument on stack */
        loop    1b                      /* loop for all arguments; copying */
                                        /* last-to-first leaves them in C order */
#endif

/*
 * Register use on entry:
 *   eax contains syscall number (scaled)
 *   ebx contains user regs pointer
 */
2:

        pushl   %ebx                    /* arg ptr */
        pushl   %eax                    /* call # - preserved across */
        call    EXT(mach_call_start)
        addl    $ 8, %esp
        movl    %eax, %ebx              /* need later (for mach_call_end) */

        CAH(call_call)
        call    *EXT(mach_trap_table)+4(%eax)
                                        /* call procedure */

        pushl   %eax                    /* retval */
        pushl   %ebx                    /* call # */
        call    EXT(mach_call_end)
        addl    $ 8, %esp

        movl    %esp,%ecx               /* get kernel stack */
        or      $(KERNEL_STACK_SIZE-1),%ecx
        movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
        movl    %eax,R_EAX(%esp)        /* save return value */
        jmp     EXT(return_from_trap)   /* return to user */
 1593 
 1594 
 1595 /*
 1596  * Special system call entry for "int 0x80", which has the "eflags"
 1597  * register saved at the right place already.
 1598  * Fall back to the common syscall path after saving the registers.
 1599  *
 1600  * esp ->       old eip
 1601  *              old cs
 1602  *              old eflags
 1603  *              old esp         if trapped from user
 1604  *              old ss          if trapped from user
 1605  *
 1606  * XXX: for the moment, we don't check for int 0x80 from kernel mode.
 1607  */
/*
 * syscall_int80 -- "int 0x80" system-call entry.
 * Unlike the call-gate path, the trap gate already pushed eflags in
 * the correct slot, so there is no pushf and no eflags/eip/cs shuffle:
 * after saving registers and loading kernel segments we join the
 * common path at syscall_entry_3.
 */
Entry(syscall_int80)
        pushl   %eax                    /* save system call number */
        pushl   $0                      /* clear trap number slot */

        pusha                           /* save the general registers */
        pushl   %ds                     /* and the segment registers */
        pushl   %es
        pushl   %fs
        pushl   %gs

        mov     %ss,%dx                 /* switch to kernel data segment */
        mov     %dx,%ds
        mov     %dx,%es
        mov     $ CPU_DATA,%dx          /* %gs addresses per-CPU data */
        mov     %dx,%gs

        jmp     syscall_entry_3         /* join common syscall path */
 1625 
 1626 /*
 1627  * System call enters through a call gate.  Flags are not saved -
 1628  * we must shuffle stack to look like trap save area.
 1629  *
 1630  * esp->        old eip
 1631  *              old cs
 1632  *              old esp
 1633  *              old ss
 1634  *
 1635  * eax contains system call number.
 1636  *
 * NB: below use of CPU_NUMBER assumes that the macro will use the
 * correct segment register for any kernel data accesses.
 1639  */
/*
 * syscall -- common system-call entry (via call gate).
 * Saves a trap frame, switches to the kernel stack (copying the frame
 * into the PCB for kernel-loaded tasks), then dispatches the call
 * either to an emulation vector (user-space handler) or to the native
 * Mach trap path (syscall_native).
 * On entry: %eax = system call number; gate frame on the stack.
 */
Entry(syscall)
syscall_entry:
        pushf                           /* save flags as soon as possible */
syscall_entry_2:
        pushl   %eax                    /* save system call number */
        pushl   $0                      /* clear trap number slot */

        pusha                           /* save the general registers */
        pushl   %ds                     /* and the segment registers */
        pushl   %es
        pushl   %fs
        pushl   %gs

        mov     %ss,%dx                 /* switch to kernel data segment */
        mov     %dx,%ds
        mov     %dx,%es
        mov     $ CPU_DATA,%dx          /* %gs addresses per-CPU data */
        mov     %dx,%gs

/*
 * Shuffle eflags,eip,cs into proper places: the call gate pushed
 * cs/eip but not eflags, so the pushf above landed in the save area's
 * EIP slot; rotate the three values into their proper slots.
 */

        movl    R_EIP(%esp),%ebx        /* eflags are in EIP slot */
        movl    R_CS(%esp),%ecx         /* eip is in CS slot */
        movl    R_EFLAGS(%esp),%edx     /* cs is in EFLAGS slot */
        movl    %ecx,R_EIP(%esp)        /* fix eip */
        movl    %edx,R_CS(%esp)         /* fix cs */
        movl    %ebx,R_EFLAGS(%esp)     /* fix eflags */

syscall_entry_3:
        CPU_NUMBER(%edx)
/*
 * Check here for syscall from kernel-loaded task --
 * We didn't enter here "through" PCB (i.e., using ring 0 stack),
 * so transfer the stack frame into the PCB explicitly, then
 * start running on resulting "PCB stack".  We have to set
 * up a simulated "uesp" manually, since there's none in the
 * frame.
 */
        cmpl    $0,CX(EXT(active_kloaded),%edx)
        jz      0f                      /* ordinary user task: fast path */
        CAH(scstart)
        movl    CX(EXT(active_kloaded),%edx),%ebx
        movl    CX(EXT(kernel_stack),%edx),%edx
        xchgl   %edx,%esp               /* run on kernel stack; %edx = frame */
        FRAME_STACK_TO_PCB(%ebx,%edx)   /* copy trap frame into thread's PCB */
        CAH(scend)
        TIME_TRAP_UENTRY
        CPU_NUMBER(%edx)                /* reload cpu number (macro clobbers) */
        jmp     1f

0:
        TIME_TRAP_UENTRY

        CPU_NUMBER(%edx)
        movl    CX(EXT(kernel_stack),%edx),%ebx
                                        /* get current kernel stack */
        xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
                                        /* user registers. */
                                        /* user regs pointer already set */

/*
 * Check for MACH or emulated system call
 * Register use (from here till we begin processing call):
 *   eax contains system call number
 *   ebx points to user regs
 */
1:
        movl    $ CPD_ACTIVE_THREAD,%edx
        movl    %gs:(%edx),%edx                 /* get active thread */
        movl    ACT_TASK(%edx),%edx     /* point to task */
        movl    TASK_EMUL(%edx),%edx    /* get emulation vector */
        orl     %edx,%edx               /* if none, */
        je      syscall_native          /*    do native system call */
        movl    %eax,%ecx               /* copy system call number */
        subl    DISP_MIN(%edx),%ecx     /* get displacement into syscall */
                                        /* vector table */
        jl      syscall_native          /* too low - native system call */
        cmpl    DISP_COUNT(%edx),%ecx   /* check range */
        jnl     syscall_native          /* too high - native system call */
        movl    DISP_VECTOR(%edx,%ecx,4),%edx
                                        /* get the emulation vector */
        orl     %edx,%edx               /* emulated system call if not zero */
        jnz     syscall_emul
 1725 
 1726 /*
 1727  * Native system call.
 1728  * Register use on entry:
 1729  *   eax contains syscall number
 1730  *   ebx points to user regs
 1731  */
/*
 * syscall_native -- validate and index the native Mach trap.
 * Negates the (negative) call number, range-checks it against
 * mach_trap_table, and scales it by 16 (the table entry size).
 * Calls that resolve to kern_invalid in a task without an emulation
 * vector are retried as "server" syscalls via mach_call_range.
 * Register use on entry:
 *   eax contains syscall number (negative)
 *   ebx points to user regs
 */
syscall_native:
        negl    %eax                    /* get system call number */
        jl      mach_call_range         /* out of range if it was positive */

        cmpl    EXT(mach_trap_count),%eax /* check system call table bounds */
        jg      mach_call_range         /* error if out of range */
        shll    $4,%eax                 /* manual indexing */
                                        /* (16 bytes per trap-table entry) */

        movl    EXT(mach_trap_table)+4(%eax),%edx
                                        /* get procedure */
        cmpl    $ EXT(kern_invalid),%edx        /* if not "kern_invalid" */
        jne     do_native_call          /* go on with Mach syscall */

        movl    $ CPD_ACTIVE_THREAD,%edx
        movl    %gs:(%edx),%edx                 /* get active thread */
        movl    ACT_TASK(%edx),%edx     /* point to task */
        movl    TASK_EMUL(%edx),%edx    /* get emulation vector */
        orl     %edx,%edx               /* if it exists, */
        jne     do_native_call          /* do native system call */
        shrl    $4,%eax                 /* restore syscall number */
        jmp     mach_call_range         /* try it as a "server" syscall */
 1753 
 1754 /*
 1755  * Register use on entry:
 1756  *   eax contains syscall number
 1757  *   ebx contains user regs pointer
 1758  */
/*
 * do_native_call -- copy the user's syscall arguments onto the kernel
 * stack, then fall through to mach_call_call.  Arguments are read via
 * %fs (user or kernel data segment depending on whether the task is
 * kernel-loaded) and pushed last-to-first so they land in C order.
 * Register use on entry:
 *   eax contains syscall number (scaled by 16)
 *   ebx contains user regs pointer
 */
do_native_call:
        movl    EXT(mach_trap_table)(%eax),%ecx
                                        /* get number of arguments */
        jecxz   mach_call_call          /* skip argument copy if none */
        movl    R_UESP(%ebx),%esi       /* get user stack pointer */
        lea     4(%esi,%ecx,4),%esi     /* skip user return address, */
                                        /* and point past last argument */
        CPU_NUMBER(%edx)
        movl    CX(EXT(active_kloaded),%edx),%edx
                                        /* point to current thread */
        orl     %edx,%edx               /* if kernel-loaded, skip addr check */
        jz      0f                      /* else */
        mov     %ds,%dx                 /* kernel data segment access  */
        jmp     1f
0:
        cmpl    $(VM_MAX_ADDRESS),%esi  /* in user space? */
        ja      mach_call_addr          /* address error if not */
        movl    $ USER_DS,%edx          /* user data segment access */
1:
        mov     %dx,%fs                 /* %fs selects source address space */
        movl    %esp,%edx               /* save kernel ESP for error recovery */
2:
        subl    $4,%esi
        RECOVERY_SECTION
        RECOVER(mach_call_addr_push)    /* fault here -> mach_call_addr_push */
        pushl   %fs:(%esi)              /* push argument on stack */
        loop    2b                      /* loop for all arguments */
 1786 
 1787 /*
 1788  * Register use on entry:
 1789  *   eax contains syscall number
 1790  *   ebx contains user regs pointer
 1791  */
/*
 * mach_call_call -- invoke the native Mach trap procedure.
 * Optionally records ETAP begin/end events around the call (skipped
 * for mach_msg, trap 0x200), brackets the call with mach_call_start/
 * mach_call_end, then switches back to the PCB stack, stores the
 * return value in the saved %eax slot, and returns to user mode.
 * Register use on entry:
 *   eax contains syscall number (scaled by 16)
 *   ebx contains user regs pointer
 */
mach_call_call:

        CAH(call_call)

#if     ETAP_EVENT_MONITOR
        cmpl    $0x200, %eax                    /* is this mach_msg? */
        jz      make_syscall                    /* if yes, don't record event */

        pushal                                  /* Otherwise: save registers */
        pushl   %eax                            /*   push syscall number on stack*/
        call    EXT(etap_machcall_probe1)       /*   call event begin probe */
        add     $4,%esp                         /*   restore stack */
        popal                                   /*   restore registers */

        call    *EXT(mach_trap_table)+4(%eax)   /* call procedure */
        pushal
        call    EXT(etap_machcall_probe2)       /* call event end probe */
        popal
        jmp     skip_syscall                    /* syscall already made */
#endif  /* ETAP_EVENT_MONITOR */

make_syscall:

        pushl   %ebx                    /* arg ptr */
        pushl   %eax                    /* call # - preserved across */
        call    EXT(mach_call_start)
        addl    $ 8, %esp
        movl    %eax, %ebx              /* need later (for mach_call_end) */

        call    *EXT(mach_trap_table)+4(%eax)   /* call procedure */

        pushl   %eax                    /* retval */
        pushl   %ebx                    /* call # */
        call    EXT(mach_call_end)
        addl    $ 8, %esp

skip_syscall:

        movl    %esp,%ecx               /* get kernel stack */
        or      $(KERNEL_STACK_SIZE-1),%ecx
        movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
        movl    %eax,R_EAX(%esp)        /* save return value */
        jmp     EXT(return_from_trap)   /* return to user */
 1835 
 1836 /*
 1837  * Address out of range.  Change to page fault.
 1838  * %esi holds failing address.
 1839  * Register use on entry:
 1840  *   ebx contains user regs pointer
 1841  */
/*
 * mach_call_addr_push / mach_call_addr -- argument-copy fault handler.
 * Entered (via RECOVER) when copying syscall arguments faults; the
 * _push variant first discards the partially-pushed parameters using
 * the kernel ESP saved in %edx.  Converts the failure into a user
 * page fault on the faulting address and hands it to take_trap.
 * %esi holds failing address.
 * Register use on entry:
 *   ebx contains user regs pointer
 */
mach_call_addr_push:
        movl    %edx,%esp               /* clean parameters from stack */
mach_call_addr:
        movl    %esi,R_CR2(%ebx)        /* set fault address */
        movl    $(T_PAGE_FAULT),R_TRAPNO(%ebx)
                                        /* set page-fault trap */
        movl    $(T_PF_USER),R_ERR(%ebx)
                                        /* set error code - read user space */
        CAH(call_addr)
        jmp     EXT(take_trap)          /* treat as a trap */
 1852 
/*
 * System call out of range.  Treat as invalid-instruction trap.
 * (Arguably this should be a general-protection fault instead.)
 * Register use on entry:
 *   eax contains syscall number
 */
/*
 * mach_call_range -- out-of-range system call number.
 * If the task has an emulation vector, fail as an illegal instruction
 * (syscall_failed); otherwise raise an EXC_SYSCALL exception carrying
 * the call number as its single code word.
 */
mach_call_range:
        movl    $ CPD_ACTIVE_THREAD,%edx
        movl    %gs:(%edx),%edx         /* get active thread */
        movl    ACT_TASK(%edx),%edx     /* point to task */
        movl    TASK_EMUL(%edx),%edx    /* get emulation vector */
        orl     %edx,%edx               /* if emulator, */
        jne     EXT(syscall_failed)     /*    handle as illegal instruction */
                                        /* else generate syscall exception: */
        push    %eax                    /* code word = syscall number */
        movl    %esp,%edx               /* %edx = pointer to code word */
        push    $1                      /* code_cnt = 1 */
        push    %edx                    /* code pointer (see exception() i/f) */
        push    $ EXC_SYSCALL
        CAH(call_range)
        call    EXT(exception)
        /* no return */
 1875 
/*
 * syscall_failed -- deliver a failed syscall as an invalid-opcode trap
 * (used when an emulator exists to field it).  Switches back to the
 * PCB stack, reloads the user-regs pointer, stamps T_INVALID_OPCODE
 * into the saved frame and hands it to take_trap.
 */
        .globl  EXT(syscall_failed)
LEXT(syscall_failed)
        movl    %esp,%ecx               /* get kernel stack */
        or      $(KERNEL_STACK_SIZE-1),%ecx
        movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
        CPU_NUMBER(%edx)
        movl    CX(EXT(kernel_stack),%edx),%ebx
                                        /* get current kernel stack */
        xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
                                        /* user registers. */
                                        /* user regs pointer already set */

        movl    $(T_INVALID_OPCODE),R_TRAPNO(%ebx)
                                        /* set invalid-operation trap */
        movl    $0,R_ERR(%ebx)          /* clear error code */
        CAH(failed)
        jmp     EXT(take_trap)          /* treat as a trap */
 1893 
 1894 /*
 1895  * User space emulation of system calls.
 1896  * edx - user address to handle syscall
 1897  *
 1898  * User stack will become:
 1899  * uesp->       eflags
 1900  *              eip
 1901  * Register use on entry:
 1902  *   ebx contains user regs pointer
 1903  *   edx contains emulator vector address
 1904  */
/*
 * syscall_emul -- redirect the system call to a user-space emulator.
 * Pushes the saved eflags and eip onto the *user* stack (via %fs, with
 * address checks unless the task is kernel-loaded), then rewrites the
 * saved uesp/eip so the thread resumes in the emulator at %edx.
 *
 * User stack will become:
 * uesp->       eflags
 *              eip
 * Register use on entry:
 *   ebx contains user regs pointer
 *   edx contains emulator vector address
 */
syscall_emul:
        movl    R_UESP(%ebx),%edi       /* get user stack pointer */
        CPU_NUMBER(%eax)
        movl    CX(EXT(active_kloaded),%eax),%eax
        orl     %eax,%eax               /* if thread not kernel-loaded, */
        jz      0f                      /*   do address checks */
        subl    $8,%edi                 /* push space for new arguments */
        mov     %ds,%ax                 /* kernel data segment access */
        jmp     1f                      /* otherwise, skip them */
0:
        cmpl    $(VM_MAX_ADDRESS),%edi  /* in user space? */
        ja      syscall_addr            /* address error if not */
        subl    $8,%edi                 /* push space for new arguments */
        cmpl    $(VM_MIN_ADDRESS),%edi  /* still in user space? */
        jb      syscall_addr            /* error if not */
        movl    $ USER_DS,%ax           /* user data segment access */
1:
        mov     %ax,%fs                 /* %fs selects target address space */
        movl    R_EFLAGS(%ebx),%eax     /* move flags */
        RECOVERY_SECTION
        RECOVER(syscall_addr)           /* fault here -> syscall_addr */
        movl    %eax,%fs:0(%edi)        /* to user stack */
        movl    R_EIP(%ebx),%eax        /* move eip */
        RECOVERY_SECTION
        RECOVER(syscall_addr)
        movl    %eax,%fs:4(%edi)        /* to user stack */
        movl    %edi,R_UESP(%ebx)       /* set new user stack pointer */
        movl    %edx,R_EIP(%ebx)        /* change return address to trap */
        movl    %ebx,%esp               /* back to PCB stack */
        CAH(emul)
        jmp     EXT(return_from_trap)   /* return to user */
 1936 
 1937 
 1938 /*
 1939  * Address error - address is in %edi.
 1940  * Register use on entry:
 1941  *   ebx contains user regs pointer
 1942  */
/*
 * syscall_addr -- user-stack fault while setting up an emulated call.
 * Converts the failure into a user page fault on the address in %edi
 * and hands it to take_trap.
 * Register use on entry:
 *   ebx contains user regs pointer
 */
syscall_addr:
        movl    %edi,R_CR2(%ebx)        /* set fault address */
        movl    $(T_PAGE_FAULT),R_TRAPNO(%ebx)
                                        /* set page-fault trap */
        movl    $(T_PF_USER),R_ERR(%ebx)
                                        /* set error code - read user space */
        CAH(addr)
        jmp     EXT(take_trap)          /* treat as a trap */
 1951 
 1952 /**/
 1953 /*
 1954  * Utility routines.
 1955  */
 1956 
 1957 
 1958 /*
 1959  * Copy from user address space.
 1960  * arg0:        user address
 1961  * arg1:        kernel address
 1962  * arg2:        byte count
 1963  */
/*
 * copyin / copyinmsg -- copy arg2 bytes from user address arg0 to
 * kernel address arg1.  Returns 0 on success, EFAULT on fault or
 * address wrap-around.  If the current pmap is the kernel pmap
 * (kernel-loaded task), the copy reads through the kernel %ds;
 * otherwise %ds is temporarily switched to USER_DS and restored
 * at copy_ret.  Faults recover via RECOVER to copyin_fail.
 */
Entry(copyinmsg)
ENTRY(copyin)
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get user start address */
        movl    8+S_ARG1,%edi           /* get kernel destination address */
        movl    8+S_ARG2,%edx           /* get count */

        lea     0(%esi,%edx),%eax       /* get user end address + 1 */

        movl    $ CPD_ACTIVE_THREAD,%ecx
        movl    %gs:(%ecx),%ecx                 /* get active thread */
        movl    ACT_MAP(%ecx),%ecx              /* get act->map */
        movl    MAP_PMAP(%ecx),%ecx             /* get map->pmap */
        cmpl    EXT(kernel_pmap), %ecx
        jz      1f                      /* kernel pmap: keep kernel %ds */
        movl    $ USER_DS,%cx           /* user data segment access */
        mov     %cx,%ds
1:
        cmpl    %esi,%eax
        jb      copyin_fail             /* fail if wrap-around */
        cld                             /* count up */
        movl    %edx,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)            /* fault during copy -> copyin_fail */
        rep
        movsl                           /* move longwords */
        movl    %edx,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyin_fail)
        rep
        movsb
        xorl    %eax,%eax               /* return 0 for success */
copy_ret:
        mov     %ss,%di                 /* restore kernel data segment */
        mov     %di,%ds
        
        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyin_fail:
        movl    $ EFAULT,%eax                   /* return error for failure */
        jmp     copy_ret                /* pop frame and return */
 2011 
 2012 /*
 2013  * Copy string from user address space.
 2014  * arg0:        user address
 2015  * arg1:        kernel address
 2016  * arg2:        max byte count
 2017  * arg3:        actual byte count (OUT)
 2018  */
/*
 * copyinstr -- copy a NUL-terminated string from user address arg0 to
 * kernel address arg1, at most arg2 bytes including the NUL.  If arg3
 * is non-null it receives the number of bytes transferred.  Returns
 * 0 on success, ENAMETOOLONG if the buffer fills before a NUL is
 * seen, EFAULT on fault.  Source bytes are read through %fs, which is
 * set to the kernel segment for kernel-loaded tasks and USER_DS
 * otherwise.  A null kernel destination just scans without storing.
 * NOTE(review): the failure path jumps to copyin's copy_ret, which
 * also restores %ds (harmless here, since only %fs was loaded).
 */
Entry(copyinstr)
        pushl   %esi
        pushl   %edi                    /* save registers */

        movl    8+S_ARG0,%esi           /* get user start address */
        movl    8+S_ARG1,%edi           /* get kernel destination address */
        movl    8+S_ARG2,%edx           /* get count */

        lea     0(%esi,%edx),%eax       /* get user end address + 1 */

        movl    $ CPD_ACTIVE_THREAD,%ecx
        movl    %gs:(%ecx),%ecx                 /* get active thread */
        movl    ACT_MAP(%ecx),%ecx              /* get act->map */
        movl    MAP_PMAP(%ecx),%ecx             /* get map->pmap */
        cmpl    EXT(kernel_pmap), %ecx
        jne     0f
        mov     %ds,%cx                 /* kernel data segment access  */
        jmp     1f
0:
        movl    $ USER_DS,%cx           /* user data segment access */
1:
        mov     %cx,%fs
        xorl    %eax,%eax               /* %eax = 0: clears return value and */
                                        /* the high bits around the movb below */
        cmpl    $0,%edx
        je      4f                      /* zero-length buffer: return 0 */
2:
        RECOVERY_SECTION
        RECOVER(copystr_fail)           /* copy bytes... */
        movb    %fs:(%esi),%eax
        incl    %esi
        testl   %edi,%edi               /* if kernel address is ... */
        jz      3f                      /* not NULL */
        movb    %eax,(%edi)             /* copy the byte */
        incl    %edi
3:
        decl    %edx
        je      5f                      /* Zero count.. error out */
        cmpl    $0,%eax
        jne     2b                      /* .. a NUL found? */
        jmp     4f                      /* return zero (%eax) */
5:
        movl    $ ENAMETOOLONG,%eax     /* String is too long.. */
4:
        movl    8+S_ARG3,%edi           /* get OUT len ptr */
        cmpl    $0,%edi
        jz      copystr_ret             /* if null, just return */
        subl    8+S_ARG0,%esi           /* %esi = bytes consumed */
        movl    %esi,(%edi)             /* else set OUT arg to xfer len */
copystr_ret:
        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copystr_fail:
        movl    $ EFAULT,%eax           /* return error for failure */
        jmp     copy_ret                /* pop frame and return */
                                        /* (copy_ret is copyin's exit path) */
 2075 
 2076 /*
 2077  * Copy to user address space.
 2078  * arg0:        kernel address
 2079  * arg1:        user address
 2080  * arg2:        byte count
 2081  */
/*
 * copyout / copyoutmsg -- copy arg2 bytes from kernel address arg0 to
 * user address arg1.  Returns 0 on success, EFAULT on fault.  Writes
 * go through %es (kernel segment for kernel-loaded tasks, USER_DS
 * otherwise), one destination page at a time so that a write fault on
 * a later page can restart the copy via the RETRY section.
 */
Entry(copyoutmsg)
ENTRY(copyout)
        pushl   %esi
        pushl   %edi                    /* save registers */
        pushl   %ebx

        movl    12+S_ARG0,%esi          /* get kernel start address */
        movl    12+S_ARG1,%edi          /* get user start address */
        movl    12+S_ARG2,%edx          /* get count */

        leal    0(%edi,%edx),%eax       /* get user end address + 1 */

        movl    $ CPD_ACTIVE_THREAD,%ecx
        movl    %gs:(%ecx),%ecx                 /* get active thread */
        movl    ACT_MAP(%ecx),%ecx              /* get act->map */
        movl    MAP_PMAP(%ecx),%ecx             /* get map->pmap */
        cmpl    EXT(kernel_pmap), %ecx
        jne     0f
        mov     %ds,%cx                 /* else kernel data segment access  */
        jmp     1f
0:
        movl    $ USER_DS,%cx
1:
        mov     %cx,%es

/*
 * Check whether user address space is writable
 * before writing to it - hardware is broken.
 *
 * Skip check if "user" address is really in
 * kernel space (i.e., if it's in a kernel-loaded
 * task).
 *
 * Register usage:
 *      esi/edi source/dest pointers for rep/mov
 *      ecx     counter for rep/mov
 *      edx     counts down from 3rd arg
 *      eax     count of bytes for each (partial) page copy
 *      ebx     shadows edi, used to adjust edx
 */
        movl    %edi,%ebx               /* copy edi for syncing up */
copyout_retry:
        /* if restarting after a partial copy, put edx back in sync */
        addl    %ebx,%edx               /* edx -= (edi - ebx), done in */
        subl    %edi,%edx               /* two steps: += ebx, then -= edi */
        movl    %edi,%ebx               /* ebx = edi; */

/*
 * Copy only what fits on the current destination page.
 * Check for write-fault again on the next page.
 */
        leal    NBPG(%edi),%eax         /* point to */
        andl    $(-NBPG),%eax           /* start of next page */
        subl    %edi,%eax               /* get number of bytes to that point */
        cmpl    %edx,%eax               /* bigger than count? */
        jle     1f                      /* if so, */
        movl    %edx,%eax               /* use count */
1:
        cld                             /* count up */
        movl    %eax,%ecx               /* move by longwords first */
        shrl    $2,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)           /* fault -> copyout_fail */
        RETRY_SECTION
        RETRY(copyout_retry)            /* retryable fault -> restart page */
        rep
        movsl
        movl    %eax,%ecx               /* now move remaining bytes */
        andl    $3,%ecx
        RECOVERY_SECTION
        RECOVER(copyout_fail)
        RETRY_SECTION
        RETRY(copyout_retry)
        rep
        movsb                           /* move */
        movl    %edi,%ebx               /* copy edi for syncing up */
        subl    %eax,%edx               /* and decrement count */
        jg      copyout_retry           /* restart on next page if not done */
        xorl    %eax,%eax               /* return 0 for success */
copyout_ret:
        mov     %ss,%di                 /* restore kernel segment */
        mov     %di,%es

        popl    %ebx
        popl    %edi                    /* restore registers */
        popl    %esi
        ret                             /* and return */

copyout_fail:
        movl    $ EFAULT,%eax           /* return error for failure */
        jmp     copyout_ret             /* pop frame and return */
 2173 
 2174 /*
 2175  * FPU routines.
 2176  */
 2177 
 2178 /*
 2179  * Initialize FPU.
 2180  */
ENTRY(_fninit)
        fninit                          /* reset FPU to its default state, */
                                        /* without checking pending exceptions */
        ret
 2184 
 2185 /*
 2186  * Read control word
 2187  */
ENTRY(_fstcw)
        pushl   %eax            /* get stack space */
        fstcw   (%esp)          /* store 16-bit control word; the upper */
                                /* 16 bits of the slot keep old %eax -- */
                                /* callers presumably mask the result */
        popl    %eax            /* return control word in %eax */
        ret
 2193 
 2194 /*
 2195  * Set control word
 2196  */
ENTRY(_fldcw)
        fldcw   4(%esp)                 /* load control word from arg0 */
        ret
 2200 
 2201 /*
 2202  * Read status word
 2203  */
ENTRY(_fnstsw)
        xor     %eax,%eax               /* clear high 16 bits of eax */
        fnstsw  %ax                     /* read FP status */
        ret                             /* status word returned in %eax */
 2208 
 2209 /*
 2210  * Clear FPU exceptions
 2211  */
ENTRY(_fnclex)
        fnclex                          /* clear pending FPU exception flags */
        ret
 2215 
 2216 /*
 2217  * Clear task-switched flag.
 2218  */
ENTRY(_clts)
        clts                            /* clear CR0.TS so FP use won't trap */
        ret
 2222 
 2223 /*
 2224  * Save complete FPU state.  Save error for later.
 2225  */
ENTRY(_fpsave)
        movl    4(%esp),%eax            /* get save area pointer */
        fnsave  (%eax)                  /* save complete state, including */
                                        /* errors; also reinitializes FPU */
        ret
 2231 
 2232 /*
 2233  * Restore FPU state.
 2234  */
ENTRY(_fprestore)
        movl    4(%esp),%eax            /* get save area pointer */
        frstor  (%eax)                  /* restore complete state */
        ret
 2239 
 2240 /*
 2241  * Set cr3
 2242  */
ENTRY(set_cr3)
#if     NCPUS > 1
        CPU_NUMBER(%eax)                /* MP: fold cpu number into the low */
        orl     4(%esp), %eax           /* bits of the new cr3 value */
#else   /* NCPUS > 1 && AT386 */
        movl    4(%esp),%eax            /* get new cr3 value */
#endif  /* NCPUS > 1 && AT386 */
        /*
         * Don't set PDBR to a new value (hence invalidating the
         * "paging cache") if the new value matches the current one.
         */
        movl    %cr3,%edx               /* get current cr3 value */
        cmpl    %eax,%edx
        je      0f                      /* if two are equal, don't set */
        movl    %eax,%cr3               /* load it (and flush cache) */
0:
        ret
 2260 
 2261 /*
 2262  * Read cr3
 2263  */
ENTRY(get_cr3)
        movl    %cr3,%eax
#if     NCPUS > 1
        andl    $(~0x7), %eax           /* remove cpu number (see set_cr3) */
#endif  /* NCPUS > 1 && AT386 */
        ret
 2270 
 2271 /*
 2272  * Flush TLB
 2273  */
ENTRY(flush_tlb)
        movl    %cr3,%eax               /* flush tlb by reloading CR3 */
        movl    %eax,%cr3               /* with itself (non-global entries) */
        ret
 2278 
 2279 /*
 2280  * Read cr2
 2281  */
ENTRY(get_cr2)
        movl    %cr2,%eax               /* CR2 = last page-fault linear addr */
        ret
 2285 
 2286 /*
 2287  * Read cr4
 2288  */
ENTRY(get_cr4)
        .byte   0x0f,0x20,0xe0          /* movl %cr4, %eax -- hand-encoded, */
                                        /* presumably for assemblers that */
                                        /* don't know %cr4 */
        ret
 2292 
 2293 /*
 2294  * Write cr4
 2295  */
ENTRY(set_cr4)
        movl    4(%esp), %eax
        .byte   0x0f,0x22,0xe0          /* movl %eax, %cr4 -- hand-encoded, */
                                        /* presumably for assemblers that */
                                        /* don't know %cr4 */
        ret
 2300 
 2301 /*
 2302  * Read ldtr
 2303  */
 2304 Entry(get_ldt)
 2305         xorl    %eax,%eax
 2306         sldt    %ax
 2307         ret
 2308 
  2309 /*
  2310  * Set ldtr
       *
       * void set_ldt(unsigned int selector)
       *
       * lldt reads a 16-bit selector directly from the argument slot on
       * the stack.
  2311  */
  2312 Entry(set_ldt)
  2313         lldt    4(%esp)                 /* load LDT selector from arg 0 */
  2314         ret
 2315 
  2316 /*
  2317  * Read task register.
       *
       * unsigned int get_tr(void)
       *
       * str writes only 16 bits; %eax is cleared first so the TSS
       * selector is returned zero-extended.
  2318  */
  2319 ENTRY(get_tr)
  2320         xorl    %eax,%eax               /* clear upper bits of result */
  2321         str     %ax                     /* fetch TSS selector */
  2322         ret
 2323 
  2324 /*
  2325  * Set task register.  Also clears busy bit of task descriptor.
       *
       * void set_tr(unsigned int selector)
       *
       * ltr faults if the target TSS descriptor is marked busy, so the
       * descriptor's access byte is first rewritten to K_TSS (not-busy).
       * sgdt stores a 6-byte pseudo-descriptor; it is placed at 2(%esp)
       * so the 4-byte linear base ends up dword-aligned at 4(%esp).
  2326  */
  2327 ENTRY(set_tr)
  2328         movl    S_ARG0,%eax             /* get task segment number */
  2329         subl    $8,%esp                 /* push space for SGDT */
  2330         sgdt    2(%esp)                 /* store GDT limit and base (linear) */
  2331         movl    4(%esp),%edx            /* address GDT */
  2332         movb    $(K_TSS),5(%edx,%eax)   /* fix access byte in task descriptor */
  2333         ltr     %ax                     /* load task register */
  2334         addl    $8,%esp                 /* clear stack */
  2335         ret                             /* and return */
 2336 
 2337 /*
 2338  * Set task-switched flag.
 2339  */
 2340 ENTRY(_setts)
 2341         movl    %cr0,%eax               /* get cr0 */
 2342         orl     $(CR0_TS),%eax          /* or in TS bit */
 2343         movl    %eax,%cr0               /* set cr0 */
 2344         ret
 2345 
  2346 /*
  2347  * io register must not be used on slaves (no AT bus)
       *
       * ILL_ON_SLAVE expands to nothing here; it exists as a marker (and
       * hook) for routines that must not run on slave CPUs.
  2348  */
  2349 #define ILL_ON_SLAVE
  2350 
  2351 
  2352 #if     MACH_ASSERT
  2353 
       /*
        * With MACH_ASSERT the I/O routines below build a full %ebp frame
        * (FRAME/EMARF) and fetch arguments frame-relative (B_ARG*);
        * otherwise they are frameless and fetch stack-relative (S_ARG*).
        * NOTE(review): B_ARG*/S_ARG*/FRAME/EMARF come from the machine
        * asm headers -- confirm their exact offsets there.
        */
  2354 #define ARG0            B_ARG0
  2355 #define ARG1            B_ARG1
  2356 #define ARG2            B_ARG2
  2357 #define PUSH_FRAME      FRAME
  2358 #define POP_FRAME       EMARF
  2359 
  2360 #else   /* MACH_ASSERT */
  2361 
  2362 #define ARG0            S_ARG0
  2363 #define ARG1            S_ARG1
  2364 #define ARG2            S_ARG2
  2365 #define PUSH_FRAME      
  2366 #define POP_FRAME       
  2367 
  2368 #endif  /* MACH_ASSERT */
 2369 
 2370 
 2371 #if     MACH_KDB || MACH_ASSERT
 2372 
 2373 /*
 2374  * Following routines are also defined as macros in i386/pio.h
 2375  * Compile then when MACH_KDB is configured so that they
 2376  * can be invoked from the debugger.
 2377  */
 2378 
  2379 /*
  2380  * void outb(unsigned char *io_port,
  2381  *           unsigned char byte)
  2382  *
  2383  * Output a byte to an IO port.
       * (Also available as a macro in i386/pio.h; this callable version
       * exists so the debugger can invoke it.)
  2384  */
  2385 ENTRY(outb)
  2386         PUSH_FRAME
  2387         ILL_ON_SLAVE
  2388         movl    ARG0,%edx               /* IO port address */
  2389         movl    ARG1,%eax               /* data to output */
  2390         outb    %al,%dx                 /* send it out */
  2391         POP_FRAME
  2392         ret
 2393 
  2394 /*
  2395  * unsigned char inb(unsigned char *io_port)
  2396  *
  2397  * Input a byte from an IO port.
       * inb only writes %al, so %eax is pre-cleared to return the byte
       * zero-extended.
  2398  */
  2399 ENTRY(inb)
  2400         PUSH_FRAME
  2401         ILL_ON_SLAVE
  2402         movl    ARG0,%edx               /* IO port address */
  2403         xor     %eax,%eax               /* clear high bits of register */
  2404         inb     %dx,%al                 /* get the byte */
  2405         POP_FRAME
  2406         ret
 2407 
  2408 /*
  2409  * void outw(unsigned short *io_port,
  2410  *           unsigned short word)
  2411  *
  2412  * Output a word to an IO port.
  2413  */
  2414 ENTRY(outw)
  2415         PUSH_FRAME
  2416         ILL_ON_SLAVE
  2417         movl    ARG0,%edx               /* IO port address */
  2418         movl    ARG1,%eax               /* data to output */
  2419         outw    %ax,%dx                 /* send it out */
  2420         POP_FRAME
  2421         ret
 2422 
  2423 /*
  2424  * unsigned short inw(unsigned short *io_port)
  2425  *
  2426  * Input a word from an IO port.
       * inw only writes %ax, so %eax is pre-cleared to return the word
       * zero-extended.
  2427  */
  2428 ENTRY(inw)
  2429         PUSH_FRAME
  2430         ILL_ON_SLAVE
  2431         movl    ARG0,%edx               /* IO port address */
  2432         xor     %eax,%eax               /* clear high bits of register */
  2433         inw     %dx,%ax                 /* get the word */
  2434         POP_FRAME
  2435         ret
 2436 
  2437 /*
  2438  * void outl(unsigned int *io_port,
  2439  *           unsigned int data)
  2440  *
  2441  * Output an int to an IO port.
  2442  */
  2443 ENTRY(outl)
  2444         PUSH_FRAME
  2445         ILL_ON_SLAVE
  2446         movl    ARG0,%edx               /* IO port address*/
  2447         movl    ARG1,%eax               /* data to output */
  2448         outl    %eax,%dx                /* send it out */
  2449         POP_FRAME
  2450         ret
 2451 
  2452 /*
  2453  * unsigned int inl(unsigned int *io_port)
  2454  *
  2455  * Input an int from an IO port.
       * No pre-clear needed: inl writes the full 32 bits of %eax.
  2456  */
  2457 ENTRY(inl)
  2458         PUSH_FRAME
  2459         ILL_ON_SLAVE
  2460         movl    ARG0,%edx               /* IO port address */
  2461         inl     %dx,%eax                /* get the int */
  2462         POP_FRAME
  2463         ret
 2464 
 2465 #endif  /* MACH_KDB  || MACH_ASSERT*/
 2466 
 2467 /*
 2468  * void loutb(unsigned byte *io_port,
 2469  *            unsigned byte *data,
 2470  *            unsigned int count)
 2471  *
 2472  * Output an array of bytes to an IO port.
 2473  */
 2474 ENTRY(loutb)
 2475 ENTRY(outsb)
 2476         PUSH_FRAME
 2477         ILL_ON_SLAVE
 2478         movl    %esi,%eax               /* save register */
 2479         movl    ARG0,%edx               /* get io port number */
 2480         movl    ARG1,%esi               /* get data address */
 2481         movl    ARG2,%ecx               /* get count */
 2482         cld                             /* count up */
 2483         rep
 2484         outsb                           /* output */
 2485         movl    %eax,%esi               /* restore register */
 2486         POP_FRAME
 2487         ret     
 2488 
 2489 
  2490 /*
  2491  * void loutw(unsigned short *io_port,
  2492  *            unsigned short *data,
  2493  *            unsigned int count)
  2494  *
  2495  * Output an array of shorts to an IO port.
       * %esi (callee-saved) is preserved in %eax, which is free here.
  2496  */
  2497 ENTRY(loutw)
  2498 ENTRY(outsw)
  2499         PUSH_FRAME
  2500         ILL_ON_SLAVE
  2501         movl    %esi,%eax               /* save register */
  2502         movl    ARG0,%edx               /* get io port number */
  2503         movl    ARG1,%esi               /* get data address */
  2504         movl    ARG2,%ecx               /* get count */
  2505         cld                             /* count up */
  2506         rep
  2507         outsw                           /* output */
  2508         movl    %eax,%esi               /* restore register */
  2509         POP_FRAME
  2510         ret
 2511 
  2512 /*
  2513  * void loutl(unsigned short io_port,
  2514  *            unsigned int *data,
  2515  *            unsigned int count)
  2516  *
  2517  * Output an array of longs to an IO port.
       * (Header previously mis-titled "loutw".)
  2518  */
  2519 ENTRY(loutl)
  2520 ENTRY(outsl)
  2521         PUSH_FRAME
  2522         ILL_ON_SLAVE
  2523         movl    %esi,%eax               /* save register */
  2524         movl    ARG0,%edx               /* get io port number */
  2525         movl    ARG1,%esi               /* get data address */
  2526         movl    ARG2,%ecx               /* get count */
  2527         cld                             /* count up */
  2528         rep
  2529         outsl                           /* output */
  2530         movl    %eax,%esi               /* restore register */
  2531         POP_FRAME
  2532         ret
 2533 
 2534 
  2535 /*
  2536  * void linb(unsigned char *io_port,
  2537  *           unsigned char *data,
  2538  *           unsigned int count)
  2539  *
  2540  * Input an array of bytes from an IO port.
       * %edi (callee-saved) is preserved in %eax, which is free here.
  2541  */
  2542 ENTRY(linb)
  2543 ENTRY(insb)
  2544         PUSH_FRAME
  2545         ILL_ON_SLAVE
  2546         movl    %edi,%eax               /* save register */
  2547         movl    ARG0,%edx               /* get io port number */
  2548         movl    ARG1,%edi               /* get data address */
  2549         movl    ARG2,%ecx               /* get count */
  2550         cld                             /* count up */
  2551         rep
  2552         insb                            /* input */
  2553         movl    %eax,%edi               /* restore register */
  2554         POP_FRAME
  2555         ret
 2556 
 2557 
  2558 /*
  2559  * void linw(unsigned short *io_port,
  2560  *           unsigned short *data,
  2561  *           unsigned int count)
  2562  *
  2563  * Input an array of shorts from an IO port.
       * %edi (callee-saved) is preserved in %eax, which is free here.
  2564  */
  2565 ENTRY(linw)
  2566 ENTRY(insw)
  2567         PUSH_FRAME
  2568         ILL_ON_SLAVE
  2569         movl    %edi,%eax               /* save register */
  2570         movl    ARG0,%edx               /* get io port number */
  2571         movl    ARG1,%edi               /* get data address */
  2572         movl    ARG2,%ecx               /* get count */
  2573         cld                             /* count up */
  2574         rep
  2575         insw                            /* input */
  2576         movl    %eax,%edi               /* restore register */
  2577         POP_FRAME
  2578         ret
 2579 
 2580 
  2581 /*
  2582  * void linl(unsigned short io_port,
  2583  *           unsigned int *data,
  2584  *           unsigned int count)
  2585  *
  2586  * Input an array of longs from an IO port.
       * %edi (callee-saved) is preserved in %eax, which is free here.
  2587  */
  2588 ENTRY(linl)
  2589 ENTRY(insl)
  2590         PUSH_FRAME
  2591         ILL_ON_SLAVE
  2592         movl    %edi,%eax               /* save register */
  2593         movl    ARG0,%edx               /* get io port number */
  2594         movl    ARG1,%edi               /* get data address */
  2595         movl    ARG2,%ecx               /* get count */
  2596         cld                             /* count up */
  2597         rep
  2598         insl                            /* input */
  2599         movl    %eax,%edi               /* restore register */
  2600         POP_FRAME
  2601         ret
 2602 
 2603 
  2604 /*
  2605  * int inst_fetch(int eip, int cs);
  2606  *
  2607  * Fetch instruction byte.  Return -1 if invalid address.
       *
       * The caller's code segment is loaded into %fs so the byte is read
       * with the caller's segmentation in effect.  The RETRY entry makes
       * a restart re-load %fs; the RECOVER entry routes a fault on the
       * %fs: load to inst_fetch_fault, which returns -1.
  2608  */
  2609         .globl  EXT(inst_fetch)
  2610 LEXT(inst_fetch)
  2611         movl    S_ARG1, %eax            /* get segment */
  2612         movw    %ax,%fs                 /* into FS */
  2613         movl    S_ARG0, %eax            /* get offset */
  2614         RETRY_SECTION
  2615         RETRY(EXT(inst_fetch))          /* re-load FS on retry */
  2616         RECOVERY_SECTION
  2617         RECOVER(EXT(inst_fetch_fault))
  2618         movzbl  %fs:(%eax),%eax         /* load instruction byte */
  2619         ret
  2620 
  2621 LEXT(inst_fetch_fault)
  2622         movl    $-1,%eax                /* return -1 if error */
  2623         ret
 2624 
 2625 
 2626 #if MACH_KDP
  2627 /*
  2628  * kdp_copy_kmem(char *src, char *dst, int count)
  2629  *
  2630  * Similar to copyin except that both addresses are kernel addresses.
       *
       * Returns the number of bytes copied (the full count on success,
       * 0 on failure).  Rejects a source range that wraps the address
       * space.  Copies dword-at-a-time, then the remaining 0-3 bytes.
       * Faults during the copy are routed via the RECOVER entries to
       * kdp_vm_read_fail.
       * NOTE(review): the fault path reports 0 even if some bytes were
       * already copied -- confirm callers expect all-or-nothing.
  2631  */
  2632 
  2633 ENTRY(kdp_copy_kmem)
  2634         pushl   %esi
  2635         pushl   %edi                    /* save registers */
  2636 
  2637         movl    8+S_ARG0,%esi           /* get kernel start address */
  2638         movl    8+S_ARG1,%edi           /* get kernel destination address */
  2639 
  2640         movl    8+S_ARG2,%edx           /* get count */
  2641 
  2642         lea     0(%esi,%edx),%eax       /* get kernel end address + 1 */
  2643 
  2644         cmpl    %esi,%eax
  2645         jb      kdp_vm_read_fail        /* fail if wrap-around */
  2646         cld                             /* count up */
  2647         movl    %edx,%ecx               /* move by longwords first */
  2648         shrl    $2,%ecx
  2649         RECOVERY_SECTION
  2650         RECOVER(kdp_vm_read_fail)
  2651         rep
  2652         movsl                           /* move longwords */
  2653         movl    %edx,%ecx               /* now move remaining bytes */
  2654         andl    $3,%ecx
  2655         RECOVERY_SECTION
  2656         RECOVER(kdp_vm_read_fail)
  2657         rep
  2658         movsb
  2659 kdp_vm_read_done:
  2660         movl    8+S_ARG2,%edx           /* get count */
  2661         subl    %ecx,%edx               /* %ecx is 0 here: return full count */
  2662         movl    %edx,%eax
  2663 
  2664         popl    %edi                    /* restore registers */
  2665         popl    %esi
  2666         ret                             /* and return */
  2667 
  2668 kdp_vm_read_fail:
  2669         xorl    %eax,%eax       /* didn't copy a thing. */
  2670 
  2671         popl    %edi
  2672         popl    %esi
  2673         ret
 2674 #endif
 2675 
 2676 
  2677 /*
  2678  * Done with recovery and retry tables.
       * Emit the end-of-table sentinels into the fault-recovery and
       * retry sections accumulated by the RECOVER/RETRY entries above.
  2679  */
  2680         RECOVERY_SECTION
  2681         RECOVER_TABLE_END
  2682         RETRY_SECTION
  2683         RETRY_TABLE_END
 2684 
 2685 
 2686 
  2687 ENTRY(dr6)
       /* unsigned int dr6(void) -- read debug status register %db6 */
  2688         movl    %db6, %eax
  2689         ret
 2690 
 2691 /*      dr<i>(address, type, len, persistence)
 2692  */
 2693 ENTRY(dr0)
 2694         movl    S_ARG0, %eax
 2695         movl    %eax,EXT(dr_addr)
 2696         movl    %eax, %db0
 2697         movl    $0, %ecx
 2698         jmp     0f
 2699 ENTRY(dr1)
 2700         movl    S_ARG0, %eax
 2701         movl    %eax,EXT(dr_addr)+1*4
 2702         movl    %eax, %db1
 2703         movl    $2, %ecx
 2704         jmp     0f
 2705 ENTRY(dr2)
 2706         movl    S_ARG0, %eax
 2707         movl    %eax,EXT(dr_addr)+2*4
 2708         movl    %eax, %db2
 2709         movl    $4, %ecx
 2710         jmp     0f
 2711 
 2712 ENTRY(dr3)
 2713         movl    S_ARG0, %eax
 2714         movl    %eax,EXT(dr_addr)+3*4
 2715         movl    %eax, %db3
 2716         movl    $6, %ecx
 2717 
 2718 0:
 2719         pushl   %ebp
 2720         movl    %esp, %ebp
 2721 
 2722         movl    %db7, %edx
 2723         movl    %edx,EXT(dr_addr)+4*4
 2724         andl    dr_msk(,%ecx,2),%edx    /* clear out new entry */
 2725         movl    %edx,EXT(dr_addr)+5*4
 2726         movzbl  B_ARG3, %eax
 2727         andb    $3, %al
 2728         shll    %cl, %eax
 2729         orl     %eax, %edx
 2730 
 2731         movzbl  B_ARG1, %eax
 2732         andb    $3, %al
 2733         addb    $0x10, %ecx
 2734         shll    %cl, %eax
 2735         orl     %eax, %edx
 2736 
 2737         movzbl  B_ARG2, %eax
 2738         andb    $3, %al
 2739         addb    $0x2, %ecx
 2740         shll    %cl, %eax
 2741         orl     %eax, %edx
 2742 
 2743         movl    %edx, %db7
 2744         movl    %edx,EXT(dr_addr)+7*4
 2745         movl    %edx, %eax
 2746         leave
 2747         ret
 2748 
  2749         .data
  2750 
  2751 DATA(preemptable)       /* Not on an MP (makes cpu_number() usage unsafe) */
  2752 #if     MACH_RT && (NCPUS == 1)
  2753         .long   0       /* FIXME -- Currently disabled */
  2754 #else
  2755         .long   0       /* FIX ME -- Currently disabled */
  2756 #endif  /* MACH_RT && (NCPUS == 1) */
  2757 
       /*
        * dr_msk[i]: AND-mask that clears watchpoint i's fields in %db7
        * before dr<i>() ORs in the new values (indexed by 2*%ecx above,
        * i.e. byte offset 0/4/8/12 for slots 0-3).
        */
  2758 dr_msk:
  2759         .long   ~0x000f0003
  2760         .long   ~0x00f0000c
  2761         .long   ~0x0f000030
  2762         .long   ~0xf00000c0
       /*
        * dr_addr[0..3]: last address programmed into each %db register;
        * dr_addr[4..7]: %db7 trace written by the dr<i>() common tail.
        */
  2763 ENTRY(dr_addr)
  2764         .long   0,0,0,0
  2765         .long   0,0,0,0
  2766         .text
 2767 
  2768 ENTRY(get_cr0)
       /* unsigned int get_cr0(void) -- read machine control register %cr0 */
  2769         movl    %cr0, %eax
  2770         ret
 2771 
  2772 ENTRY(set_cr0)
       /* void set_cr0(unsigned int value) -- write machine control register %cr0 */
  2773         movl    4(%esp), %eax
  2774         movl    %eax, %cr0
  2775         ret
 2776 
 2777 #ifndef SYMMETRY
 2778 
  2779 /*
  2780  * ffs(mask)
       *
       * Find first set bit, C-library convention: returns the 1-based
       * index of the least-significant set bit, or 0 if mask == 0
       * (bsfl leaves ZF set for a zero source).
  2781  */
  2782 ENTRY(ffs)
  2783         bsfl    S_ARG0, %eax            /* %eax = 0-based bit index */
  2784         jz      0f                      /* mask was zero */
  2785         incl    %eax                    /* convert to 1-based */
  2786         ret
  2787 0:      xorl    %eax, %eax              /* no bit set: return 0 */
  2788         ret
 2789 
  2790 /*
  2791  * cpu_shutdown()
  2792  * Force reboot
       *
       * Loads an IDT with limit 0, then forces a divide-by-zero.  The
       * resulting #DE cannot be delivered (empty IDT), escalating to a
       * triple fault, which resets the processor.
  2793  */
  2794 
       /* null_idtr: 6-byte IDT pseudo-descriptor with limit 0, base 0 */
  2795 null_idtr:
  2796         .word   0
  2797         .long   0
  2798 
  2799 Entry(cpu_shutdown)
  2800         lidt    null_idtr       /* disable the interrupt handler */
  2801         xor     %ecx,%ecx       /* generate a divide by zero */
  2802         div     %ecx,%eax       /* reboot now */
  2803         ret                     /* this will "never" be executed */
 2804 
 2805 #endif  /* SYMMETRY */
 2806 
 2807 
 2808 /*
 2809  * setbit(int bitno, int *s) - set bit in bit string
 2810  */
 2811 ENTRY(setbit)
 2812         movl    S_ARG0, %ecx            /* bit number */
 2813         movl    S_ARG1, %eax            /* address */
 2814         btsl    %ecx, (%eax)            /* set bit */
 2815         ret
 2816 
 2817 /*
 2818  * clrbit(int bitno, int *s) - clear bit in bit string
 2819  */
 2820 ENTRY(clrbit)
 2821         movl    S_ARG0, %ecx            /* bit number */
 2822         movl    S_ARG1, %eax            /* address */
 2823         btrl    %ecx, (%eax)            /* clear bit */
 2824         ret
 2825 
 2826 /*
 2827  * ffsbit(int *s) - find first set bit in bit string
 2828  */
 2829 ENTRY(ffsbit)
 2830         movl    S_ARG0, %ecx            /* address */
 2831         movl    $0, %edx                /* base offset */
 2832 0:
 2833         bsfl    (%ecx), %eax            /* check argument bits */
 2834         jnz     1f                      /* found bit, return */
 2835         addl    $4, %ecx                /* increment address */
 2836         addl    $32, %edx               /* increment offset */
 2837         jmp     0b                      /* try again */
 2838 1:
 2839         addl    %edx, %eax              /* return offset */
 2840         ret
 2841 
  2842 /*
  2843  * testbit(int nr, volatile void *array)
  2844  *
  2845  * Test to see if the bit is set within the bit string
       * Returns -1 (all ones) if the bit is set, 0 if clear: btl copies
       * the bit into CF, and sbbl %eax,%eax materializes CF as 0 or -1.
  2846  */
  2847 
  2848 ENTRY(testbit)
  2849         movl    S_ARG0,%eax     /* Get the bit to test */
  2850         movl    S_ARG1,%ecx     /* get the array string */
  2851         btl     %eax,(%ecx)     /* CF = selected bit */
  2852         sbbl    %eax,%eax       /* %eax = CF ? -1 : 0 */
  2853         ret
 2854 
  2855 ENTRY(get_pc)
       /*
        * Return the caller's saved return address: 4(%ebp) is the return
        * address slot in a standard frame.  Assumes the caller set up an
        * %ebp frame -- TODO confirm all callers are compiled with frame
        * pointers.
        */
  2856         movl    4(%ebp),%eax
  2857         ret
 2858 
 2859 #if     ETAP
 2860 
  2861 ENTRY(etap_get_pc)
       /* Return the caller's return address (assumes caller has an %ebp frame). */
  2862         movl    4(%ebp), %eax           /* fetch pc of caller */
  2863         ret
 2864 
  2865 ENTRY(tvals_to_etap)
       /*
        * Convert (seconds, nanoseconds) to a 64-bit nanosecond count:
        * returns seconds * 10^9 + nanoseconds in %edx:%eax (mull leaves
        * the 64-bit product in %edx:%eax; adc propagates the carry from
        * the nanosecond add into the high half).
        */
  2866         movl    S_ARG0, %eax            /* seconds */
  2867         movl    $1000000000, %ecx       /* ns per second */
  2868         mull    %ecx                    /* %edx:%eax = seconds * 10^9 */
  2869         addl    S_ARG1, %eax            /* add nanoseconds */
  2870         adc     $0, %edx                /* carry into high half */
  2871         ret
 2872 
  2873 /* etap_time_t
  2874  * etap_time_sub(etap_time_t stop, etap_time_t start)
  2875  *      
  2876  *      64bit subtract, returns stop - start
       *      Result in %edx:%eax; sbbl borrows from the low-half subtract
       *      into the high half.
  2877  */                     
  2878 ENTRY(etap_time_sub)
  2879         movl    S_ARG0, %eax            /* stop.low */
  2880         movl    S_ARG1, %edx            /* stop.hi */
  2881         subl    S_ARG2, %eax            /* stop.lo - start.lo */
  2882         sbbl    S_ARG3, %edx            /* stop.hi - start.hi */
  2883         ret
 2884                 
 2885 #endif  /* ETAP */
 2886         
 2887 #if     NCPUS > 1
 2888 
  2889 ENTRY(minsecurity)
       /* Builds a frame, then falls through into jail below. */
  2890         pushl   %ebp
  2891         movl    %esp,%ebp
  2892 /*
  2893  * jail: set the EIP to "jail" to block a kernel thread.
  2894  * Useful to debug synchronization problems on MPs.
       * Spins forever: the jmp targets its own entry label.
  2895  */
  2896 ENTRY(jail)
  2897         jmp     EXT(jail)
 2898 
 2899 #endif  /* NCPUS > 1 */
 2900 
 2901 /*
 2902  * unsigned int
 2903  * div_scale(unsigned int dividend,
 2904  *           unsigned int divisor,
 2905  *           unsigned int *scale)
 2906  *
 2907  * This function returns (dividend << *scale) //divisor where *scale
 2908  * is the largest possible value before overflow. This is used in
 2909  * computation where precision must be achieved in order to avoid
 2910  * floating point usage.
 2911  *
 2912  * Algorithm:
 2913  *      *scale = 0;
 2914  *      while (((dividend >> *scale) >= divisor))
 2915  *              (*scale)++;
 2916  *      *scale = 32 - *scale;
 2917  *      return ((dividend << *scale) / divisor);  
 2918  */
 2919 ENTRY(div_scale)
 2920         PUSH_FRAME
 2921         xorl    %ecx, %ecx              /* *scale = 0 */
 2922         xorl    %eax, %eax
 2923         movl    ARG0, %edx              /* get dividend */
 2924 0:
 2925         cmpl    ARG1, %edx              /* if (divisor > dividend) */
 2926         jle     1f                      /* goto 1f */
 2927         addl    $1, %ecx                /* (*scale)++ */
 2928         shrdl   $1, %edx, %eax          /* dividend >> 1 */
 2929         shrl    $1, %edx                /* dividend >> 1 */
 2930         jmp     0b                      /* goto 0b */
 2931 1:      
 2932         divl    ARG1                    /* (dividend << (32 - *scale)) / divisor */
 2933         movl    ARG2, %edx              /* get scale */
 2934         movl    $32, (%edx)             /* *scale = 32 */
 2935         subl    %ecx, (%edx)            /* *scale -= %ecx */
 2936         POP_FRAME
 2937         ret
 2938 
  2939 /*
  2940  * unsigned int
  2941  * mul_scale(unsigned int multiplicand,
  2942  *           unsigned int multiplier,
  2943  *           unsigned int *scale)
  2944  *
  2945  * This function returns ((multiplicand * multiplier) >> *scale) where
  2946  * scale is the largest possible value before overflow. This is used in
  2947  * computation where precision must be achieved in order to avoid
  2948  * floating point usage.
  2949  *
  2950  * Algorithm:
  2951  *      *scale = 0;
  2952  *      while (overflow((multiplicand * multiplier) >> *scale))
  2953  *              (*scale)++;
  2954  *      return ((multiplicand * multiplier) >> *scale);
       *
       * mull leaves the 64-bit product in %edx:%eax; the loop shifts the
       * product right until the high half (%edx) is zero, i.e. until the
       * result fits in 32 bits, counting the shifts in *scale.
  2955  */
  2956 ENTRY(mul_scale)
  2957         PUSH_FRAME
  2958         xorl    %ecx, %ecx              /* *scale = 0 */
  2959         movl    ARG0, %eax              /* get multiplicand */
  2960         mull    ARG1                    /* multiplicand * multiplier */
  2961 0:
  2962         cmpl    $0, %edx                /* if (!overflow()) */
  2963         je      1f                      /* goto 1 */
  2964         addl    $1, %ecx                /* (*scale)++ */
  2965         shrdl   $1, %edx, %eax          /* (multiplicand * multiplier) >> 1 */
  2966         shrl    $1, %edx                /* (multiplicand * multiplier) >> 1 */
  2967         jmp     0b
  2968 1:
  2969         movl    ARG2, %edx              /* get scale */
  2970         movl    %ecx, (%edx)            /* set *scale */
  2971         POP_FRAME
  2972         ret
 2973 
 2974 #ifdef  MACH_BSD
 2975 /*
 2976  * BSD System call entry point.. 
 2977  */
 2978 
       /*
        * BSD (unix) system call trampoline.
        *
        * Entered from user mode with the syscall number in %eax.  Builds
        * a full user register save state on the PCB stack (the frame
        * arrives rotated -- see the shuffle below), switches to this
        * CPU's kernel stack, and calls unix_syscall() with a pointer to
        * the saved registers.  On return, switches back to the PCB stack,
        * stores the return value into the saved %eax slot, and exits via
        * return_from_trap.
        */
  2979 Entry(trap_unix_syscall)
  2980 trap_unix_addr: 
  2981         pushf                           /* save flags as soon as possible */
  2982 trap_unix_2:    
  2983         pushl   %eax                    /* save system call number */
  2984         pushl   $0                      /* clear trap number slot */
  2985 
  2986         pusha                           /* save the general registers */
  2987         pushl   %ds                     /* and the segment registers */
  2988         pushl   %es
  2989         pushl   %fs
  2990         pushl   %gs
  2991 
  2992         mov     %ss,%dx                 /* switch to kernel data segment */
  2993         mov     %dx,%ds
  2994         mov     %dx,%es
  2995         mov     $ CPU_DATA,%dx
  2996         mov     %dx,%gs
  2997 
  2998 /*
  2999  * Shuffle eflags,eip,cs into proper places
       * (the manual pushf above left the hardware-pushed eip/cs and the
       * flags one slot away from where the save-state layout expects them)
  3000  */
  3001 
  3002         movl    R_EIP(%esp),%ebx        /* eflags are in EIP slot */
  3003         movl    R_CS(%esp),%ecx         /* eip is in CS slot */
  3004         movl    R_EFLAGS(%esp),%edx     /* cs is in EFLAGS slot */
  3005         movl    %ecx,R_EIP(%esp)        /* fix eip */
  3006         movl    %edx,R_CS(%esp)         /* fix cs */
  3007         movl    %ebx,R_EFLAGS(%esp)     /* fix eflags */
  3008 
  3009         CPU_NUMBER(%edx)
  3010         TIME_TRAP_UENTRY                /* start charging user trap time */
  3011 
  3012         negl    %eax                    /* get system call number */
  3013         shll    $4,%eax                 /* manual indexing */
  3014 
  3015         CPU_NUMBER(%edx)
  3016         movl    CX(EXT(kernel_stack),%edx),%ebx
  3017                                         /* get current kernel stack */
  3018         xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
  3019                                         /* user registers. */
  3020 
  3021 /*
  3022  * Register use on entry:
  3023  *   eax contains syscall number
  3024  *   ebx contains user regs pointer
  3025  */
  3026         CAH(call_call)
  3027         pushl   %ebx                    /* Push the regs set onto stack */
  3028         call    EXT(unix_syscall)
  3029         popl    %ebx
  3030         movl    %esp,%ecx               /* get kernel stack */
  3031         or      $(KERNEL_STACK_SIZE-1),%ecx
  3032         movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
  3033         movl    %eax,R_EAX(%esp)        /* save return value */
  3034         jmp     EXT(return_from_trap)   /* return to user */
 3035 
 3036 /*
 3037  * Entry point for machdep system calls..
 3038  */
 3039 
       /*
        * Machine-dependent system call trampoline.  Identical in
        * structure to trap_unix_syscall above (save user state, fix the
        * rotated eip/cs/eflags slots, switch to the kernel stack) but
        * dispatches to machdep_syscall() instead.
        */
  3040 Entry(trap_machdep_syscall)
  3041         pushf                           /* save flags as soon as possible */
  3042         pushl   %eax                    /* save system call number */
  3043         pushl   $0                      /* clear trap number slot */
  3044 
  3045         pusha                           /* save the general registers */
  3046         pushl   %ds                     /* and the segment registers */
  3047         pushl   %es
  3048         pushl   %fs
  3049         pushl   %gs
  3050 
  3051         mov     %ss,%dx                 /* switch to kernel data segment */
  3052         mov     %dx,%ds
  3053         mov     %dx,%es
  3054         mov     $ CPU_DATA,%dx
  3055         mov     %dx,%gs
  3056 
  3057 /*
  3058  * Shuffle eflags,eip,cs into proper places
  3059  */
  3060 
  3061         movl    R_EIP(%esp),%ebx        /* eflags are in EIP slot */
  3062         movl    R_CS(%esp),%ecx         /* eip is in CS slot */
  3063         movl    R_EFLAGS(%esp),%edx     /* cs is in EFLAGS slot */
  3064         movl    %ecx,R_EIP(%esp)        /* fix eip */
  3065         movl    %edx,R_CS(%esp)         /* fix cs */
  3066         movl    %ebx,R_EFLAGS(%esp)     /* fix eflags */
  3067 
  3068         CPU_NUMBER(%edx)
  3069         TIME_TRAP_UENTRY                /* start charging user trap time */
  3070 
  3071         negl    %eax                    /* get system call number */
  3072         shll    $4,%eax                 /* manual indexing */
  3073 
  3074         CPU_NUMBER(%edx)
  3075         movl    CX(EXT(kernel_stack),%edx),%ebx
  3076                                         /* get current kernel stack */
  3077         xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
  3078                                         /* user registers. */
  3079 
  3080 /*
  3081  * Register use on entry:
  3082  *   eax contains syscall number
  3083  *   ebx contains user regs pointer
  3084  */
  3085         CAH(call_call)
  3086         pushl   %ebx                    /* pass user regs pointer */
  3087         call    EXT(machdep_syscall)
  3088         popl    %ebx
  3089         movl    %esp,%ecx               /* get kernel stack */
  3090         or      $(KERNEL_STACK_SIZE-1),%ecx
  3091         movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
  3092         movl    %eax,R_EAX(%esp)        /* save return value */
  3093         jmp     EXT(return_from_trap)   /* return to user */
 3094 
       /*
        * Mach 2.5 compatibility system call trampoline.  Identical in
        * structure to trap_unix_syscall above (save user state, fix the
        * rotated eip/cs/eflags slots, switch to the kernel stack) but
        * dispatches to mach25_syscall() instead.
        */
  3095 Entry(trap_mach25_syscall)
  3096         pushf                           /* save flags as soon as possible */
  3097         pushl   %eax                    /* save system call number */
  3098         pushl   $0                      /* clear trap number slot */
  3099 
  3100         pusha                           /* save the general registers */
  3101         pushl   %ds                     /* and the segment registers */
  3102         pushl   %es
  3103         pushl   %fs
  3104         pushl   %gs
  3105 
  3106         mov     %ss,%dx                 /* switch to kernel data segment */
  3107         mov     %dx,%ds
  3108         mov     %dx,%es
  3109         mov     $ CPU_DATA,%dx
  3110         mov     %dx,%gs
  3111 
  3112 /*
  3113  * Shuffle eflags,eip,cs into proper places
  3114  */
  3115 
  3116         movl    R_EIP(%esp),%ebx        /* eflags are in EIP slot */
  3117         movl    R_CS(%esp),%ecx         /* eip is in CS slot */
  3118         movl    R_EFLAGS(%esp),%edx     /* cs is in EFLAGS slot */
  3119         movl    %ecx,R_EIP(%esp)        /* fix eip */
  3120         movl    %edx,R_CS(%esp)         /* fix cs */
  3121         movl    %ebx,R_EFLAGS(%esp)     /* fix eflags */
  3122 
  3123         CPU_NUMBER(%edx)
  3124         TIME_TRAP_UENTRY                /* start charging user trap time */
  3125 
  3126         negl    %eax                    /* get system call number */
  3127         shll    $4,%eax                 /* manual indexing */
  3128 
  3129         CPU_NUMBER(%edx)
  3130         movl    CX(EXT(kernel_stack),%edx),%ebx
  3131                                         /* get current kernel stack */
  3132         xchgl   %ebx,%esp               /* switch stacks - %ebx points to */
  3133                                         /* user registers. */
  3134 
  3135 /*
  3136  * Register use on entry:
  3137  *   eax contains syscall number
  3138  *   ebx contains user regs pointer
  3139  */
  3140         CAH(call_call)
  3141         pushl   %ebx                    /* pass user regs pointer */
  3142         call    EXT(mach25_syscall)
  3143         popl    %ebx
  3144         movl    %esp,%ecx               /* get kernel stack */
  3145         or      $(KERNEL_STACK_SIZE-1),%ecx
  3146         movl    -3-IKS_SIZE(%ecx),%esp  /* switch back to PCB stack */
  3147         movl    %eax,R_EAX(%esp)        /* save return value */
  3148         jmp     EXT(return_from_trap)   /* return to user */
 3149 
 3150 #endif

Cache object: d4526905f79a629ba0545ce416275551


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.