The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/sqt/interrupt.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /* 
    2  * Mach Operating System
    3  * Copyright (c) 1992,1991 Carnegie Mellon University
    4  * Copyright (c) 1992,1991 Sequent Computer Systems
    5  * All Rights Reserved.
    6  * 
    7  * Permission to use, copy, modify and distribute this software and its
    8  * documentation is hereby granted, provided that both the copyright
    9  * notice and this permission notice appear in all copies of the
   10  * software, derivative works or modified versions, and any portions
   11  * thereof, and that both notices appear in supporting documentation.
   12  * 
   13  * CARNEGIE MELLON AND SEQUENT COMPUTER SYSTEMS ALLOW FREE USE OF
   14  * THIS SOFTWARE IN ITS "AS IS" CONDITION.  CARNEGIE MELLON AND
   15  * SEQUENT COMPUTER SYSTEMS DISCLAIM ANY LIABILITY OF ANY KIND FOR
   16  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
   17  * 
   18  * Carnegie Mellon requests users of this software to return to
   19  * 
   20  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
   21  *  School of Computer Science
   22  *  Carnegie Mellon University
   23  *  Pittsburgh PA 15213-3890
   24  * 
   25  * any improvements or extensions that they make and grant Carnegie Mellon 
   26  * the rights to redistribute these changes.
   27  */
   28 
   29 /*
   30  * HISTORY
   31  * $Log:        interrupt.s,v $
   32  * Revision 2.4  93/01/14  17:55:57  danner
   33  *      Made NMI interrupts call nmi_intr instead of t_res.  Added
   34  *      allow_nmi.
   35  *      [92/10/25            dbg]
   39  * 
   40  * Revision 2.3  91/07/31  18:02:03  dbg
   41  *      Changed copyright.
   42  * 
   43  *      Call interrupt routine, since we may be on a different stack.
   44  *      [91/05/22            dbg]
   45  * 
   46  * Revision 2.2  91/05/08  12:56:40  dbg
   47  *      Put parentheses around substituted immediate expressions, so
   48  *      that they will pass through the GNU preprocessor.
   49  *      [91/01/31            dbg]
   50  * 
   51  *      Converted (interrupt handlers only) for pure kernel.
   52  *      Use Mach Kernel interrupt sequence, not Dynix.
   53  *      [90/05/02            dbg]
   54  * 
   55  */
   56 
   57 .text
   58 
   59 /*
   60  * interrupt.s
   61  *      Machine dependent low-level kernel stuff.
   62  *
   63  * Mostly interrupt and trap handling.
   64  *
   65  * Very machine dependent.  Intel 80386 version.
   66  */
   67 
   68 #include <mach_kdb.h>
   69 
   70 #include <assym.s>
   71 #include <i386/asm.h>
   72 #include <i386/eflags.h>
   73 #include <sqt/asm_macros.h>
   74 #include <sqt/intctl.h>
   75 
   76 /*
   77  * Hardware interrupt handlers.
   78  *
   79  * These are pointed at thru a combination of the per-processor
   80  * Interrupt Descriptor Table (IDT) and Global Descriptor Table (GDT).
   81  *
   82  * There is one handler per SLIC bin.  Bins 1-7 are handled in a common
   83  * manner (the HW interrupts).  Bin 0 is special cased for SW interrupts.
   84  *
   85  * Bins 1-7 handle as follows:
   86  *      Save scratch registers.
   87  *      eax = bin#.
   88  *      Goto "dev_common".
   89  *
   90  * Dev_common:
   91  *      count device interrupt (except bin 7, used for clocks)
   92  *      Save entry IPL.
   93  *      Set up new IPL.
   94  *      Read vector from SLIC.
   95  *      Tell SLIC ok to accept another interrupt.
   96  *      Verify vector as valid.
   97  *      Call interrupt handler thru int_bin_table[].
   98  *      If returning to user mode, check for and handle redispatches
   99  *              (via falling into trap handler (T_SWTCH)).
  100  *      Else, disable interrupts, restore previous IPL, and return.
  101  *
  102  * Handlers are called with vector number as argument.  The bin #
  103  * information is *NOT* passed to the handler.
  104  *
  105  * All interrupts enter via interrupt-gates, thus SW must re-enable
  106  * interrupts at processor.  The main reason for interrupt-gates instead of
  107  * trap-gates is that the SLIC still yanks on the interrupt line until SW
  108  * tells SLIC it has the interrupt; thus if enter with trap-gate, it will
  109  * re-enter constantly and overflow the stack.  Also, other interrupts can
  110  * occur (eg, FPA).
  111  * Bin 0 (SW) interrupts are handled via reading the bin0 message register
  112  * and looping until we clear it out, calling an appropriate SW interrupt
  113  * handler for each bit:
  114  *
  115  *      Save scratch registers.
  116  *      spl1().
  117  *      ldata = SLIC Bin0 message data.
  118  *      ON processor ints.
  119  *      loop {
  120  *              BIT = FFS(ldata).
  121  *              If no bits set {
  122  *                      OFF processor ints.
  123  *                      spl0().
  124  *                      restore registers.
  125  *                      rett.
  126  *              }
  127  *              clear BIT in ldata.
  128  *              call SW handler(BIT).
  129  *              ldata |= SLIC Bin0 message data.
  130  *      }
  131  *
  132  * All entries clear "direction" flag, since C environment assumes this.
  133  *
  134  * Bin 0 interrupt handler uses an interrupt gate, to turn OFF processor
  135  * interrupts until ready to accept another bin0 (or higher) interrupt.
  136  */
  137 
   138 /*
   139  * Kernel segments set up.
   140  * Interrupt number is in %eax.  Only %ecx, %edx usable.
        * _interrupt is the common dispatch point from locore.  The vector
        * base is 0x20, so after the subtract %eax holds the SLIC bin #:
        * bin 0 (SW interrupts) is special-cased at _bin0int, bins 1-7
        * fall through to dev_common, and anything outside 0..7 is
        * rejected as a bad interrupt.
   141  */
   142         .globl  _interrupt              / entry point from locore
   143 _interrupt:
   144         subl    $0x20,%eax              / subtract interrupt base
   145         jl      _bad_interrupt          / bad if < 0
   146         je      _bin0int                / separate handling for bin 0
   147         cmpl    $7,%eax                 / bad if
   148         jg      _bad_interrupt          / > 7
  149 
   150 /*
   151  * Common handling for bin 1-7 interrupts.  EAX == bin# on entry.
   152  * Indexing on int_bin_table assumes the bin_header structure is
   153  * 8-bytes (quad-word).
   154  */
   155 dev_common:
   156         movb    VA_SLIC+SL_LMASK, %cl   / ECX = old SPL.  Save...
   157         movb    spltab(%eax), %ch       / new SPL from table...
   158         movb    %ch, VA_SLIC+SL_LMASK   /       ...masks bin and lower.
   159         pushl   %ecx                    /       ...below scratch regs on stack.
   160                                         / the low byte is the old spl
   161                                         / the high our new spl.
   162         movzbl  VA_SLIC+SL_BININT, %ecx / ECX = vector # from message data.
   163 
   164 /*
   165  * The cpu will block until the write completes.
   166  * This will insure that the spl mask has been set.
   167  * The following write may now take place before or after the "sti".
   168  * The current SL_LMASK is the stronger of the intended spl and the
   169  * spl at the time of the interrupt. This simplifies the concerns when
   170  * returning from interrupt later on.
   171  */
   172 
   173         movb    $0, VA_SLIC+SL_BININT   / tell SLIC ok for more interrupts.
   174         sti                             / ok for more interrupts now.
   175         cld                             / in case intr`d code had it set.
   176         leal    _int_bin_table(,%eax,8), %edx / EDX -> intr table for this bin.
   177         pushl   %ecx                    / argument = vector #.
   178         cmpl    %ecx, BH_SIZE(%edx)     / valid vector?
   179         jle     bogusint                / nope.
   180         movl    BH_HDLRTAB(%edx), %eax  / EAX == base of vectors for this bin.
   181         call    *(%eax,%ecx,4)          / call handler.
   182         addl    $4, %esp                / clear stack.
       /*
        * intdone -- common interrupt exit, also reached from _bin0int
        * and bogusint.  Top of stack is the SPL word pushed on entry:
        * old mask in the low byte (%cl), the mask set for this
        * interrupt in the high byte (%ch).
        */
   183 intdone:
   184         popl    %ecx                    / Get entry SPL.
   185         testl   $(EFL_VM), R_EFLAGS(%esp) / returning to V86 mode?
   186         jne     inturet
   187         testb   $3, R_CS(%esp)          / returning to user mode?
   188         je      intkret                 / no
   189 inturet:
   190         cli                             / OFF processor interrupts.
   191         movb    %cl, VA_SLIC+SL_LMASK   / restore entry SPL.
   192 intret0:
   193         ret                             / return to interrupt exit
   194 
  194 
   195 /*
   196  * Interrupt return to kernel.
   197  * Since the SLIC mask is now guaranteed to be stronger than the saved
   198  * spl we no longer need to worry when it takes effect with respect to the
   199  * iret.
   200  */
   201 intkret:
   202         cli                             / OFF processor interrupts.
   203 /*
   204  * %cl is the old mask, %ch is the current mask.
   205  * it is possible that the saved mask was set to a higher value than
   206  * we are currently running at. This would happen for example if
   207  * we had just written to the slic mask and within 500 ns the interrupt
   208  * that we are returning from occurred.
   209  * In this case we must be careful that we don't hit the iret
   210  * without insuring that the mask has been set.
   211  */
   212 
   213         cmpb    %cl,%ch                 / going back to higher SPL?
   214         je      intret0                 / same level do nothing
   215         jb      intret1                 / going to higher spl
   216         movb    %cl, VA_SLIC+SL_LMASK   / restore entry SPL.
   217         jmp     intret0                 / return from interrupt
   218 intret1:
   219         movb    %cl, VA_SLIC+SL_LMASK   / restore entry SPL.
   220 intret2:
   221         movb    VA_SLIC+SL_LMASK, %ch   / re-read mask from the SLIC
   222         cmpb    %ch,%cl                 / loop until set
   223         jne     intret2
   224         jmp     intret0                 / return from interrupt
  225 
   226 /*
   227  * spltab[]
   228  *      Maps bin # to IPL value to put in SLIC local-mask register.
   229  *
   230  * spltab[i] masks interrupts `i' and lower priority.
        *
        * dev_common indexes this with %eax = bin # in 1..7, so entry [0]
        * is never read from that path.
   231  */
   232         .align  2
   233 spltab:
   234         .byte   SPL1                    / [0]
   235         .byte   SPL2                    / [1]
   236         .byte   SPL3                    / [2]
   237         .byte   SPL_HOLE                / [3] NOTE(review): presumably bin 3 has no standard SPL -- confirm against sqt/intctl.h
   238         .byte   SPL4                    / [4]
   239         .byte   SPL5                    / [5]
   240         .byte   SPL6                    / [6]
   241         .byte   SPL7                    / [7]
  242 
   243 /*
   244  * Got bogus interrupt...  Vector # larger than allocated handler table
   245  * for the bin.  Dev_common already pushed vector #.
        *
        * With the bin # pushed here on top of the vector #, the cdecl
        * call below is _bogusint(bin, vector); both words are popped
        * before rejoining the normal exit path at intdone.
   246  */
   247 
   248         .text
   249 bogusint:
   250         pushl   %eax                    / bin #
   251         call    _bogusint               / complain about this!
   252         addl    $8, %esp                / clear junk off stack.
   253         jmp     intdone                 / return from interrupt.
  254 
   255 /*
   256  * Undefined SW trap handler.
        * Panics unconditionally; _panic is not expected to return, so the
        * stack cleanup and ret below are deliberately commented out.
   257  */
   258 ENTRY(swt_undef)
   259         pushl   $swtundef               / panic message
   260         call    _panic                  / no deposit, no return
   261         #addl   $4, %esp                / not really
   262         #ret                            / not really
   263 
   264         .data
   265 swtundef:
   266         .asciz  "Undefined software trap"
   267         .text
  268 
   269 /*
   270  * Bin0 (SW) interrupt handler.  Entered thru interrupt gate, thus
   271  * interrupts masked at processor.
   272  *
   273  * Called routines must *not* redispatch; they must behave as interrupts.
   274  */
   275 ENTRY(bin0int)
   276 /*
   277  * was SPL_ASM($SPL1,%al) but to set SPL to mask bin 0.
   278  * But slic mask may now be greater than spl0 due to synchronisation
   279  * slippage, so add spl1 to what it currently is.
   280  */
   281 
   282         movb    VA_SLIC+SL_LMASK, %al
   283         movb    $(SPL1),%ah             / store the new mask for int_ret
   284         movb    %ah, VA_SLIC+SL_LMASK
   285         pushl   %eax                    / save entry SPL (should be SPL0).
   286         movzbl  VA_SLIC+SL_B0INT, %ecx  / ECX = Bin0 message data (mask).
   287         sti                             / ON processor interrupts.
   288         cld                             / in case intr`d code had it set.
   289 0:      bsfl    %ecx, %eax              / Find software trap bit
   290         je      intdone                 / no bit ==> done (bsf sets ZF when %ecx == 0).
   291         btrl    %eax, %ecx              / clear soft interrupt bit.
   292         pushl   %ecx                    / save remaining interrupt data.
       /*
        * NOTE(review): the pushed word (remaining mask) occupies the
        * argument slot of this cdecl call, while the file header says
        * handlers are called with the bit number -- confirm what the
        * _softvec handlers actually read.
        */
   293         call    *_softvec(,%eax,4)      / call soft interrupt routine.
   294         popl    %ecx                    / restore interrupt data
   295         orb     VA_SLIC+SL_B0INT, %cl   / CL |= Bin0 message data (mask)
   296         jmp     0b                      / repeat until no bits set.
  297 
   298 /*
   299  * Unconditionally configured SW interrupt handlers.
   300  */
   301 
   302 /*
   303  * undef()
   304  *      No such.  Somebody goofed.
        *      Panics unconditionally; _panic is not expected to return,
        *      so the cleanup below is deliberately commented out.
   305  */
   306 ENTRY(undef)
   307         pushl   $undefmsg               / panic message
   308         call    _panic                  / no deposit, no return
   309         #addl   $4, %esp                / not really
   310         #ret                            / not really
   311 
   312         .data
   313 undefmsg:
   314         .asciz  "Undefined software interrupt"
   315         .text
  316 
   317 /*
   318  * Interrupt other than for bins 0..7
        * %eax is restored to the raw vector number, then control
        * tail-jumps to the appropriate trap handler: vector 2 is the
        * NMI, everything else is treated as a reserved trap.
   319  */
   320 _bad_interrupt:
   321         addl    $0x20,%eax              / add back interrupt base
   322         cmpl    $2,%eax                 / is it an NMI?
   323         jne     EXT(t_res)              / if not, handle as reserved trap
   324         jmp     EXT(t_nmi)              / if so, test NMI causes
  325 
  326 /*
  327  * T_NMI -- Non-Maskable Interrupt.  No error code.
  328  * Entered thru interrupt gate (interrupts disabled).
  329  *
  330  * If probe_nmi == NULL, handle as trap (which will panic the system).
  331  * Else, jump to probe_nmi.  The "jump" is via an iret, to allow NMI's
  332  * again in the processor (80386 disables NMI's until an iret is executed).
  333  * The "iret" also removes the NMI stack frame.
  334  */
  335 #ifdef  KERNEL_PROFILING
  336         .globl  _kp_nmi
  337 #endif  KERNEL_PROFILING
  338 
  339 ENTRY(t_nmi)
  340         movl    _probe_nmi, %eax        / probe_nmi procedure, or NULL.
  341         cmpl    $0, %eax                / probing?
  342         jz      t_nmi_real              / no -- a real NMI
  343         movl    $_return_to_iret,%edx
  344         cmpl    (%esp),%edx             / did we switch stacks?
  345         je      0f                      / if not:
  346         movl    %eax, 4*4(%esp)         / alter return IP to probe_nmi function.
  347         ret                             / restore regs and jump to
  348                                         / probe-NMI handler.
  349 0:
  350         movl    4(%esp),%edx            / point to interrupt reg save area
  351         movl    %eax,I_EIP(%esp)        / alter return IP to probe_nmi function.
  352         ret                             / restore regs and jump to
  353                                         / probe-NMI handler.
  354 
  355         .data
  356         .globl  _probe_nmi
  357 _probe_nmi:
  358         .long   0
  359         .text
  360 
   361 /*
   362  * NMI and no probe routine set.  If kernel profiling configured,
   363  * do it.
   364  */
   365 t_nmi_real:
   366 #ifdef  KERNEL_PROFILING
   367         cld                             / in case trapped code had it set.
   368         call    _kp_nmi                 / assume profiler NMI
   369         testl   %eax,%eax               / was it really?
   370         jne     0f                      / no -- a real NMI.
   371         ret                             / yes -- return from trap
   372 0:
   373 #endif  KERNEL_PROFILING
   374 
   375 /*
   376  * Real NMI.  Call C.
        * Arguments are pushed right-to-left, so this is nmi_intr(2, spl)
        * -- the same (vector, old-SPL) layout a normal interrupt stack
        * has; vector 2 is the NMI.
   377  */
   378         movzbl  VA_SLIC+SL_LMASK,%ecx   / get previous SPL
   379         pushl   %ecx                    / push to make stack same as
   380         pushl   $2                      / normal interrupt stack
   381         call    _nmi_intr               / call C
   382         addl    $8,%esp                 / clean up stack
   383         ret                             / done
  384 
   385 /*
   386  * Allow further NMI interrupts by executing an IRET.
        * Builds an eip/cs/eflags frame out of our own return address so
        * the iret lands back in the caller; the 80386 holds off further
        * NMI delivery until an iret is executed.
   387  */
   388 ENTRY(allow_nmi)
   389         popl    %eax                    / get return address
   390         pushfl                          / push flags
   391         push    %cs                     / push kernel code segment
   392         pushl   %eax                    / push return address
   393         iret                            / IRET to caller to enable NMIs
  394 
   395 /*
   396  * T_RES -- Reserved trap entry.  Serious Problem.
   397  *
   398  * This is entered via interrupt-gate (interrupts masked at processor).
   399  * Use "splhi()" to insure interrupts can't be turned on; panic printf's
   400  * will re-enable processor interrupts due to "v_gate()".
   401  *
   402  * This is used in all otherwise unused slots in the IDT.  Thus it catches
   403  * bogus interrupt vectors from the hardware.
        *
        * When reached from _bad_interrupt, %eax holds the raw vector
        * number, printed by the %d in t_res_msg.  Note t_res_msg is
        * emitted here in .text (no .data switch), after the ret.
   404  */
   405 ENTRY(t_res)
   406         SPL_ASM($(SPLHI),%bl)           / %bl = splhi()
   407         sti                             / processor now allows interrupts.
   408         pushl   %eax                    / push error code
   409         pushl   $t_res_msg              / message
   410         call    _panic                  / panic(msg, int number)
   411         addl    $8,%esp                 / allow return
   412         ret
   413 t_res_msg:
   414         .asciz  "Undefined interrupt %d"
  415 
   416 #ifdef  notdef
       /*
        * NOTE: this entire region is compiled out ("#ifdef notdef").
        * Kept for reference only.
        */
   417 /*
   418  * t_fpa -- FPA exception.  This is actually an interrupt.
   419  *
   420  * Enter via interrupt gate so interrupts are disabled.
   421  * Must read FPA process context register (PCR), then mask all exceptions
   422  * and insure this is sync'd, then finally enable interrupts and call
   423  * fpa_trap() to do the dirty work.
   424  */
   425 ENTRY(t_fpa)
   426         TRAP_ENTER_NOERR(99)
   427         jmp     trap_common
       /*
        * Everything below the unconditional jmp above is unreachable
        * even if this region were re-enabled.
        */
   428         /*
   429          * Enter much like bin0int.
   430          */
   431         pushl   %eax                    # save...
   432         pushl   %ecx                    #       ...scratch
   433         pushl   %edx                    #               ...registers.
   434         movw    $(KERNEL_DS), %ax       # establish...
   435         movw    %ax, %ds                #       ...kernel DS
   436         movw    %ax, %es                #               ... kernel ES.
   437         movb    VA_SLIC+SL_LMASK, %al   # %al = entry SPL.
   438         movb    $(SPL0), %ah            # for intdone
   439         pushl   %eax                    # save entry SPL.
   440         /*
   441          * Read FPA PCR, then mask all exceptions.
   442          */
   443         movl    VA_FPA+FPA_STCTX, %ecx                  # %ecx = FPA PCR
   444         movl    $(FPA_PCR_EM_ALL), VA_FPA+FPA_LDCTX     # mask all exceptions.
   445         movl    VA_FPA+FPA_STCTX, %edx                  # synch the above write.
   446         /*
   447          * Now can re-enable interrupts and call real FPA trap handler.
   448          * Once re-enable processor interrupts, can take SLIC interrupt.
   449          * Note that SLIC interrupt goes first if FPA and SLIC arrive
   450          * at processor simultaneously.
   451          */
   452         sti                             # interrupts ON again.
   453         pushl   %ecx                    # call it with nasty PCR.
   454         call    _fpa_trap               # poke at process.
   455         popl    %ecx                    # clear stack.
   456         jmp     intdone                 # all done.
   457 
   458 #ifdef  KERNEL_PROFILING
   459 kp_trapret:
   460 #endif  KERNEL_PROFILING
   461         addl    $8, %esp                # clear off traptype and error code
   462         testb   $(RPL_MASK), SP_CS(%esp) # going back to user mode?
   463         je      9f                      # Nope -- avoid seg-reg fuss.
   464         cli                             # restoring ds, es can`t reenter!
   465         movw    $(USER_DS), %ax         # restore...
   466         movw    %ax, %ds                #       ...user-mode DS
   467         movw    %ax, %es                #               ...user-mode ES.
   468 9:      popal                           # restore interrupted registers.
   469         iret                            # back from whence we came.
   470 
   471 #endif  notdef
  472 

Cache object: 808350330f652c7ac9401ade8ad5071c


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.