The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/i386/isa/ipl.s

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1989, 1990 William F. Jolitz.
    3  * Copyright (c) 1990 The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software contributed to Berkeley by
    7  * William Jolitz.
    8  *
    9  * Redistribution and use in source and binary forms, with or without
   10  * modification, are permitted provided that the following conditions
   11  * are met:
   12  * 1. Redistributions of source code must retain the above copyright
   13  *    notice, this list of conditions and the following disclaimer.
   14  * 2. Redistributions in binary form must reproduce the above copyright
   15  *    notice, this list of conditions and the following disclaimer in the
   16  *    documentation and/or other materials provided with the distribution.
   17  * 3. All advertising materials mentioning features or use of this software
   18  *    must display the following acknowledgement:
   19  *      This product includes software developed by the University of
   20  *      California, Berkeley and its contributors.
   21  * 4. Neither the name of the University nor the names of its contributors
   22  *    may be used to endorse or promote products derived from this software
   23  *    without specific prior written permission.
   24  *
   25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   35  * SUCH DAMAGE.
   36  *
   37  *      @(#)ipl.s
   38  *
   39  * $FreeBSD$
   40  */
   41 
   42 
   43 /*
   44  * AT/386
   45  * Vector interrupt control section
   46  *
   47  *  cpl         - Current interrupt disable mask
   48  *  *_imask     - Interrupt masks for various spl*() functions
   49  *  ipending    - Pending interrupts (set when a masked interrupt occurs)
   50  */
   51 
    52         .data
    53         ALIGN_DATA
    54 
    55 /* current priority (all off) */
    56         .globl  _cpl
    57 _cpl:   .long   HWI_MASK | SWI_MASK
    58 
       /*
        * spl*() interrupt masks.  Each *_imask is the set of interrupt bits
        * blocked while at the corresponding spl level.  Only the SWI bits are
        * known at assembly time; NOTE(review): the HWI bits are presumably
        * or'ed in at interrupt-registration time by code outside this file --
        * confirm against the icu/apic attach code.
        */
    59         .globl  _tty_imask
    60 _tty_imask:     .long   SWI_TTY_MASK
    61         .globl  _bio_imask
    62 _bio_imask:     .long   SWI_CLOCK_MASK | SWI_CAMBIO_MASK
    63         .globl  _net_imask
    64 _net_imask:     .long   SWI_NET_MASK | SWI_CAMNET_MASK
    65         .globl  _cam_imask
    66 _cam_imask:     .long   SWI_CAMBIO_MASK | SWI_CAMNET_MASK
    67         .globl  _soft_imask
    68 _soft_imask:    .long   SWI_MASK
    69         .globl  _softnet_imask
    70 _softnet_imask: .long   SWI_NET_MASK
    71         .globl  _softtty_imask
    72 _softtty_imask: .long   SWI_TTY_MASK
    73 
    74 /* pending interrupts blocked by splxxx() */
    75         .globl  _ipending
    76 _ipending:      .long   0
    77 
    78 /* set with bits for which queue to service */
    79         .globl  _netisr
    80 _netisr:        .long   0
    81 
       /*
        * 32-entry dispatch table of network software-interrupt handlers,
        * indexed by _netisr bit number.  Every slot starts as dummynetisr
        * (a no-op defined below); NOTE(review): real handlers are presumably
        * installed at runtime by protocol registration -- not visible here.
        */
    82         .globl _netisrs
    83 _netisrs:
    84         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    85         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    86         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    87         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    88         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    89         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    90         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    91         .long   dummynetisr, dummynetisr, dummynetisr, dummynetisr
    92 
    93         .text
    94 
    95 /*
    96  * Handle return from interrupts, traps and syscalls.
    97  */
    98         SUPERALIGN_TEXT
    99         .type   _doreti,@function
   100 _doreti:
   101         FAKE_MCOUNT(_bintr)             /* init "from" _bintr -> _doreti */
   102         addl    $4,%esp                 /* discard unit number */
   103         popl    %eax                    /* cpl or cml to restore */
   104 doreti_next:
   105         /*
   106          * Check for pending HWIs and SWIs atomically with restoring cpl
   107          * and exiting.  The check has to be atomic with exiting to stop
   108          * (ipending & ~cpl) changing from zero to nonzero while we're
   109          * looking at it (this wouldn't be fatal but it would increase
   110          * interrupt latency).  Restoring cpl has to be atomic with exiting
   111          * so that the stack cannot pile up (the nesting level of interrupt
   112          * handlers is limited by the number of bits in cpl).
   113          */
   114 #ifdef SMP
   115         cli                             /* early to prevent INT deadlock */
   116 doreti_next2:
   117 #endif
   118         movl    %eax,%ecx
   119         notl    %ecx                    /* set bit = unmasked level */
   120 #ifndef SMP
   121         cli
   122 #endif
   123         andl    _ipending,%ecx          /* set bit = unmasked pending INT */
   124         jne     doreti_unpend
   125         movl    %eax,_cpl
   126         decb    _intr_nesting_level
   127 
   128         /* Check for ASTs that can be handled now. */
   129         testl   $AST_PENDING,_astpending
   130         je      doreti_exit
       /*
        * Returning to vm86 mode?  Deliver the AST unless this frame belongs
        * to a kernel-initiated vm86 call (_in_vm86call == 1), in which case
        * just exit and leave the AST pending.
        */
   131         testl   $PSL_VM,TF_EFLAGS(%esp)
   132         jz      doreti_notvm86
   133         cmpl    $1,_in_vm86call
   134         jne     doreti_ast
   135         jmp     doreti_exit     
   136 
   137 doreti_notvm86:
       /* Not vm86: only deliver ASTs when returning to user mode (CS RPL != 0). */
   138         testb   $SEL_RPL_MASK,TF_CS(%esp)
   139         jnz     doreti_ast
  140 
   141         /*
   142          * doreti_exit -        release MP lock, pop registers, iret.
   143          *
   144          *      Note that the syscall trap shortcuts to doreti_syscall_ret.
   145          *      The segment register pop is a special case, since it may
   146          *      fault if (for example) a sigreturn specifies bad segment
   147          *      registers.  The fault is handled in trap.c
   148          */
   149 
   150 doreti_exit:
   151         MEXITCOUNT
   152 
   153 #ifdef SMP
   154         /* release the kernel lock */
   155         movl    $_mp_lock, %edx         /* GIANT_LOCK */
   156         call    _MPrellock_edx
   157 #endif /* SMP */
   158 
       /*
        * Pop the trap frame.  Each segment-register pop and the final iret
        * gets a global label so trap.c can recognize a fault at exactly that
        * instruction and resume at the matching *_fault recovery label below.
        */
   159         .globl  doreti_popl_fs
   160         .globl  doreti_syscall_ret
   161 doreti_syscall_ret:
   162 doreti_popl_fs:
   163         popl    %fs
   164         .globl  doreti_popl_es
   165 doreti_popl_es:
   166         popl    %es
   167         .globl  doreti_popl_ds
   168 doreti_popl_ds:
   169         popl    %ds
   170         popal
   171         addl    $8,%esp                 /* discard trapno and error code */
   172         .globl  doreti_iret
   173 doreti_iret:
   174         iret
   175 
       /*
        * Fault recovery: one of the pops or the iret above faulted.  Rebuild
        * the partially-popped trap frame (the reverse of the pop sequence,
        * re-pushing exactly what had already been popped), force a T_PROTFLT
        * trap number, and re-enter the normal trap path.
        */
   176         ALIGN_TEXT
   177         .globl  doreti_iret_fault
   178 doreti_iret_fault:
   179         subl    $8,%esp                 /* re-reserve trapno/err slots */
   180         pushal
   181         pushl   %ds
   182         .globl  doreti_popl_ds_fault
   183 doreti_popl_ds_fault:
   184         pushl   %es
   185         .globl  doreti_popl_es_fault
   186 doreti_popl_es_fault:
   187         pushl   %fs
   188         .globl  doreti_popl_fs_fault
   189 doreti_popl_fs_fault:
   190         movl    $0,TF_ERR(%esp) /* XXX should be the error code */
   191         movl    $T_PROTFLT,TF_TRAPNO(%esp)
   192         jmp     alltraps_with_regs_pushed
  193 
   194         ALIGN_TEXT
   195 doreti_unpend:
   196         /*
   197          * Enabling interrupts is safe because we haven't restored cpl yet.
   198          * %ecx contains the next probable ready interrupt (~cpl & ipending)
   199          */
   200 #ifdef SMP
   201         bsfl    %ecx, %ecx              /* locate the next dispatchable int */
   202         lock
   203         btrl    %ecx, _ipending         /* is it really still pending? */
   204         jnc     doreti_next2            /* some intr cleared memory copy */
   205         sti                             /* late to prevent INT deadlock */
   206 #else
   207         sti
   208         bsfl    %ecx,%ecx               /* slow, but not worth optimizing */
   209         btrl    %ecx,_ipending
   210         jnc     doreti_next             /* some intr cleared memory copy */
   211 #endif /* SMP */
   212         /*
   213          * Execute handleable interrupt
   214          *
   215          * Set up JUMP to _ihandlers[%ecx] for HWIs.
   216          * Set up CALL of _ihandlers[%ecx] for SWIs.
   217          * This is a bit early for the SMP case - we have to push %ecx and
   218          * %edx, but could push only %ecx and load %edx later.
   219          */
   220         movl    _ihandlers(,%ecx,4),%edx
   221         cmpl    $NHWI,%ecx
   222         jae     doreti_swi              /* software interrupt handling */
   223         cli                             /* else hardware int handling */
   224 #ifdef SMP
   225         movl    %eax,_cpl               /* same as non-smp case right now */
   226 #else
   227         movl    %eax,_cpl
   228 #endif
   229         MEXITCOUNT
   230 #ifdef APIC_INTR_DIAGNOSTIC
   231         lock
   232         incl    CNAME(apic_itrace_doreti)(,%ecx,4)
   233 #ifdef APIC_INTR_DIAGNOSTIC_IRQ 
   234         cmpl    $APIC_INTR_DIAGNOSTIC_IRQ,%ecx
   235         jne     9f
       /* Preserve live registers around the C-callable trace logger. */
   236         pushl   %eax
   237         pushl   %ecx
   238         pushl   %edx
   239         pushl   $APIC_ITRACE_DORETI
   240         call    log_intr_event
   241         addl    $4,%esp
   242         popl    %edx
   243         popl    %ecx
   244         popl    %eax
   245 9:      
   246 #endif
   247 #endif
   248         jmp     *%edx                   /* jump to the HWI handler */
  249 
   250         ALIGN_TEXT
   251 doreti_swi:
   252         pushl   %eax                    /* save cpl; restored after handler */
   253         /*
   254          * At least the SWI_CLOCK handler has to run at a possibly strictly
   255          * lower cpl, so we have to restore
   256          * all the h/w bits in cpl now and have to worry about stack growth.
   257          * The worst case is currently (30 Jan 1994) 2 SWI handlers nested
   258          * in dying interrupt frames and about 12 HWIs nested in active
   259          * interrupt frames.  There are only 4 different SWIs and the HWI
   260          * and SWI masks limit the nesting further.
   261          *
   262          * The SMP case is currently the same as the non-SMP case.
   263          */
   264 #ifdef SMP
   265         orl     imasks(,%ecx,4), %eax   /* or in imasks */
   266         movl    %eax,_cpl               /* set cpl for call */
   267 #else
   268         orl     imasks(,%ecx,4),%eax
   269         movl    %eax,_cpl
   270 #endif
   271         call    *%edx                   /* call the SWI handler */
   272         popl    %eax                    /* cpl to restore */
   273         jmp     doreti_next             /* rescan for more pending INTs */
  274 
   275         ALIGN_TEXT
   276 doreti_ast:
       /* Deliver an AST: clear the pending flag and enter trap() as T_ASTFLT. */
   277         andl    $~AST_PENDING,_astpending
   278         sti
   279         movl    $T_ASTFLT,TF_TRAPNO(%esp)
   280         call    _trap
   281         subl    %eax,%eax               /* recover cpl|cml */
   282         movb    $1,_intr_nesting_level  /* for doreti_next to decrement */
   283         jmp     doreti_next
  284 
   285         ALIGN_TEXT
       /*
        * swi_net: drain the network software-interrupt queues.  Repeatedly
        * find the lowest set bit in _netisr, atomically clear it, and call
        * the matching _netisrs[] handler until no bits remain.
        */
   286 swi_net:
   287         MCOUNT
   288         bsfl    _netisr,%eax            /* ZF set iff no queue bit is set */
   289         je      swi_net_done
   290 swi_net_more:
   291         btrl    %eax,_netisr            /* clear bit; CF = previous value */
   292         jnc     swi_net_next            /* already cleared; pick another */
   293         call    *_netisrs(,%eax,4)
   294 swi_net_next:
   295         bsfl    _netisr,%eax
   296         jne     swi_net_more
   297 swi_net_done:
   298         ret
  299 
   300         ALIGN_TEXT
       /* Placeholder _netisrs[] handler: MCOUNT profiling hook only, then return. */
   301 dummynetisr:
   302         MCOUNT
   303         ret     
  304 
   305 /*
   306  * The arg is in a nonstandard place, so swi_dispatcher() can't be called
   307  * directly and swi_generic() can't use ENTRY() or MCOUNT.
   308  */
   309         ALIGN_TEXT
   310         .globl  _swi_generic
   311         .type   _swi_generic,@function
   312 _swi_generic:
       /*
        * NOTE(review): %ecx appears to carry the nonstandard arg; pushing it
        * both preserves it and makes it the stack argument seen by
        * _swi_dispatcher -- confirm against swi_dispatcher()'s prototype.
        */
   313         pushl   %ecx
   314         FAKE_MCOUNT(4(%esp))
   315         call    _swi_dispatcher
   316         popl    %ecx
   317         ret
  318 
   319 ENTRY(swi_null)
   320         ret                             /* no-op software interrupt handler */
  321 
  322 #ifdef APIC_IO
  323 #include "i386/isa/apic_ipl.s"
  324 #else
  325 #include "i386/isa/icu_ipl.s"
  326 #endif /* APIC_IO */

Cache object: 30b8206d660a7c20e74d8a3a4e6545a8


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.