FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/swtch.s


    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * William Jolitz.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  * $FreeBSD$
   37  */
   38 
   39 #include "npx.h"
   40 #include "opt_user_ldt.h"
   41 #include "opt_vm86.h"
   42 
   43 #include <sys/rtprio.h>
   44 
   45 #include <machine/asmacros.h>
   46 
   47 #ifdef SMP
   48 #include <machine/pmap.h>
   49 #include <machine/apic.h>
   50 #include <machine/smptests.h>           /** GRAB_LOPRIO */
   51 #include <machine/ipl.h>
   52 #include <machine/lock.h>
   53 #endif /* SMP */
   54 
   55 #include "assym.s"
   56 
   57 
   58 /*****************************************************************************/
   59 /* Scheduling                                                                */
   60 /*****************************************************************************/
   61 
   62 /*
   63  * The following primitives manipulate the run queues.
   64  * _whichqs tells which of the 32 queues _qs have processes
   65  * in them.  setrunqueue puts processes into queues, remrq
   66  * removes them from queues.  The running process is on no
   67  * queue; other processes are on a queue determined by
   68  * p->p_priority divided by 4, which shrinks the 0-127 range
   69  * of priorities into the 32 available queues.
   70  */
   71         .data
   72 
   73         .globl  _whichqs, _whichrtqs, _whichidqs
   74 
   75 _whichqs:       .long   0               /* which run queues have data */
   76 _whichrtqs:     .long   0               /* which realtime run qs have data */
   77 _whichidqs:     .long   0               /* which idletime run qs have data */
   78 
   79         .globl  _hlt_vector
   80 _hlt_vector:    .long   _default_halt   /* pointer to halt routine */
   81 
   82         .globl  _qs,_cnt,_panic
   83 
   84         .globl  _want_resched
   85 _want_resched:  .long   0               /* we need to re-run the scheduler */
   86 #if defined(SWTCH_OPTIM_STATS)
   87         .globl  _swtch_optim_stats, _tlb_flush_count
   88 _swtch_optim_stats:     .long   0               /* number of _swtch_optims */
   89 _tlb_flush_count:       .long   0
   90 #endif
   91 
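
The queue bookkeeping handled by the btsl/btrl/bsfl instructions below
is easier to follow in C.  A minimal, illustrative sketch (not the
kernel's own code) of the mask word, the priority-to-queue mapping
described in the comment above, and the scan for the lowest-numbered
nonempty queue:

    #include <stdint.h>

    static uint32_t whichqs;            /* bit q set => queue q nonempty */

    /* Map a 0-127 priority onto one of the 32 queues (p_priority / 4). */
    static inline int
    prio_to_queue(int p_priority)
    {
            return (p_priority >> 2);
    }

    /* Mark queue q nonempty, as "btsl %edx,_whichqs" does below. */
    static inline void
    queue_mark_full(int q)
    {
            whichqs |= (uint32_t)1 << q;
    }

    /* Find the lowest-numbered (best-priority) nonempty queue, or -1
     * if all are empty; the assembly does this scan with one bsfl. */
    static inline int
    queue_find_full(void)
    {
            int q;

            for (q = 0; q < 32; q++)
                    if (whichqs & ((uint32_t)1 << q))
                            return (q);
            return (-1);
    }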
   92         .text
   93 /*
   94  * setrunqueue(p)
   95  *
   96  * Call should be made at spl6(), and p->p_stat should be SRUN
   97  */
   98 ENTRY(setrunqueue)
   99         movl    4(%esp),%eax
  100 #ifdef DIAGNOSTIC
  101         cmpb    $SRUN,P_STAT(%eax)
  102         je      set1
  103         pushl   $set2
  104         call    _panic
  105 set1:
  106 #endif
  107         cmpw    $RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
  108         je      set_nort
  109 
  110         movzwl  P_RTPRIO_PRIO(%eax),%edx
  111 
  112         cmpw    $RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* RR realtime priority? */
  113         je      set_rt                          /* RT priority */
  114         cmpw    $RTP_PRIO_FIFO,P_RTPRIO_TYPE(%eax) /* FIFO realtime priority? */
  115         jne     set_id                          /* must be idle priority */
  116         
  117 set_rt:
  118         btsl    %edx,_whichrtqs                 /* set q full bit */
  119         shll    $3,%edx
  120         addl    $_rtqs,%edx                     /* locate q hdr */
  121         movl    %edx,P_FORW(%eax)               /* link process on tail of q */
  122         movl    P_BACK(%edx),%ecx
  123         movl    %ecx,P_BACK(%eax)
  124         movl    %eax,P_BACK(%edx)
  125         movl    %eax,P_FORW(%ecx)
  126         ret
  127 
  128 set_id: 
  129         btsl    %edx,_whichidqs                 /* set q full bit */
  130         shll    $3,%edx
  131         addl    $_idqs,%edx                     /* locate q hdr */
  132         movl    %edx,P_FORW(%eax)               /* link process on tail of q */
  133         movl    P_BACK(%edx),%ecx
  134         movl    %ecx,P_BACK(%eax)
  135         movl    %eax,P_BACK(%edx)
  136         movl    %eax,P_FORW(%ecx)
  137         ret
  138 
  139 set_nort:                                       /*  Normal (RTOFF) code */
  140         movzbl  P_PRI(%eax),%edx
  141         shrl    $2,%edx
  142         btsl    %edx,_whichqs                   /* set q full bit */
  143         shll    $3,%edx
  144         addl    $_qs,%edx                       /* locate q hdr */
  145         movl    %edx,P_FORW(%eax)               /* link process on tail of q */
  146         movl    P_BACK(%edx),%ecx
  147         movl    %ecx,P_BACK(%eax)
  148         movl    %eax,P_BACK(%edx)
  149         movl    %eax,P_FORW(%ecx)
  150         ret
  151 
  152 set2:   .asciz  "setrunqueue"
  153 
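Each of the three enqueue paths above (set_rt, set_id, set_nort) ends
with the same five movl instructions, which link the process onto the
tail of a circular, doubly linked queue whose header points at itself
when empty.  The same operation in illustrative C (type and field
names assumed for the sketch):

    struct qentry {
            struct qentry *p_forw;
            struct qentry *p_back;
    };

    /* Link p at the tail of the queue headed by qhdr. */
    static void
    queue_link_tail(struct qentry *qhdr, struct qentry *p)
    {
            p->p_forw = qhdr;               /* movl %edx,P_FORW(%eax) */
            p->p_back = qhdr->p_back;       /* movl P_BACK(%edx),%ecx */
                                            /* movl %ecx,P_BACK(%eax) */
            qhdr->p_back = p;               /* movl %eax,P_BACK(%edx) */
            p->p_back->p_forw = p;          /* movl %eax,P_FORW(%ecx) */
    }
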
  154 /*
  155  * remrq(p)
  156  *
  157  * Call should be made at spl6().
  158  */
  159 ENTRY(remrq)
  160         movl    4(%esp),%eax
  161         cmpw    $RTP_PRIO_NORMAL,P_RTPRIO_TYPE(%eax) /* normal priority process? */
  162         je      rem_nort
  163 
  164         movzwl  P_RTPRIO_PRIO(%eax),%edx
  165 
  166         cmpw    $RTP_PRIO_REALTIME,P_RTPRIO_TYPE(%eax) /* realtime priority process? */
  167         je      rem0rt
  168         cmpw    $RTP_PRIO_FIFO,P_RTPRIO_TYPE(%eax) /* FIFO realtime priority process? */
  169         jne     rem_id
  170                 
  171 rem0rt:
  172         btrl    %edx,_whichrtqs                 /* clear full bit, panic if clear already */
  173         jb      rem1rt
  174         pushl   $rem3rt
  175         call    _panic
  176 rem1rt:
  177         pushl   %edx
  178         movl    P_FORW(%eax),%ecx               /* unlink process */
  179         movl    P_BACK(%eax),%edx
  180         movl    %edx,P_BACK(%ecx)
  181         movl    P_BACK(%eax),%ecx
  182         movl    P_FORW(%eax),%edx
  183         movl    %edx,P_FORW(%ecx)
  184         popl    %edx
  185         movl    $_rtqs,%ecx
  186         shll    $3,%edx
  187         addl    %edx,%ecx
  188         cmpl    P_FORW(%ecx),%ecx               /* q still has something? */
  189         je      rem2rt
  190         shrl    $3,%edx                         /* yes, set bit as still full */
  191         btsl    %edx,_whichrtqs
  192 rem2rt:
  193         ret
  194 rem_id:
  195         btrl    %edx,_whichidqs                 /* clear full bit, panic if clear already */
  196         jb      rem1id
  197         pushl   $rem3id
  198         call    _panic
  199 rem1id:
  200         pushl   %edx
  201         movl    P_FORW(%eax),%ecx               /* unlink process */
  202         movl    P_BACK(%eax),%edx
  203         movl    %edx,P_BACK(%ecx)
  204         movl    P_BACK(%eax),%ecx
  205         movl    P_FORW(%eax),%edx
  206         movl    %edx,P_FORW(%ecx)
  207         popl    %edx
  208         movl    $_idqs,%ecx
  209         shll    $3,%edx
  210         addl    %edx,%ecx
  211         cmpl    P_FORW(%ecx),%ecx               /* q still has something? */
  212         je      rem2id
  213         shrl    $3,%edx                         /* yes, set bit as still full */
  214         btsl    %edx,_whichidqs
  215 rem2id:
  216         ret
  217 
  218 rem_nort:     
  219         movzbl  P_PRI(%eax),%edx
  220         shrl    $2,%edx
  221         btrl    %edx,_whichqs                   /* clear full bit, panic if clear already */
  222         jb      rem1
  223         pushl   $rem3
  224         call    _panic
  225 rem1:
  226         pushl   %edx
  227         movl    P_FORW(%eax),%ecx               /* unlink process */
  228         movl    P_BACK(%eax),%edx
  229         movl    %edx,P_BACK(%ecx)
  230         movl    P_BACK(%eax),%ecx
  231         movl    P_FORW(%eax),%edx
  232         movl    %edx,P_FORW(%ecx)
  233         popl    %edx
  234         movl    $_qs,%ecx
  235         shll    $3,%edx
  236         addl    %edx,%ecx
  237         cmpl    P_FORW(%ecx),%ecx               /* q still has something? */
  238         je      rem2
  239         shrl    $3,%edx                         /* yes, set bit as still full */
  240         btsl    %edx,_whichqs
  241 rem2:
  242         ret
  243 
  244 rem3:   .asciz  "remrq"
  245 rem3rt: .asciz  "remrq.rt"
  246 rem3id: .asciz  "remrq.id"
  247 
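The three dequeue paths share the mirror-image pattern: clear the
queue's mask bit up front (panicking if it was already clear), unlink
the process, then set the bit again only if the queue still has
entries.  A C sketch, reusing struct qentry from the sketch above:

    #include <stdint.h>

    extern void panic(const char *fmt, ...);

    /* Remove p (on queue q, tracked in mask word *whichp) from the
     * queue headed by qhdr, keeping the mask bit consistent. */
    static void
    queue_unlink(struct qentry *qhdr, struct qentry *p,
        uint32_t *whichp, int q)
    {
            if ((*whichp & ((uint32_t)1 << q)) == 0)    /* btrl + jb */
                    panic("remrq");
            *whichp &= ~((uint32_t)1 << q);

            p->p_forw->p_back = p->p_back;      /* unlink process */
            p->p_back->p_forw = p->p_forw;

            if (qhdr->p_forw != qhdr)       /* q still has something? */
                    *whichp |= (uint32_t)1 << q;
    }
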
  248 /*
  249  * When no processes are on the runq, cpu_switch() branches to _idle
  250  * to wait for something to come ready.
  251  */
  252         ALIGN_TEXT
  253         .type   _idle,@function
  254 _idle:
  255         xorl    %ebp,%ebp
  256         movl    %ebp,_switchtime
  257 
  258 #ifdef SMP
  259 
  260         /* when called, we have the mplock, intr disabled */
  261         /* use our idleproc's "context" */
  262         movl    _my_idlePTD,%ecx
  263         movl    %ecx,%cr3
  264 #if defined(SWTCH_OPTIM_STATS)
  265         incl    _tlb_flush_count
  266 #endif
  267         /* Keep space for nonexistent return addr, or profiling bombs */
  268         movl    $_idlestack_top-4,%ecx  
  269         movl    %ecx,%esp
  270 
  271         /* update common_tss.tss_esp0 pointer */
  272 #ifdef VM86
  273         movl    _my_tr, %esi
  274 #endif /* VM86 */
  275         movl    %ecx, _common_tss + TSS_ESP0
  276 
  277 #ifdef VM86
  278         cmpl    $0, _private_tss
  279         je      1f
  280         movl    $_common_tssd, %edi
  281 
  282         /* move correct tss descriptor into GDT slot, then reload tr */
  283         leal    _gdt(,%esi,8), %ebx             /* entry in GDT */
  284         movl    0(%edi), %eax
  285         movl    %eax, 0(%ebx)
  286         movl    4(%edi), %eax
  287         movl    %eax, 4(%ebx)
  288         shll    $3, %esi                        /* GSEL(entry, SEL_KPL) */
  289         ltr     %si
  290 1:
  291 #endif /* VM86 */
  292 
  293         sti
  294 
  295         /*
  296          * XXX callers of cpu_switch() do a bogus splclock().  Locking should
  297          * be left to cpu_switch().
  298          */
  299         call    _spl0
  300 
  301         cli
  302 
  303         /*
  304          * _REALLY_ free the lock, no matter how deep the prior nesting.
  305          * We will recover the nesting on the way out when we have a new
  306          * proc to load.
  307          *
  308          * XXX: we had damn well better be sure we had it before doing this!
  309          */
  310         CPL_LOCK                        /* XXX */
  311         andl    $~SWI_AST_MASK, _ipending                       /* XXX */
  312         movl    $0, _cpl        /* XXX Allow ASTs on other CPU */
  313         CPL_UNLOCK                      /* XXX */
  314         movl    $FREE_LOCK, %eax
  315         movl    %eax, _mp_lock
  316 
  317         /* do NOT have lock, intrs disabled */
  318         .globl  idle_loop
  319 idle_loop:
  320 
  321 #if defined(SWTCH_OPTIM_STATS)
  322         incl    _tlb_flush_count
  323 #endif
  324         movl    %cr3,%eax                       /* ouch! */
  325         movl    %eax,%cr3
  326 
  327         cmpl    $0,_smp_active
  328         jne     1f
  329         cmpl    $0,_cpuid
  330         je      1f
  331         jmp     2f
  332 
  333 1:      cmpl    $0,_whichrtqs                   /* real-time queue */
  334         jne     3f
  335         cmpl    $0,_whichqs                     /* normal queue */
  336         jne     3f
  337         cmpl    $0,_whichidqs                   /* 'idle' queue */
  338         jne     3f
  339 
  340         cmpl    $0,_do_page_zero_idle
  341         je      2f
  342 
  343         /* XXX appears to cause panics */
  344         /*
  345          * Inside zero_idle we enable interrupts and grab the mplock
  346          * as needed.  It needs to be careful about entry/exit mutexes.
  347          */
  348         call    _vm_page_zero_idle              /* internal locking */
  349         testl   %eax, %eax
  350         jnz     idle_loop
  351 2:
  352 
  353         /* enable intrs for a halt */
  354         movl    $0, lapic_tpr                   /* 1st candidate for an INT */
  355         call    *_hlt_vector                    /* wait for interrupt */
  356         cli
  357         jmp     idle_loop
  358 
  359 3:
  360         movl    $LOPRIO_LEVEL, lapic_tpr        /* arbitrate for INTs */
  361         call    _get_mplock
  362         CPL_LOCK                                        /* XXX */
  363         movl    $SWI_AST_MASK, _cpl     /* XXX Disallow ASTs on other CPU */
  364         CPL_UNLOCK                                      /* XXX */
  365         cmpl    $0,_whichrtqs                   /* real-time queue */
  366         CROSSJUMP(jne, sw1a, je)
  367         cmpl    $0,_whichqs                     /* normal queue */
  368         CROSSJUMP(jne, nortqr, je)
  369         cmpl    $0,_whichidqs                   /* 'idle' queue */
  370         CROSSJUMP(jne, idqr, je)
  371         CPL_LOCK                                /* XXX */
  372         movl    $0, _cpl                /* XXX Allow ASTs on other CPU */
  373         CPL_UNLOCK                              /* XXX */
  374         call    _rel_mplock
  375         jmp     idle_loop
  376 
  377 #else /* !SMP */
  378 
  379         movl    $tmpstk,%esp
  380 #if defined(OVERLY_CONSERVATIVE_PTD_MGMT)
  381 #if defined(SWTCH_OPTIM_STATS)
  382         incl    _swtch_optim_stats
  383 #endif
  384         movl    _IdlePTD, %ecx
  385         movl    %cr3, %eax
  386         cmpl    %ecx, %eax
  387         je              2f
  388 #if defined(SWTCH_OPTIM_STATS)
  389         decl    _swtch_optim_stats
  390         incl    _tlb_flush_count
  391 #endif
  392         movl    %ecx, %cr3
  393 2:
  394 #endif
  395 
  396         /* update common_tss.tss_esp0 pointer */
  397 #ifdef VM86
  398         movl    _my_tr, %esi
  399 #endif /* VM86 */
  400         movl    %esp, _common_tss + TSS_ESP0
  401 
  402 #ifdef VM86
  403         cmpl    $0, _private_tss
  404         je      1f
  405         movl    $_common_tssd, %edi
  406 
  407         /* move correct tss descriptor into GDT slot, then reload tr */
  408         leal    _gdt(,%esi,8), %ebx             /* entry in GDT */
  409         movl    0(%edi), %eax
  410         movl    %eax, 0(%ebx)
  411         movl    4(%edi), %eax
  412         movl    %eax, 4(%ebx)
  413         shll    $3, %esi                        /* GSEL(entry, SEL_KPL) */
  414         ltr     %si
  415 1:
  416 #endif /* VM86 */
  417 
  418         sti
  419 
  420         /*
  421          * XXX callers of cpu_switch() do a bogus splclock().  Locking should
  422          * be left to cpu_switch().
  423          */
  424         call    _spl0
  425 
  426         ALIGN_TEXT
  427 idle_loop:
  428         cli
  429         cmpl    $0,_whichrtqs                   /* real-time queue */
  430         CROSSJUMP(jne, sw1a, je)
  431         cmpl    $0,_whichqs                     /* normal queue */
  432         CROSSJUMP(jne, nortqr, je)
  433         cmpl    $0,_whichidqs                   /* 'idle' queue */
  434         CROSSJUMP(jne, idqr, je)
  435         call    _vm_page_zero_idle
  436         testl   %eax, %eax
  437         jnz     idle_loop
  438         call    *_hlt_vector                    /* wait for interrupt */
  439         jmp     idle_loop
  440 
  441 #endif /* SMP */
  442 
  443 CROSSJUMPTARGET(_idle)
  444 
  445 ENTRY(default_halt)
  446         sti
  447 #ifndef SMP
  448         hlt                                     /* XXX:  until a wakeup IPI */
  449 #endif
  450         ret
  451 
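Stripped of the SMP locking and TLB details, both versions of the
idle loop do the same thing.  A hedged C outline (the real code never
returns; it CROSSJUMPs directly into cpu_switch's queue-scan labels
sw1a/nortqr/idqr, and disable_intr() stands in for cli):

    #include <stdint.h>

    extern uint32_t whichrtqs, whichqs, whichidqs;
    extern int vm_page_zero_idle(void); /* zero a free page; 1 if it did work */
    extern void (*hlt_vector)(void);    /* normally default_halt: sti; hlt */
    extern void disable_intr(void);     /* cli */

    static void
    idle_outline(void)
    {
            for (;;) {
                    disable_intr();
                    if (whichrtqs | whichqs | whichidqs)
                            break;                  /* a process is runnable */
                    if (vm_page_zero_idle())        /* did useful work; rescan */
                            continue;
                    (*hlt_vector)();                /* wait for an interrupt */
            }
    }
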
  452 /*
  453  * cpu_switch()
  454  */
  455 ENTRY(cpu_switch)
  456         
  457         /* switch to new process. first, save context as needed */
  458         movl    _curproc,%ecx
  459 
  460         /* if no process to save, don't bother */
  461         testl   %ecx,%ecx
  462         je      sw1
  463 
  464 #ifdef SMP
  465         movb    P_ONCPU(%ecx), %al              /* save "last" cpu */
  466         movb    %al, P_LASTCPU(%ecx)
  467         movb    $0xff, P_ONCPU(%ecx)            /* "leave" the cpu */
  468 #endif /* SMP */
  469 
  470         movl    P_ADDR(%ecx),%ecx
  471 
  472         movl    (%esp),%eax                     /* Hardware registers */
  473         movl    %eax,PCB_EIP(%ecx)
  474         movl    %ebx,PCB_EBX(%ecx)
  475         movl    %esp,PCB_ESP(%ecx)
  476         movl    %ebp,PCB_EBP(%ecx)
  477         movl    %esi,PCB_ESI(%ecx)
  478         movl    %edi,PCB_EDI(%ecx)
  479         movl    %fs,PCB_FS(%ecx)
  480         movl    %gs,PCB_GS(%ecx)
  481 
  482 #ifdef SMP
  483         movl    _mp_lock, %eax
  484         /* XXX FIXME: we should be saving the local APIC TPR */
  485 #ifdef DIAGNOSTIC
  486         cmpl    $FREE_LOCK, %eax                /* is it free? */
  487         je      badsw4                          /* yes, bad medicine! */
  488 #endif /* DIAGNOSTIC */
  489         andl    $COUNT_FIELD, %eax              /* clear CPU portion */
  490         movl    %eax, PCB_MPNEST(%ecx)          /* store it */
  491 #endif /* SMP */
  492 
  493 #if NNPX > 0
  494         /* have we used fp, and need a save? */
  495         movl    _curproc,%eax
  496         cmpl    %eax,_npxproc
  497         jne     1f
  498         addl    $PCB_SAVEFPU,%ecx               /* h/w bugs make saving complicated */
  499         pushl   %ecx
  500         call    _npxsave                        /* do it in a big C function */
  501         popl    %eax
  502 1:
  503 #endif  /* NNPX > 0 */
  504 
  505         movl    $0,_curproc                     /* out of process */
  506 
  507         /* save is done, now choose a new process or idle */
  508 sw1:
  509         cli
  510 
  511 #ifdef SMP
  512         /* Stop scheduling if smp_active goes zero and we are not BSP */
  513         cmpl    $0,_smp_active
  514         jne     1f
  515         cmpl    $0,_cpuid
  516         je      1f
  517         CROSSJUMP(je, _idle, jne)               /* wind down */
  518 1:
  519 #endif
  520 
  521 sw1a:
  522         movl    _whichrtqs,%edi                 /* pick next p. from rtqs */
  523         testl   %edi,%edi
  524         jz      nortqr                          /* no realtime procs */
  525 
  526         /* XXX - bsf is sloow */
  527         bsfl    %edi,%ebx                       /* find a full q */
  528         jz      nortqr                          /* no proc on rt q - try normal ... */
  529 
  530         /* XX update whichqs? */
  531         btrl    %ebx,%edi                       /* clear q full status */
  532         leal    _rtqs(,%ebx,8),%eax             /* select q */
  533         movl    %eax,%esi
  534 
  535         movl    P_FORW(%eax),%ecx               /* unlink from front of process q */
  536         movl    P_FORW(%ecx),%edx
  537         movl    %edx,P_FORW(%eax)
  538         movl    P_BACK(%ecx),%eax
  539         movl    %eax,P_BACK(%edx)
  540 
  541         cmpl    P_FORW(%ecx),%esi               /* q empty */
  542         je      rt3
  543         btsl    %ebx,%edi                       /* nope, set to indicate not empty */
  544 rt3:
  545         movl    %edi,_whichrtqs                 /* update q status */
  546         jmp     swtch_com
  547 
  548         /* old sw1a */
  549 /* Normal process priorities */
  550 nortqr:
  551         movl    _whichqs,%edi
  552 2:
  553         /* XXX - bsf is sloow */
  554         bsfl    %edi,%ebx                       /* find a full q */
  555         jz      idqr                            /* if none, idle */
  556 
  557         /* XX update whichqs? */
  558         btrl    %ebx,%edi                       /* clear q full status */
  559         leal    _qs(,%ebx,8),%eax               /* select q */
  560         movl    %eax,%esi
  561 
  562         movl    P_FORW(%eax),%ecx               /* unlink from front of process q */
  563         movl    P_FORW(%ecx),%edx
  564         movl    %edx,P_FORW(%eax)
  565         movl    P_BACK(%ecx),%eax
  566         movl    %eax,P_BACK(%edx)
  567 
  568         cmpl    P_FORW(%ecx),%esi               /* q empty */
  569         je      3f
  570         btsl    %ebx,%edi                       /* nope, set to indicate not empty */
  571 3:
  572         movl    %edi,_whichqs                   /* update q status */
  573         jmp     swtch_com
  574 
  575 idqr: /* was sw1a */
  576         movl    _whichidqs,%edi                 /* pick next p. from idqs */
  577 
  578         /* XXX - bsf is sloow */
  579         bsfl    %edi,%ebx                       /* find a full q */
  580         CROSSJUMP(je, _idle, jne)               /* if no proc, idle */
  581 
  582         /* XX update whichqs? */
  583         btrl    %ebx,%edi                       /* clear q full status */
  584         leal    _idqs(,%ebx,8),%eax             /* select q */
  585         movl    %eax,%esi
  586 
  587         movl    P_FORW(%eax),%ecx               /* unlink from front of process q */
  588         movl    P_FORW(%ecx),%edx
  589         movl    %edx,P_FORW(%eax)
  590         movl    P_BACK(%ecx),%eax
  591         movl    %eax,P_BACK(%edx)
  592 
  593         cmpl    P_FORW(%ecx),%esi               /* q empty */
  594         je      id3
  595         btsl    %ebx,%edi                       /* nope, set to indicate not empty */
  596 id3:
  597         movl    %edi,_whichidqs                 /* update q status */
  598 
  599 swtch_com:
  600         movl    $0,%eax
  601         movl    %eax,_want_resched
  602 
  603 #ifdef  DIAGNOSTIC
  604         cmpl    %eax,P_WCHAN(%ecx)
  605         jne     badsw1
  606         cmpb    $SRUN,P_STAT(%ecx)
  607         jne     badsw2
  608 #endif
  609 
  610         movl    %eax,P_BACK(%ecx)               /* isolate process to run */
  611         movl    P_ADDR(%ecx),%edx
  612 
  613 #ifdef SMP
  614         movl    PCB_CR3(%edx),%ebx
  615         /* Grab the private PT pointer from the outgoing process's PTD */
  616         movl    $_PTD, %esi
  617         movl    4*MPPTDI(%esi), %eax            /* fetch cpu's prv pt */
  618 #else
  619 #if defined(SWTCH_OPTIM_STATS)
  620         incl    _swtch_optim_stats
  621 #endif
  622         /* switch address space */
  623         movl    %cr3,%ebx
  624         cmpl    PCB_CR3(%edx),%ebx
  625         je              4f
  626 #if defined(SWTCH_OPTIM_STATS)
  627         decl    _swtch_optim_stats
  628         incl    _tlb_flush_count
  629 #endif
  630         movl    PCB_CR3(%edx),%ebx
  631 #endif /* SMP */
  632         movl    %ebx,%cr3
  633 4:
  634 
  635 #ifdef SMP
  636         /* Copy the private PT to the new process's PTD */
  637         /* XXX yuck, the _PTD changes when we switch, so we have to
  638          * reload %cr3 after changing the address space.
  639          * We need to fix this by storing a pointer to the virtual
  640          * location of the per-process PTD in the PCB or something quick.
  641          * Dereferencing proc->vm_map->pmap->p_pdir[] is painful in asm.
  642          */
  643         movl    %eax, 4*MPPTDI(%esi)            /* restore cpu's prv page */
  644 
  645 #if defined(SWTCH_OPTIM_STATS)
  646         incl    _tlb_flush_count
  647 #endif
  648         /* XXX: we have just changed the page tables.. reload.. */
  649         movl    %ebx, %cr3
  650 #endif /* SMP */
  651 
  652 #ifdef VM86
  653         movl    _my_tr, %esi
  654         cmpl    $0, PCB_EXT(%edx)               /* has pcb extension? */
  655         je      1f
  656         movl    $1, _private_tss                /* mark use of private tss */
  657         movl    PCB_EXT(%edx), %edi             /* new tss descriptor */
  658         jmp     2f
  659 1:
  660 #endif
  661 
  662         /* update common_tss.tss_esp0 pointer */
  663         movl    $_common_tss, %eax
  664         movl    %edx, %ebx                      /* pcb */
  665 #ifdef VM86
  666         addl    $(UPAGES * PAGE_SIZE - 16), %ebx
  667 #else
  668         addl    $(UPAGES * PAGE_SIZE), %ebx
  669 #endif /* VM86 */
  670         movl    %ebx, TSS_ESP0(%eax)
  671 
  672 #ifdef VM86
  673         cmpl    $0, _private_tss
  674         je      3f
  675         movl    $_common_tssd, %edi
  676 2:
  677         /* move correct tss descriptor into GDT slot, then reload tr */
  678         leal    _gdt(,%esi,8), %ebx             /* entry in GDT */
  679         movl    0(%edi), %eax
  680         movl    %eax, 0(%ebx)
  681         movl    4(%edi), %eax
  682         movl    %eax, 4(%ebx)
  683         shll    $3, %esi                        /* GSEL(entry, SEL_KPL) */
  684         ltr     %si
  685 3:
  686 #endif /* VM86 */
  687 
  688         /* restore context */
  689         movl    PCB_EBX(%edx),%ebx
  690         movl    PCB_ESP(%edx),%esp
  691         movl    PCB_EBP(%edx),%ebp
  692         movl    PCB_ESI(%edx),%esi
  693         movl    PCB_EDI(%edx),%edi
  694         movl    PCB_EIP(%edx),%eax
  695         movl    %eax,(%esp)
  696 
  697 #ifdef SMP
  698 #ifdef GRAB_LOPRIO                              /* hold LOPRIO for INTs */
  699 #ifdef CHEAP_TPR
  700         movl    $0, lapic_tpr
  701 #else
  702         andl    $~APIC_TPR_PRIO, lapic_tpr
  703 #endif /** CHEAP_TPR */
  704 #endif /** GRAB_LOPRIO */
  705         movl    _cpuid,%eax
  706         movb    %al, P_ONCPU(%ecx)
  707 #endif /* SMP */
  708         movl    %edx, _curpcb
  709         movl    %ecx, _curproc                  /* into next process */
  710 
  711 #ifdef SMP
  712         movl    _cpu_lockid, %eax
  713         orl     PCB_MPNEST(%edx), %eax          /* add next count from PROC */
  714         movl    %eax, _mp_lock                  /* load the mp_lock */
  715         /* XXX FIXME: we should be restoring the local APIC TPR */
  716 #endif /* SMP */
  717 
  718 #ifdef  USER_LDT
  719         cmpl    $0, PCB_USERLDT(%edx)
  720         jnz     1f
  721         movl    __default_ldt,%eax
  722         cmpl    _currentldt,%eax
  723         je      2f
  724         lldt    __default_ldt
  725         movl    %eax,_currentldt
  726         jmp     2f
  727 1:      pushl   %edx
  728         call    _set_user_ldt
  729         popl    %edx
  730 2:
  731 #endif
  732 
  733         /* This must be done after loading the user LDT. */
  734         .globl  cpu_switch_load_fs
  735 cpu_switch_load_fs:
  736         movl    PCB_FS(%edx),%fs
  737         .globl  cpu_switch_load_gs
  738 cpu_switch_load_gs:
  739         movl    PCB_GS(%edx),%gs
  740 
  741         sti
  742         ret
  743 
  744 CROSSJUMPTARGET(idqr)
  745 CROSSJUMPTARGET(nortqr)
  746 CROSSJUMPTARGET(sw1a)
  747 
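The save and restore halves of cpu_switch() operate on a handful of
pcb fields.  A sketch of the fields implied by the PCB_* offsets used
above (the authoritative definition is struct pcb in <machine/pcb.h>;
this is only an illustration):

    #include <stdint.h>

    struct pcb_regs {
            uint32_t pcb_eip;       /* return address, taken from (%esp) */
            uint32_t pcb_ebx;       /* callee-saved registers follow */
            uint32_t pcb_esp;
            uint32_t pcb_ebp;
            uint32_t pcb_esi;
            uint32_t pcb_edi;
            uint32_t pcb_fs;
            uint32_t pcb_gs;
            uint32_t pcb_cr3;       /* page directory base (PCB_CR3) */
    };

Only the callee-saved registers and a return address are kept:
cpu_switch() is entered by an ordinary C call, so %eax, %ecx and %edx
are scratch by convention.  On the way out, the incoming process's
saved pcb_eip is written over the return address on the new stack
("movl %eax,(%esp)"), so the final ret resumes that process exactly
where it last called cpu_switch().
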
  748 #ifdef DIAGNOSTIC
  749 badsw1:
  750         pushl   $sw0_1
  751         call    _panic
  752 
  753 sw0_1:  .asciz  "cpu_switch: has wchan"
  754 
  755 badsw2:
  756         pushl   $sw0_2
  757         call    _panic
  758 
  759 sw0_2:  .asciz  "cpu_switch: not SRUN"
  760 #endif
  761 
  762 #if defined(SMP) && defined(DIAGNOSTIC)
  763 badsw4:
  764         pushl   $sw0_4
  765         call    _panic
  766 
  767 sw0_4:  .asciz  "cpu_switch: do not have lock"
  768 #endif /* SMP && DIAGNOSTIC */
  769 
  770 /*
  771  * savectx(pcb)
  772  * Update pcb, saving current processor state.
  773  */
  774 ENTRY(savectx)
  775         /* fetch PCB */
  776         movl    4(%esp),%ecx
  777 
  778         /* caller's return address - child won't execute this routine */
  779         movl    (%esp),%eax
  780         movl    %eax,PCB_EIP(%ecx)
  781 
  782         movl    %ebx,PCB_EBX(%ecx)
  783         movl    %esp,PCB_ESP(%ecx)
  784         movl    %ebp,PCB_EBP(%ecx)
  785         movl    %esi,PCB_ESI(%ecx)
  786         movl    %edi,PCB_EDI(%ecx)
  787         movl    %fs,PCB_FS(%ecx)
  788         movl    %gs,PCB_GS(%ecx)
  789 
  790 #if NNPX > 0
  791         /*
  792          * If npxproc == NULL, then the npx h/w state is irrelevant and the
  793          * state had better already be in the pcb.  This is true for forks
  794          * but not for dumps (the old book-keeping with FP flags in the pcb
  795          * always lost for dumps because the dump pcb has 0 flags).
  796          *
  797          * If npxproc != NULL, then we have to save the npx h/w state to
  798          * npxproc's pcb and copy it to the requested pcb, or save to the
  799          * requested pcb and reload.  Copying is easier because we would
  800          * have to handle h/w bugs for reloading.  We used to lose the
  801          * parent's npx state for forks by forgetting to reload.
  802          */
  803         movl    _npxproc,%eax
  804         testl   %eax,%eax
  805         je      1f
  806 
  807         pushl   %ecx
  808         movl    P_ADDR(%eax),%eax
  809         leal    PCB_SAVEFPU(%eax),%eax
  810         pushl   %eax
  811         pushl   %eax
  812         call    _npxsave
  813         addl    $4,%esp
  814         popl    %eax
  815         popl    %ecx
  816 
  817         pushl   $PCB_SAVEFPU_SIZE
  818         leal    PCB_SAVEFPU(%ecx),%ecx
  819         pushl   %ecx
  820         pushl   %eax
  821         call    _bcopy
  822         addl    $12,%esp
  823 #endif  /* NNPX > 0 */
  824 
  825 1:
  826         ret
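
The FPU branch of savectx(), restated as a hedged C outline.  The
declarations here are assumptions for the sketch (the real ones live
in the npx code), and memcpy stands in for the kernel's bcopy:

    #include <string.h>

    struct save87 { unsigned char fpstate[108]; };  /* size illustrative */

    struct proc;                            /* opaque in this sketch */
    extern struct proc *npxproc;            /* owner of npx h/w, or NULL */
    extern struct save87 *proc_savefpu(struct proc *p);  /* assumed helper:
                                             * the proc's pcb_savefpu area */
    extern void npxsave(struct save87 *addr);  /* dump h/w state to memory */

    /* Fill dst with the current FP state, as savectx() does. */
    static void
    savectx_fpu(struct save87 *dst)
    {
            if (npxproc == NULL)
                    return;         /* pcb already holds the state */
            /* Save to the owner's pcb, then copy; copying sidesteps
             * the h/w reload bugs noted in the comment above. */
            npxsave(proc_savefpu(npxproc));
            memcpy(dst, proc_savefpu(npxproc), sizeof(*dst));
    }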

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.