FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s

    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * William Jolitz.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by the University of
   19  *      California, Berkeley and its contributors.
   20  * 4. Neither the name of the University nor the names of its contributors
   21  *    may be used to endorse or promote products derived from this software
   22  *    without specific prior written permission.
   23  *
   24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   34  * SUCH DAMAGE.
   35  *
   36  *      from: @(#)locore.s      7.3 (Berkeley) 5/13/91
   37  * $FreeBSD: releng/5.0/sys/i386/i386/locore.s 107521 2002-12-02 19:58:55Z deischen $
   38  *
   39  *              originally from: locore.s, by William F. Jolitz
   40  *
   41  *              Substantially rewritten by David Greenman, Rod Grimes,
   42  *                      Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
   43  *                      and many others.
   44  */
   45 
   46 #include "opt_bootp.h"
   47 #include "opt_compat.h"
   48 #include "opt_nfsroot.h"
   49 
   50 #include <sys/syscall.h>
   51 #include <sys/reboot.h>
   52 
   53 #include <machine/asmacros.h>
   54 #include <machine/cputypes.h>
   55 #include <machine/psl.h>
   56 #include <machine/pmap.h>
   57 #include <machine/specialreg.h>
   58 
   59 #include "assym.s"
   60 
   61 /*
   62  *      XXX
   63  *
    64  * Note: This version has been heavily munged to work around various
    65  * assembler errors that may be fixed in newer versions of gas.  Newer
    66  * versions may allow a more pleasant appearance.
   67  */
   68 
   69 /*
   70  * PTmap is recursive pagemap at top of virtual address space.
   71  * Within PTmap, the page directory can be found (third indirection).
   72  *
   73  * NOTE: PTDpde, PTmap, and PTD are being defined as address symbols.
    74  * In C they are accessed directly, not through a '*' dereference.  No
    75  * storage is allocated here; the symbols magically resolve to the correct
    76  * locations in KVM, which C treats as variables of the types declared in
    77  * machine/pmap.h, i.e.  PTDpde = XX sets a PDE entry, NOT *PTDpde = XX.
   78  */
   79         .globl  PTmap,PTD,PTDpde
   80         .set    PTmap,(PTDPTDI << PDRSHIFT)
   81         .set    PTD,PTmap + (PTDPTDI * PAGE_SIZE)
   82         .set    PTDpde,PTD + (PTDPTDI * PDESIZE)
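/*
 * A minimal C sketch of the usage described above ("va" and "new_pde" are
 * illustrative names only; the extern declarations live in machine/pmap.h):
 *
 *      pt_entry_t *pte = &PTmap[va >> PAGE_SHIFT];     (PTE that maps va)
 *      pd_entry_t *pde = &PTD[va >> PDRSHIFT];         (PDE that maps va)
 *      PTDpde = new_pde;                               (set a PDE, no '*')
 */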
   83 
   84 /*
   85  * APTmap, APTD is the alternate recursive pagemap.
   86  * It's used when modifying another process's page tables.
   87  * See the note above. It is true here as well.
   88  */
   89         .globl  APTmap,APTD,APTDpde
   90         .set    APTmap,APTDPTDI << PDRSHIFT
   91         .set    APTD,APTmap + (APTDPTDI * PAGE_SIZE)
   92         .set    APTDpde,PTD + (APTDPTDI * PDESIZE)
   93 
   94 #ifdef SMP
   95 /*
   96  * Define layout of per-cpu address space.
   97  * This is "constructed" in locore.s on the BSP and in mp_machdep.c
   98  * for each AP.  DO NOT REORDER THESE WITHOUT UPDATING THE REST!
   99  */
  100         .globl  SMP_prvspace, lapic
  101         .set    SMP_prvspace,(MPPTDI << PDRSHIFT)
  102         .set    lapic,SMP_prvspace + (NPTEPG-1) * PAGE_SIZE
  103 #endif /* SMP */
  104 
  105 /*
  106  * Compiled KERNBASE location
  107  */
  108         .globl  kernbase
  109         .set    kernbase,KERNBASE
  110 
  111 /*
  112  * Globals
  113  */
  114         .data
  115         ALIGN_DATA                      /* just to be sure */
  116 
  117         .globl  HIDENAME(tmpstk)
  118         .space  0x2000                  /* space for tmpstk - temporary stack */
  119 HIDENAME(tmpstk):
  120 
  121         .globl  bootinfo
  122 bootinfo:       .space  BOOTINFO_SIZE   /* bootinfo that we can handle */
  123 
  124                 .globl KERNend
  125 KERNend:        .long   0               /* phys addr end of kernel (just after bss) */
  126 physfree:       .long   0               /* phys addr of next free page */
  127 
  128 #ifdef SMP
  129                 .globl  cpu0prvpage
  130 cpu0pp:         .long   0               /* phys addr cpu0 private pg */
  131 cpu0prvpage:    .long   0               /* relocated version */
  132 
  133                 .globl  SMPpt
  134 SMPptpa:        .long   0               /* phys addr SMP page table */
  135 SMPpt:          .long   0               /* relocated version */
  136 #endif /* SMP */
  137 
  138         .globl  IdlePTD
  139 IdlePTD:        .long   0               /* phys addr of kernel PTD */
  140 
  141 #ifdef SMP
  142         .globl  KPTphys
  143 #endif
  144 KPTphys:        .long   0               /* phys addr of kernel page tables */
  145 
  146         .globl  proc0uarea, proc0kstack
  147 proc0uarea:     .long   0               /* address of proc 0 uarea space */
  148 proc0kstack:    .long   0               /* address of proc 0 kstack space */
  149 p0upa:          .long   0               /* phys addr of proc0's UAREA */
  150 p0kpa:          .long   0               /* phys addr of proc0's STACK */
  151 
  152 vm86phystk:     .long   0               /* PA of vm86/bios stack */
  153 
  154         .globl  vm86paddr, vm86pa
  155 vm86paddr:      .long   0               /* address of vm86 region */
  156 vm86pa:         .long   0               /* phys addr of vm86 region */
  157 
  158 #ifdef BDE_DEBUGGER
  159         .globl  _bdb_exists             /* flag to indicate BDE debugger is present */
  160 _bdb_exists:    .long   0
  161 #endif
  162 
  163 #ifdef PC98
  164         .globl  pc98_system_parameter
  165 pc98_system_parameter:
  166         .space  0x240
  167 #endif
  168 
  169 /**********************************************************************
  170  *
  171  * Some handy macros
  172  *
  173  */
  174 
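/*
 * R(foo) yields the physical (load-time) address of symbol foo, i.e. its
 * linked virtual address minus KERNBASE.  It is used to reference kernel
 * symbols while the kernel is still executing at its physical load address,
 * before paging is enabled.
 */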
  175 #define R(foo) ((foo)-KERNBASE)
  176 
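/*
 * ALLOCPAGES(foo) carves foo pages out of physical memory at physfree,
 * advances physfree past the allocation, and zero-fills the pages; the
 * physical address of the first page is left in %esi.
 */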
  177 #define ALLOCPAGES(foo) \
  178         movl    R(physfree), %esi ; \
  179         movl    $((foo)*PAGE_SIZE), %eax ; \
  180         addl    %esi, %eax ; \
  181         movl    %eax, R(physfree) ; \
  182         movl    %esi, %edi ; \
  183         movl    $((foo)*PAGE_SIZE),%ecx ; \
  184         xorl    %eax,%eax ; \
  185         cld ; \
  186         rep ; \
  187         stosb
  188 
  189 /*
  190  * fillkpt
  191  *      eax = page frame address
  192  *      ebx = index into page table
  193  *      ecx = how many pages to map
  194  *      base = base address of page dir/table
  195  *      prot = protection bits
  196  */
  197 #define fillkpt(base, prot)               \
  198         shll    $2,%ebx                 ; \
  199         addl    base,%ebx               ; \
  200         orl     $PG_V,%eax              ; \
  201         orl     prot,%eax               ; \
  202 1:      movl    %eax,(%ebx)             ; \
  203         addl    $PAGE_SIZE,%eax         ; /* increment physical address */ \
  204         addl    $4,%ebx                 ; /* next pte */ \
  205         loop    1b
  206 
  207 /*
  208  * fillkptphys(prot)
  209  *      eax = physical address
  210  *      ecx = how many pages to map
  211  *      prot = protection bits
  212  */
  213 #define fillkptphys(prot)                 \
  214         movl    %eax, %ebx              ; \
  215         shrl    $PAGE_SHIFT, %ebx       ; \
  216         fillkpt(R(KPTphys), prot)
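/*
 * Typical usage, as in create_pagetables below: map %ecx pages starting at
 * the physical address in %eax with the given protection, e.g.
 *
 *      movl    R(p0kpa), %eax
 *      movl    $(KSTACK_PAGES), %ecx
 *      fillkptphys($PG_RW)
 */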
  217 
  218         .text
  219 /**********************************************************************
  220  *
  221  * This is where the bootblocks start us, set the ball rolling...
  222  *
  223  */
  224 NON_GPROF_ENTRY(btext)
  225 
  226 #ifdef PC98
  227         /* save SYSTEM PARAMETER for resume (NS/T or other) */
  228         movl    $0xa1400,%esi
  229         movl    $R(pc98_system_parameter),%edi
  230         movl    $0x0240,%ecx
  231         cld
  232         rep
  233         movsb
  234 #else   /* IBM-PC */
  235 #ifdef BDE_DEBUGGER
  236 #ifdef BIOS_STEALS_3K
  237         cmpl    $0x0375c339,0x95504
  238 #else
  239         cmpl    $0x0375c339,0x96104     /* XXX - debugger signature */
  240 #endif
  241         jne     1f
  242         movb    $1,R(_bdb_exists)
  243 1:
  244 #endif
  245 /* Tell the bios to warmboot next time */
  246         movw    $0x1234,0x472
  247 #endif  /* PC98 */
  248 
  249 /* Set up a real frame in case the double return in newboot is executed. */
  250         pushl   %ebp
  251         movl    %esp, %ebp
  252 
  253 /* Don't trust what the BIOS gives for eflags. */
  254         pushl   $PSL_KERNEL
  255         popfl
  256 
  257 /*
  258  * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
  259  * to set %cs, %ds, %es and %ss.
  260  */
  261         mov     %ds, %ax
  262         mov     %ax, %fs
  263         mov     %ax, %gs
  264 
  265         call    recover_bootinfo
  266 
  267 /* Get onto a stack that we can trust. */
  268 /*
  269  * XXX this step is delayed in case recover_bootinfo needs to return via
  270  * the old stack, but it need not be, since recover_bootinfo actually
  271  * returns via the old frame.
  272  */
  273         movl    $R(HIDENAME(tmpstk)),%esp
  274 
  275 #ifdef PC98
  276         /* pc98_machine_type & M_EPSON_PC98 */
  277         testb   $0x02,R(pc98_system_parameter)+220
  278         jz      3f
  279         /* epson_machine_id <= 0x0b */
  280         cmpb    $0x0b,R(pc98_system_parameter)+224
  281         ja      3f
  282 
  283         /* count up memory */
   284         movl    $0x100000,%eax          /* next, tally remaining memory */
  285         movl    $0xFFF-0x100,%ecx
  286 1:      movl    0(%eax),%ebx            /* save location to check */
  287         movl    $0xa55a5aa5,0(%eax)     /* write test pattern */
  288         cmpl    $0xa55a5aa5,0(%eax)     /* does not check yet for rollover */
  289         jne     2f
  290         movl    %ebx,0(%eax)            /* restore memory */
  291         addl    $PAGE_SIZE,%eax
  292         loop    1b
  293 2:      subl    $0x100000,%eax
  294         shrl    $17,%eax
  295         movb    %al,R(pc98_system_parameter)+1
  296 3:
  297 
  298         movw    R(pc98_system_parameter+0x86),%ax
  299         movw    %ax,R(cpu_id)
  300 #endif
  301 
  302         call    identify_cpu
  303 
  304 /* clear bss */
  305 /*
  306  * XXX this should be done a little earlier.
  307  *
  308  * XXX we don't check that there is memory for our bss and page tables
  309  * before using it.
  310  *
  311  * XXX the boot program somewhat bogusly clears the bss.  We still have
  312  * to do it in case we were unzipped by kzipboot.  Then the boot program
  313  * only clears kzipboot's bss.
  314  *
  315  * XXX the gdt and idt are still somewhere in the boot program.  We
  316  * depend on the convention that the boot program is below 1MB and we
  317  * are above 1MB to keep the gdt and idt  away from the bss and page
  318  * tables.  The idt is only used if BDE_DEBUGGER is enabled.
  319  */
  320         movl    $R(end),%ecx
  321         movl    $R(edata),%edi
  322         subl    %edi,%ecx
  323         xorl    %eax,%eax
  324         cld
  325         rep
  326         stosb
  327 
  328         call    create_pagetables
  329 
  330 /*
  331  * If the CPU has support for VME, turn it on.
  332  */ 
  333         testl   $CPUID_VME, R(cpu_feature)
  334         jz      1f
  335         movl    %cr4, %eax
  336         orl     $CR4_VME, %eax
  337         movl    %eax, %cr4
  338 1:
  339 
  340 #ifdef BDE_DEBUGGER
  341 /*
  342  * Adjust as much as possible for paging before enabling paging so that the
  343  * adjustments can be traced.
  344  */
  345         call    bdb_prepare_paging
  346 #endif
  347 
  348 /* Now enable paging */
  349         movl    R(IdlePTD), %eax
  350         movl    %eax,%cr3               /* load ptd addr into mmu */
  351         movl    %cr0,%eax               /* get control word */
  352         orl     $CR0_PE|CR0_PG,%eax     /* enable paging */
  353         movl    %eax,%cr0               /* and let's page NOW! */
  354 
  355 #ifdef BDE_DEBUGGER
  356 /*
  357  * Complete the adjustments for paging so that we can keep tracing through
  358  * initi386() after the low (physical) addresses for the gdt and idt become
  359  * invalid.
  360  */
  361         call    bdb_commit_paging
  362 #endif
  363 
  364         pushl   $begin                  /* jump to high virtualized address */
  365         ret
  366 
  367 /* now running relocated at KERNBASE where the system is linked to run */
  368 begin:
  369         /* set up bootstrap stack */
  370         movl    proc0kstack,%eax        /* location of in-kernel stack */
  371                         /* bootstrap stack end location */
  372         leal    (KSTACK_PAGES*PAGE_SIZE-PCB_SIZE)(%eax),%esp
  373 
  374         xorl    %ebp,%ebp               /* mark end of frames */
  375 
  376         movl    IdlePTD,%esi
  377         movl    %esi,(KSTACK_PAGES*PAGE_SIZE-PCB_SIZE+PCB_CR3)(%eax)
  378 
  379         pushl   physfree                /* value of first for init386(first) */
  380         call    init386                 /* wire 386 chip for unix operation */
  381 
  382         /*
  383          * Clean up the stack in a way that db_numargs() understands, so
  384          * that backtraces in ddb don't underrun the stack.  Traps for
  385          * inaccessible memory are more fatal than usual this early.
  386          */
  387         addl    $4,%esp
  388 
  389         call    mi_startup              /* autoconfiguration, mountroot etc */
  390         /* NOTREACHED */
  391         addl    $0,%esp                 /* for db_numargs() again */
  392 
  393 /*
  394  * Signal trampoline, copied to top of user stack
  395  */
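/*
 * The trampoline calls the user's signal handler and then enters the
 * kernel via the sigreturn(2) system call to restore the interrupted
 * context.
 */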
  396 NON_GPROF_ENTRY(sigcode)
  397         calll   *SIGF_HANDLER(%esp)
  398         leal    SIGF_UC(%esp),%eax      /* get ucontext */
  399         pushl   %eax
  400         testl   $PSL_VM,UC_EFLAGS(%eax)
  401         jne     1f
  402         movl    UC_GS(%eax),%gs         /* restore %gs */
  403 1:
  404         movl    $SYS_sigreturn,%eax
  405         pushl   %eax                    /* junk to fake return addr. */
  406         int     $0x80                   /* enter kernel with args */
  407                                         /* on stack */
  408 1:
  409         jmp     1b
  410 
  411 #ifdef COMPAT_FREEBSD4
  412         ALIGN_TEXT
  413 freebsd4_sigcode:
  414         calll   *SIGF_HANDLER(%esp)
  415         leal    SIGF_UC4(%esp),%eax     /* get ucontext */
  416         pushl   %eax
  417         testl   $PSL_VM,UC4_EFLAGS(%eax)
  418         jne     1f
  419         movl    UC4_GS(%eax),%gs        /* restore %gs */
  420 1:
  421         movl    $344,%eax               /* 4.x SYS_sigreturn */
  422         pushl   %eax                    /* junk to fake return addr. */
  423         int     $0x80                   /* enter kernel with args */
  424                                         /* on stack */
  425 1:
  426         jmp     1b
  427 #endif
  428 
  429 #ifdef COMPAT_43
  430         ALIGN_TEXT
  431 osigcode:
  432         call    *SIGF_HANDLER(%esp)     /* call signal handler */
  433         lea     SIGF_SC(%esp),%eax      /* get sigcontext */
  434         pushl   %eax
  435         testl   $PSL_VM,SC_PS(%eax)
  436         jne     9f
  437         movl    SC_GS(%eax),%gs         /* restore %gs */
  438 9:
  439         movl    $103,%eax               /* 3.x SYS_sigreturn */
  440         pushl   %eax                    /* junk to fake return addr. */
  441         int     $0x80                   /* enter kernel with args */
  442 0:      jmp     0b
  443 #endif /* COMPAT_43 */
  444 
  445         ALIGN_TEXT
  446 esigcode:
  447 
  448         .data
  449         .globl  szsigcode
  450 szsigcode:
  451         .long   esigcode-sigcode
  452 #ifdef COMPAT_FREEBSD4
  453         .globl  szfreebsd4_sigcode
  454 szfreebsd4_sigcode:
  455         .long   esigcode-freebsd4_sigcode
  456 #endif
  457 #ifdef COMPAT_43
  458         .globl  szosigcode
  459 szosigcode:
  460         .long   esigcode-osigcode
  461 #endif
  462         .text
  463 
  464 /**********************************************************************
  465  *
  466  * Recover the bootinfo passed to us from the boot program
  467  *
  468  */
  469 recover_bootinfo:
  470         /*
  471          * This code is called in different ways depending on what loaded
  472          * and started the kernel.  This is used to detect how we get the
  473          * arguments from the other code and what we do with them.
  474          *
  475          * Old disk boot blocks:
  476          *      (*btext)(howto, bootdev, cyloffset, esym);
  477          *      [return address == 0, and can NOT be returned to]
  478          *      [cyloffset was not supported by the FreeBSD boot code
  479          *       and always passed in as 0]
  480          *      [esym is also known as total in the boot code, and
  481          *       was never properly supported by the FreeBSD boot code]
  482          *
  483          * Old diskless netboot code:
  484          *      (*btext)(0,0,0,0,&nfsdiskless,0,0,0);
  485          *      [return address != 0, and can NOT be returned to]
  486          *      If we are being booted by this code it will NOT work,
  487          *      so we are just going to halt if we find this case.
  488          *
  489          * New uniform boot code:
  490          *      (*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
  491          *      [return address != 0, and can be returned to]
  492          *
  493          * There may seem to be a lot of wasted arguments in here, but
  494          * that is so the newer boot code can still load very old kernels
  495          * and old boot code can load new kernels.
  496          */
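        /*
         * With the frame set up in btext (pushl %ebp; movl %esp,%ebp), the
         * boot arguments sit at fixed offsets from %ebp: 4(%ebp) is the
         * return address left by the boot code, 8(%ebp) is howto, 12(%ebp)
         * is bootdev, 24(%ebp) is the fifth argument (&nfsdiskless or 0)
         * and 28(%ebp) is &bootinfo.  The checks below key off these slots.
         */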
  497 
  498         /*
  499          * The old style disk boot blocks fake a frame on the stack and
  500          * did an lret to get here.  The frame on the stack has a return
  501          * address of 0.
  502          */
  503         cmpl    $0,4(%ebp)
  504         je      olddiskboot
  505 
  506         /*
  507          * We have some form of return address, so this is either the
  508          * old diskless netboot code, or the new uniform code.  That can
  509          * be detected by looking at the 5th argument, if it is 0
  510          * we are being booted by the new uniform boot code.
  511          */
  512         cmpl    $0,24(%ebp)
  513         je      newboot
  514 
  515         /*
   516          * It seems we have been loaded by the old diskless boot code; we
   517          * don't stand a chance of running, as the diskless structure has
   518          * changed considerably between the two, so just halt.
  519          */
  520          hlt
  521 
  522         /*
  523          * We have been loaded by the new uniform boot code.
   524          * Let's check the bootinfo version, and if we do not understand
   525          * it, return to the loader with a status of 1 to indicate the error.
  526          */
  527 newboot:
  528         movl    28(%ebp),%ebx           /* &bootinfo.version */
  529         movl    BI_VERSION(%ebx),%eax
  530         cmpl    $1,%eax                 /* We only understand version 1 */
  531         je      1f
  532         movl    $1,%eax                 /* Return status */
  533         leave
  534         /*
  535          * XXX this returns to our caller's caller (as is required) since
  536          * we didn't set up a frame and our caller did.
  537          */
  538         ret
  539 
  540 1:
  541         /*
  542          * If we have a kernelname copy it in
  543          */
  544         movl    BI_KERNELNAME(%ebx),%esi
  545         cmpl    $0,%esi
  546         je      2f                      /* No kernelname */
  547         movl    $MAXPATHLEN,%ecx        /* Brute force!!! */
  548         movl    $R(kernelname),%edi
  549         cmpb    $'/',(%esi)             /* Make sure it starts with a slash */
  550         je      1f
  551         movb    $'/',(%edi)
  552         incl    %edi
  553         decl    %ecx
  554 1:
  555         cld
  556         rep
  557         movsb
  558 
  559 2:
  560         /*
  561          * Determine the size of the boot loader's copy of the bootinfo
  562          * struct.  This is impossible to do properly because old versions
  563          * of the struct don't contain a size field and there are 2 old
  564          * versions with the same version number.
  565          */
  566         movl    $BI_ENDCOMMON,%ecx      /* prepare for sizeless version */
  567         testl   $RB_BOOTINFO,8(%ebp)    /* bi_size (and bootinfo) valid? */
  568         je      got_bi_size             /* no, sizeless version */
  569         movl    BI_SIZE(%ebx),%ecx
  570 got_bi_size:
  571 
  572         /*
  573          * Copy the common part of the bootinfo struct
  574          */
  575         movl    %ebx,%esi
  576         movl    $R(bootinfo),%edi
  577         cmpl    $BOOTINFO_SIZE,%ecx
  578         jbe     got_common_bi_size
  579         movl    $BOOTINFO_SIZE,%ecx
  580 got_common_bi_size:
  581         cld
  582         rep
  583         movsb
  584 
  585 #ifdef NFS_ROOT
  586 #ifndef BOOTP_NFSV3
  587         /*
  588          * If we have a nfs_diskless structure copy it in
  589          */
  590         movl    BI_NFS_DISKLESS(%ebx),%esi
  591         cmpl    $0,%esi
  592         je      olddiskboot
  593         movl    $R(nfs_diskless),%edi
  594         movl    $NFSDISKLESS_SIZE,%ecx
  595         cld
  596         rep
  597         movsb
  598         movl    $R(nfs_diskless_valid),%edi
  599         movl    $1,(%edi)
  600 #endif
  601 #endif
  602 
  603         /*
  604          * The old style disk boot.
  605          *      (*btext)(howto, bootdev, cyloffset, esym);
   606          * Note that the newer boot code just falls into here to pick
   607          * up howto and bootdev; cyloffset and esym are no longer used.
  608          */
  609 olddiskboot:
  610         movl    8(%ebp),%eax
  611         movl    %eax,R(boothowto)
  612         movl    12(%ebp),%eax
  613         movl    %eax,R(bootdev)
  614 
  615         ret
  616 
  617 
  618 /**********************************************************************
  619  *
  620  * Identify the CPU and initialize anything special about it
  621  *
  622  */
  623 identify_cpu:
  624 
  625         /* Try to toggle alignment check flag; does not exist on 386. */
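        /*
         * Technique used here and in try486 below: save EFLAGS in %ecx,
         * flip the bit of interest, load the result with popfl, read the
         * flags back and XOR them with the original.  A nonzero result
         * means the bit could be changed (AC exists on the 486 and later,
         * ID on CPUID-capable processors); the original flags are then
         * restored.
         */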
  626         pushfl
  627         popl    %eax
  628         movl    %eax,%ecx
  629         orl     $PSL_AC,%eax
  630         pushl   %eax
  631         popfl
  632         pushfl
  633         popl    %eax
  634         xorl    %ecx,%eax
  635         andl    $PSL_AC,%eax
  636         pushl   %ecx
  637         popfl
  638 
  639         testl   %eax,%eax
  640         jnz     try486
  641 
   642         /* NexGen CPU does not have alignment check flag. */
  643         pushfl
  644         movl    $0x5555, %eax
  645         xorl    %edx, %edx
  646         movl    $2, %ecx
  647         clc
  648         divl    %ecx
  649         jz      trynexgen
  650         popfl
  651         movl    $CPU_386,R(cpu)
  652         jmp     3f
  653 
  654 trynexgen:
  655         popfl
  656         movl    $CPU_NX586,R(cpu)
  657         movl    $0x4778654e,R(cpu_vendor)       # store vendor string
  658         movl    $0x72446e65,R(cpu_vendor+4)
  659         movl    $0x6e657669,R(cpu_vendor+8)
  660         movl    $0,R(cpu_vendor+12)
  661         jmp     3f
  662 
  663 try486: /* Try to toggle identification flag; does not exist on early 486s. */
  664         pushfl
  665         popl    %eax
  666         movl    %eax,%ecx
  667         xorl    $PSL_ID,%eax
  668         pushl   %eax
  669         popfl
  670         pushfl
  671         popl    %eax
  672         xorl    %ecx,%eax
  673         andl    $PSL_ID,%eax
  674         pushl   %ecx
  675         popfl
  676 
  677         testl   %eax,%eax
  678         jnz     trycpuid
  679         movl    $CPU_486,R(cpu)
  680 
  681         /*
  682          * Check Cyrix CPU
  683          * Cyrix CPUs do not change the undefined flags following
  684          * execution of the divide instruction which divides 5 by 2.
  685          *
  686          * Note: CPUID is enabled on M2, so it passes another way.
  687          */
  688         pushfl
  689         movl    $0x5555, %eax
  690         xorl    %edx, %edx
  691         movl    $2, %ecx
  692         clc
  693         divl    %ecx
  694         jnc     trycyrix
  695         popfl
   696         jmp     3f              /* Probably an Intel CPU. */
  697 
  698 trycyrix:
  699         popfl
  700         /*
   701          * The IBM Blue Lightning CPU also doesn't change the undefined
   702          * flags.  Because IBM doesn't disclose information about the Blue
   703          * Lightning, we can't distinguish it from Cyrix CPUs (including
   704          * the IBM-branded Cyrix parts).
  705          */
  706         movl    $0x69727943,R(cpu_vendor)       # store vendor string
  707         movl    $0x736e4978,R(cpu_vendor+4)
  708         movl    $0x64616574,R(cpu_vendor+8)
  709         jmp     3f
  710 
  711 trycpuid:       /* Use the `cpuid' instruction. */
  712         xorl    %eax,%eax
  713         cpuid                                   # cpuid 0
  714         movl    %eax,R(cpu_high)                # highest capability
  715         movl    %ebx,R(cpu_vendor)              # store vendor string
  716         movl    %edx,R(cpu_vendor+4)
  717         movl    %ecx,R(cpu_vendor+8)
  718         movb    $0,R(cpu_vendor+12)
  719 
  720         movl    $1,%eax
  721         cpuid                                   # cpuid 1
  722         movl    %eax,R(cpu_id)                  # store cpu_id
  723         movl    %edx,R(cpu_feature)             # store cpu_feature
  724         rorl    $8,%eax                         # extract family type
  725         andl    $15,%eax
  726         cmpl    $5,%eax
  727         jae     1f
  728 
  729         /* less than Pentium; must be 486 */
  730         movl    $CPU_486,R(cpu)
  731         jmp     3f
  732 1:
  733         /* a Pentium? */
  734         cmpl    $5,%eax
  735         jne     2f
  736         movl    $CPU_586,R(cpu)
  737         jmp     3f
  738 2:
  739         /* Greater than Pentium...call it a Pentium Pro */
  740         movl    $CPU_686,R(cpu)
  741 3:
  742         ret
  743 
  744 
  745 /**********************************************************************
  746  *
  747  * Create the first page directory and its page tables.
  748  *
  749  */
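/*
 * Outline of the work below: find the end of the loaded kernel image,
 * allocate the kernel page tables (KPTphys), the page directory (IdlePTD),
 * proc0's U-area and kernel stack, and the vm86 pages (plus the per-CPU
 * pages under SMP) from physical memory just past the kernel, then enter
 * the mappings (kernel text read-only, data/bss/symbols read-write, the
 * ISA hole, the vm86 area) and finally install the page table pages and
 * the recursive entry into IdlePTD.
 */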
  750 
  751 create_pagetables:
  752 
  753 /* Find end of kernel image (rounded up to a page boundary). */
  754         movl    $R(_end),%esi
  755 
  756 /* Include symbols, if any. */
  757         movl    R(bootinfo+BI_ESYMTAB),%edi
  758         testl   %edi,%edi
  759         je      over_symalloc
  760         movl    %edi,%esi
  761         movl    $KERNBASE,%edi
  762         addl    %edi,R(bootinfo+BI_SYMTAB)
  763         addl    %edi,R(bootinfo+BI_ESYMTAB)
  764 over_symalloc:
  765 
  766 /* If we are told where the end of the kernel space is, believe it. */
  767         movl    R(bootinfo+BI_KERNEND),%edi
  768         testl   %edi,%edi
  769         je      no_kernend
  770         movl    %edi,%esi
  771 no_kernend:
  772         
  773         addl    $PAGE_MASK,%esi
  774         andl    $~PAGE_MASK,%esi
  775         movl    %esi,R(KERNend)         /* save end of kernel */
  776         movl    %esi,R(physfree)        /* next free page is at end of kernel */
  777 
  778 /* Allocate Kernel Page Tables */
  779         ALLOCPAGES(NKPT)
  780         movl    %esi,R(KPTphys)
  781 
  782 /* Allocate Page Table Directory */
  783         ALLOCPAGES(1)
  784         movl    %esi,R(IdlePTD)
  785 
  786 /* Allocate UPAGES */
  787         ALLOCPAGES(UAREA_PAGES)
  788         movl    %esi,R(p0upa)
  789         addl    $KERNBASE, %esi
  790         movl    %esi, R(proc0uarea)
  791 
  792         ALLOCPAGES(KSTACK_PAGES)
  793         movl    %esi,R(p0kpa)
  794         addl    $KERNBASE, %esi
  795         movl    %esi, R(proc0kstack)
  796 
  797         ALLOCPAGES(1)                   /* vm86/bios stack */
  798         movl    %esi,R(vm86phystk)
  799 
  800         ALLOCPAGES(3)                   /* pgtable + ext + IOPAGES */
  801         movl    %esi,R(vm86pa)
  802         addl    $KERNBASE, %esi
  803         movl    %esi, R(vm86paddr)
  804 
  805 #ifdef SMP
  806 /* Allocate cpu0's private data page */
  807         ALLOCPAGES(1)
  808         movl    %esi,R(cpu0pp)
  809         addl    $KERNBASE, %esi
  810         movl    %esi, R(cpu0prvpage)    /* relocated to KVM space */
  811 
  812 /* Allocate SMP page table page */
  813         ALLOCPAGES(1)
  814         movl    %esi,R(SMPptpa)
  815         addl    $KERNBASE, %esi
  816         movl    %esi, R(SMPpt)          /* relocated to KVM space */
  817 #endif  /* SMP */
  818 
  819 /* Map read-only from zero to the end of the kernel text section */
  820         xorl    %eax, %eax
  821 #ifdef BDE_DEBUGGER
  822 /* If the debugger is present, actually map everything read-write. */
  823         cmpl    $0,R(_bdb_exists)
  824         jne     map_read_write
  825 #endif
  826         xorl    %edx,%edx
  827         movl    $R(etext),%ecx
  828         addl    $PAGE_MASK,%ecx
  829         shrl    $PAGE_SHIFT,%ecx
  830         fillkptphys(%edx)
  831 
  832 /* Map read-write, data, bss and symbols */
  833         movl    $R(etext),%eax
  834         addl    $PAGE_MASK, %eax
  835         andl    $~PAGE_MASK, %eax
  836 map_read_write:
  837         movl    $PG_RW,%edx
  838         movl    R(KERNend),%ecx
  839         subl    %eax,%ecx
  840         shrl    $PAGE_SHIFT,%ecx
  841         fillkptphys(%edx)
  842 
  843 /* Map page directory. */
  844         movl    R(IdlePTD), %eax
  845         movl    $1, %ecx
  846         fillkptphys($PG_RW)
  847 
  848 /* Map proc0's UPAGES in the physical way ... */
  849         movl    R(p0upa), %eax
  850         movl    $(UAREA_PAGES), %ecx
  851         fillkptphys($PG_RW)
  852 
  853 /* Map proc0's KSTACK in the physical way ... */
  854         movl    R(p0kpa), %eax
  855         movl    $(KSTACK_PAGES), %ecx
  856         fillkptphys($PG_RW)
  857 
  858 /* Map ISA hole */
  859         movl    $ISA_HOLE_START, %eax
  860         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  861         fillkptphys($PG_RW)
  862 
  863 /* Map space for the vm86 region */
  864         movl    R(vm86phystk), %eax
  865         movl    $4, %ecx
  866         fillkptphys($PG_RW)
  867 
  868 /* Map page 0 into the vm86 page table */
  869         movl    $0, %eax
  870         movl    $0, %ebx
  871         movl    $1, %ecx
  872         fillkpt(R(vm86pa), $PG_RW|PG_U)
  873 
  874 /* ...likewise for the ISA hole */
  875         movl    $ISA_HOLE_START, %eax
  876         movl    $ISA_HOLE_START>>PAGE_SHIFT, %ebx
  877         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  878         fillkpt(R(vm86pa), $PG_RW|PG_U)
  879 
  880 #ifdef SMP
  881 /* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
  882         movl    R(cpu0pp), %eax
  883         movl    $1, %ecx
  884         fillkptphys($PG_RW)
  885 
  886 /* Map SMP page table page into global kmem FWIW */
  887         movl    R(SMPptpa), %eax
  888         movl    $1, %ecx
  889         fillkptphys($PG_RW)
  890 
  891 /* Map the private page into the SMP page table */
  892         movl    R(cpu0pp), %eax
  893         movl    $0, %ebx                /* pte offset = 0 */
  894         movl    $1, %ecx                /* one private page coming right up */
  895         fillkpt(R(SMPptpa), $PG_RW)
  896 
   897 /* ... and put the SMP page table page in the pde. */
  898         movl    R(SMPptpa), %eax
  899         movl    $MPPTDI, %ebx
  900         movl    $1, %ecx
  901         fillkpt(R(IdlePTD), $PG_RW)
  902 
  903 /* Fakeup VA for the local apic to allow early traps. */
  904         ALLOCPAGES(1)
  905         movl    %esi, %eax
   906         movl    $(NPTEPG-1), %ebx       /* pte offset = NPTEPG-1 */
  907         movl    $1, %ecx                /* one private pt coming right up */
  908         fillkpt(R(SMPptpa), $PG_RW)
  909 #endif  /* SMP */
  910 
  911 /* install a pde for temporary double map of bottom of VA */
  912         movl    R(KPTphys), %eax
  913         xorl    %ebx, %ebx
  914         movl    $NKPT, %ecx
  915         fillkpt(R(IdlePTD), $PG_RW)
  916 
  917 /* install pde's for pt's */
  918         movl    R(KPTphys), %eax
  919         movl    $KPTDI, %ebx
  920         movl    $NKPT, %ecx
  921         fillkpt(R(IdlePTD), $PG_RW)
  922 
  923 /* install a pde recursively mapping page directory as a page table */
  924         movl    R(IdlePTD), %eax
  925         movl    $PTDPTDI, %ebx
  926         movl    $1,%ecx
  927         fillkpt(R(IdlePTD), $PG_RW)
  928 
  929         ret
  930 
  931 #ifdef BDE_DEBUGGER
  932 bdb_prepare_paging:
  933         cmpl    $0,R(_bdb_exists)
  934         je      bdb_prepare_paging_exit
  935 
  936         subl    $6,%esp
  937 
  938         /*
  939          * Copy and convert debugger entries from the bootstrap gdt and idt
  940          * to the kernel gdt and idt.  Everything is still in low memory.
  941          * Tracing continues to work after paging is enabled because the
  942          * low memory addresses remain valid until everything is relocated.
  943          * However, tracing through the setidt() that initializes the trace
  944          * trap will crash.
  945          */
  946         sgdt    (%esp)
  947         movl    2(%esp),%esi            /* base address of bootstrap gdt */
  948         movl    $R(_gdt),%edi
  949         movl    %edi,2(%esp)            /* prepare to load kernel gdt */
  950         movl    $8*18/4,%ecx
  951         cld
  952         rep                             /* copy gdt */
  953         movsl
  954         movl    $R(_gdt),-8+2(%edi)     /* adjust gdt self-ptr */
  955         movb    $0x92,-8+5(%edi)
  956         lgdt    (%esp)
  957 
  958         sidt    (%esp)
  959         movl    2(%esp),%esi            /* base address of current idt */
  960         movl    8+4(%esi),%eax          /* convert dbg descriptor to ... */
  961         movw    8(%esi),%ax
  962         movl    %eax,R(bdb_dbg_ljmp+1)  /* ... immediate offset ... */
  963         movl    8+2(%esi),%eax
  964         movw    %ax,R(bdb_dbg_ljmp+5)   /* ... and selector for ljmp */
  965         movl    24+4(%esi),%eax         /* same for bpt descriptor */
  966         movw    24(%esi),%ax
  967         movl    %eax,R(bdb_bpt_ljmp+1)
  968         movl    24+2(%esi),%eax
  969         movw    %ax,R(bdb_bpt_ljmp+5)
  970         movl    R(_idt),%edi
  971         movl    %edi,2(%esp)            /* prepare to load kernel idt */
  972         movl    $8*4/4,%ecx
  973         cld
  974         rep                             /* copy idt */
  975         movsl
  976         lidt    (%esp)
  977 
  978         addl    $6,%esp
  979 
  980 bdb_prepare_paging_exit:
  981         ret
  982 
  983 /* Relocate debugger gdt entries and gdt and idt pointers. */
  984 bdb_commit_paging:
  985         cmpl    $0,_bdb_exists
  986         je      bdb_commit_paging_exit
  987 
  988         movl    $gdt+8*9,%eax           /* adjust slots 9-17 */
  989         movl    $9,%ecx
  990 reloc_gdt:
  991         movb    $KERNBASE>>24,7(%eax)   /* top byte of base addresses, was 0, */
  992         addl    $8,%eax                 /* now KERNBASE>>24 */
  993         loop    reloc_gdt
  994 
  995         subl    $6,%esp
  996         sgdt    (%esp)
  997         addl    $KERNBASE,2(%esp)
  998         lgdt    (%esp)
  999         sidt    (%esp)
 1000         addl    $KERNBASE,2(%esp)
 1001         lidt    (%esp)
 1002         addl    $6,%esp
 1003 
 1004         int     $3
 1005 
 1006 bdb_commit_paging_exit:
 1007         ret
 1008 
 1009 #endif /* BDE_DEBUGGER */
