FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s


    1 /*-
    2  * Copyright (c) 1990 The Regents of the University of California.
    3  * All rights reserved.
    4  *
    5  * This code is derived from software contributed to Berkeley by
    6  * William Jolitz.
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 4. Neither the name of the University nor the names of its contributors
   17  *    may be used to endorse or promote products derived from this software
   18  *    without specific prior written permission.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  *
   32  *      from: @(#)locore.s      7.3 (Berkeley) 5/13/91
   33  * $FreeBSD$
   34  *
   35  *              originally from: locore.s, by William F. Jolitz
   36  *
   37  *              Substantially rewritten by David Greenman, Rod Grimes,
   38  *                      Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
   39  *                      and many others.
   40  */
   41 
   42 #include "opt_bootp.h"
   43 #include "opt_compat.h"
   44 #include "opt_nfsroot.h"
   45 #include "opt_pmap.h"
   46 
   47 #include <sys/reboot.h>
   48 
   49 #include <machine/asmacros.h>
   50 #include <machine/cputypes.h>
   51 #include <machine/psl.h>
   52 #include <machine/pmap.h>
   53 #include <machine/specialreg.h>
   54 
   55 #include "assym.s"
   56 
   57 /*
   58  *      XXX
   59  *
   60  * Note: This version was greatly munged to avoid various assembler errors
   61  * that may be fixed in newer versions of gas.  Perhaps newer versions
   62  * will allow a more pleasant appearance.
   63  */
   64 
   65 /*
   66  * PTmap is the recursive pagemap at the top of the virtual address space.
   67  * Within PTmap, the page directory can be found (third indirection).
   68  */
   69         .globl  PTmap,PTD,PTDpde
   70         .set    PTmap,(PTDPTDI << PDRSHIFT)
   71         .set    PTD,PTmap + (PTDPTDI * PAGE_SIZE)
   72         .set    PTDpde,PTD + (PTDPTDI * PDESIZE)
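
A minimal C sketch of what the recursive slot above provides: once the page
directory is installed as one of its own page tables, the PTE or PDE for any
virtual address can be located by address arithmetic alone.  The constant
values and helper names below are illustrative stand-ins, not the kernel's
own definitions from <machine/pmap.h>.

#include <stdint.h>

#define PAGE_SHIFT      12
#define PDRSHIFT        22                      /* one PDE covers 4 MB (non-PAE) */
#define PTDPTDI         0x3bfU                  /* illustrative recursive slot */

#define PTmap   ((uint32_t *)(uintptr_t)(PTDPTDI << PDRSHIFT))
#define PTD     ((uint32_t *)(uintptr_t)((PTDPTDI << PDRSHIFT) + \
                    (PTDPTDI << PAGE_SHIFT)))

/* Virtual address of the PTE that maps va. */
static inline uint32_t *
sketch_vtopte(uintptr_t va)
{
        return (&PTmap[va >> PAGE_SHIFT]);
}

/* Virtual address of the PDE that maps va. */
static inline uint32_t *
sketch_vtopde(uintptr_t va)
{
        return (&PTD[va >> PDRSHIFT]);
}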
   73 
   74 /*
   75  * Compiled KERNBASE location and the kernel load address
   76  */
   77         .globl  kernbase
   78         .set    kernbase,KERNBASE
   79         .globl  kernload
   80         .set    kernload,KERNLOAD
   81 
   82 /*
   83  * Globals
   84  */
   85         .data
   86         ALIGN_DATA                      /* just to be sure */
   87 
   88         .space  0x2000                  /* space for tmpstk - temporary stack */
   89 tmpstk:
   90 
   91         .globl  bootinfo
   92 bootinfo:       .space  BOOTINFO_SIZE   /* bootinfo that we can handle */
   93 
   94                 .globl KERNend
   95 KERNend:        .long   0               /* phys addr end of kernel (just after bss) */
   96 physfree:       .long   0               /* phys addr of next free page */
   97 
   98         .globl  IdlePTD
   99 IdlePTD:        .long   0               /* phys addr of kernel PTD */
  100 
  101 #if defined(PAE) || defined(PAE_TABLES)
  102         .globl  IdlePDPT
  103 IdlePDPT:       .long   0               /* phys addr of kernel PDPT */
  104 #endif
  105 
  106         .globl  KPTmap
  107 KPTmap:         .long   0               /* address of kernel page tables */
  108 
  109         .globl  KPTphys
  110 KPTphys:        .long   0               /* phys addr of kernel page tables */
  111 
  112         .globl  proc0kstack
  113 proc0kstack:    .long   0               /* address of proc 0 kstack space */
  114 p0kpa:          .long   0               /* phys addr of proc0's STACK */
  115 
  116 vm86phystk:     .long   0               /* PA of vm86/bios stack */
  117 
  118         .globl  vm86paddr, vm86pa
  119 vm86paddr:      .long   0               /* address of vm86 region */
  120 vm86pa:         .long   0               /* phys addr of vm86 region */
  121 
  122 #ifdef PC98
  123         .globl  pc98_system_parameter
  124 pc98_system_parameter:
  125         .space  0x240
  126 #endif
  127 
  128 /**********************************************************************
  129  *
  130  * Some handy macros
  131  *
  132  */
  133 
  134 #define R(foo) ((foo)-KERNBASE)
  135 
  136 #define ALLOCPAGES(foo) \
  137         movl    R(physfree), %esi ; \
  138         movl    $((foo)*PAGE_SIZE), %eax ; \
  139         addl    %esi, %eax ; \
  140         movl    %eax, R(physfree) ; \
  141         movl    %esi, %edi ; \
  142         movl    $((foo)*PAGE_SIZE),%ecx ; \
  143         xorl    %eax,%eax ; \
  144         cld ; \
  145         rep ; \
  146         stosb
  147 
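For readers following along, a hedged C rendering of the two macros above.
KERNBASE and PAGE_SIZE are given typical i386 values here and
allocpages_sketch() is a made-up name, so treat this as a sketch rather than
the kernel's implementation.

#include <stdint.h>
#include <string.h>

#define KERNBASE        0xc0000000UL            /* typical i386 value */
#define PAGE_SIZE       4096UL

static uintptr_t physfree;                      /* phys addr of next free page */

/* R(foo): paging is still off, so a symbol's link-time (virtual) address
 * must be shifted down to the physical address it was loaded at. */
#define R(foo)          ((uintptr_t)(foo) - KERNBASE)

/* ALLOCPAGES(n): carve n pages off the free area after the kernel image
 * and zero them (the rep/stosb in the macro above). */
static uintptr_t
allocpages_sketch(uint32_t n)
{
        uintptr_t pa = physfree;

        physfree += n * PAGE_SIZE;
        memset((void *)pa, 0, n * PAGE_SIZE);   /* valid only before paging is on */
        return (pa);
}
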
  148 /*
  149  * fillkpt
  150  *      eax = page frame address
  151  *      ebx = index into page table
  152  *      ecx = how many pages to map
  153  *      base = base address of page dir/table
  154  *      prot = protection bits
  155  */
  156 #define fillkpt(base, prot)               \
  157         shll    $PTESHIFT,%ebx          ; \
  158         addl    base,%ebx               ; \
  159         orl     $PG_V,%eax              ; \
  160         orl     prot,%eax               ; \
  161 1:      movl    %eax,(%ebx)             ; \
  162         addl    $PAGE_SIZE,%eax         ; /* increment physical address */ \
  163         addl    $PTESIZE,%ebx           ; /* next pte */ \
  164         loop    1b
  165 
  166 /*
  167  * fillkptphys(prot)
  168  *      eax = physical address
  169  *      ecx = how many pages to map
  170  *      prot = protection bits
  171  */
  172 #define fillkptphys(prot)                 \
  173         movl    %eax, %ebx              ; \
  174         shrl    $PAGE_SHIFT, %ebx       ; \
  175         fillkpt(R(KPTphys), prot)
  176 
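The same idea in C, as a hedged sketch of fillkpt()/fillkptphys() for the
non-PAE layout (32-bit PTEs, so PTESIZE == 4 and PTESHIFT == 2).  PG_V, the
pte_t typedef and the function names are illustrative stand-ins for the
definitions in <machine/pmap.h>.

#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PG_V            0x001U                  /* PTE valid bit */

typedef uint32_t pte_t;

/* Write npages PTEs starting at index idx of the table at base, mapping
 * successive physical pages beginning at pa with the given protection. */
static void
fillkpt_sketch(pte_t *base, uint32_t idx, uintptr_t pa, uint32_t npages,
    uint32_t prot)
{
        while (npages-- != 0) {
                base[idx++] = (pte_t)(pa | PG_V | prot);
                pa += PAGE_SIZE;
        }
}

/* fillkptphys(): enter npages pages starting at physical address pa into
 * the kernel page tables (KPTphys), indexed by the address itself. */
static void
fillkptphys_sketch(pte_t *kptphys, uintptr_t pa, uint32_t npages,
    uint32_t prot)
{
        fillkpt_sketch(kptphys, pa >> PAGE_SHIFT, pa, npages, prot);
}
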
  177         .text
  178 /**********************************************************************
  179  *
  180  * This is where the bootblocks start us and set the ball rolling...
  181  *
  182  */
  183 NON_GPROF_ENTRY(btext)
  184 
  185 #ifdef PC98
  186         /* save SYSTEM PARAMETER for resume (NS/T or other) */
  187         movl    $0xa1400,%esi
  188         movl    $R(pc98_system_parameter),%edi
  189         movl    $0x0240,%ecx
  190         cld
  191         rep
  192         movsb
  193 #else   /* IBM-PC */
  194 /* Tell the bios to warmboot next time */
  195         movw    $0x1234,0x472
  196 #endif  /* PC98 */
  197 
  198 /* Set up a real frame in case the double return in newboot is executed. */
  199         pushl   %ebp
  200         movl    %esp, %ebp
  201 
  202 /* Don't trust what the BIOS gives for eflags. */
  203         pushl   $PSL_KERNEL
  204         popfl
  205 
  206 /*
  207  * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
  208  * to set %cs, %ds, %es and %ss.
  209  */
  210         mov     %ds, %ax
  211         mov     %ax, %fs
  212         mov     %ax, %gs
  213 
  214 /*
  215  * Clear the bss.  Not all boot programs do it, and it is our job anyway.
  216  *
  217  * XXX we don't check that there is memory for our bss and page tables
  218  * before using it.
  219  *
  220  * Note: we must be careful not to overwrite an active gdt or idt.  They
  221  * are inactive from now until we switch to new ones, since we don't load
  222  * any more segment registers or permit interrupts until after the switch.
  223  */
  224         movl    $R(__bss_end),%ecx
  225         movl    $R(__bss_start),%edi
  226         subl    %edi,%ecx
  227         xorl    %eax,%eax
  228         cld
  229         rep
  230         stosb
  231 
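The loop above is, in C terms, nothing more than a memset() over the bss,
using the linker-provided __bss_start/__bss_end symbols (adjusted by R()
in the assembly because paging is not yet enabled); a sketch:

#include <string.h>

extern char __bss_start[], __bss_end[];

static void
clear_bss_sketch(void)
{
        memset(__bss_start, 0, (size_t)(__bss_end - __bss_start));
}
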
  232         call    recover_bootinfo
  233 
  234 /* Get onto a stack that we can trust. */
  235 /*
  236  * XXX this step is delayed in case recover_bootinfo needs to return via
  237  * the old stack, but it need not be, since recover_bootinfo actually
  238  * returns via the old frame.
  239  */
  240         movl    $R(tmpstk),%esp
  241 
  242 #ifdef PC98
  243         /* pc98_machine_type & M_EPSON_PC98 */
  244         testb   $0x02,R(pc98_system_parameter)+220
  245         jz      3f
  246         /* epson_machine_id <= 0x0b */
  247         cmpb    $0x0b,R(pc98_system_parameter)+224
  248         ja      3f
  249 
  250         /* count up memory */
  251         movl    $0x100000,%eax          /* next, tally remaining memory */
  252         movl    $0xFFF-0x100,%ecx
  253 1:      movl    0(%eax),%ebx            /* save location to check */
  254         movl    $0xa55a5aa5,0(%eax)     /* write test pattern */
  255         cmpl    $0xa55a5aa5,0(%eax)     /* does not yet check for rollover */
  256         jne     2f
  257         movl    %ebx,0(%eax)            /* restore memory */
  258         addl    $PAGE_SIZE,%eax
  259         loop    1b
  260 2:      subl    $0x100000,%eax
  261         shrl    $17,%eax
  262         movb    %al,R(pc98_system_parameter)+1
  263 3:
  264 
  265         movw    R(pc98_system_parameter+0x86),%ax
  266         movw    %ax,R(cpu_id)
  267 #endif
  268 
  269         call    identify_cpu
  270         call    create_pagetables
  271 
  272 /*
  273  * If the CPU has support for VME, turn it on.
  274  */ 
  275         testl   $CPUID_VME, R(cpu_feature)
  276         jz      1f
  277         movl    %cr4, %eax
  278         orl     $CR4_VME, %eax
  279         movl    %eax, %cr4
  280 1:
  281 
  282 /* Now enable paging */
  283 #if defined(PAE) || defined(PAE_TABLES)
  284         movl    R(IdlePDPT), %eax
  285         movl    %eax, %cr3
  286         movl    %cr4, %eax
  287         orl     $CR4_PAE, %eax
  288         movl    %eax, %cr4
  289 #else
  290         movl    R(IdlePTD), %eax
  291         movl    %eax,%cr3               /* load ptd addr into mmu */
  292 #endif
  293         movl    %cr0,%eax               /* get control word */
  294         orl     $CR0_PE|CR0_PG,%eax     /* enable paging */
  295         movl    %eax,%cr0               /* and let's page NOW! */
  296 
  297         pushl   $begin                  /* jump to high virtualized address */
  298         ret
  299 
  300 /* now running relocated at KERNBASE where the system is linked to run */
  301 begin:
  302         /* set up bootstrap stack */
  303         movl    proc0kstack,%eax        /* location of in-kernel stack */
  304 
  305         /*
  306          * Only use bottom page for init386().  init386() calculates the
  307          * PCB + FPU save area size and returns the true top of stack.
  308          */
  309         leal    PAGE_SIZE(%eax),%esp
  310 
  311         xorl    %ebp,%ebp               /* mark end of frames */
  312 
  313         pushl   physfree                /* value of first for init386(first) */
  314         call    init386                 /* wire 386 chip for unix operation */
  315 
  316         /*
  317          * Clean up the stack in a way that db_numargs() understands, so
  318          * that backtraces in ddb don't underrun the stack.  Traps for
  319          * inaccessible memory are more fatal than usual this early.
  320          */
  321         addl    $4,%esp
  322 
  323         /* Switch to true top of stack. */
  324         movl    %eax,%esp
  325 
  326         call    mi_startup              /* autoconfiguration, mountroot etc */
  327         /* NOTREACHED */
  328         addl    $0,%esp                 /* for db_numargs() again */
  329 
  330 /**********************************************************************
  331  *
  332  * Recover the bootinfo passed to us from the boot program
  333  *
  334  */
  335 recover_bootinfo:
  336         /*
  337          * This code is called in different ways depending on what loaded
  338          * and started the kernel.  This is used to detect how we get the
  339          * arguments from the other code and what we do with them.
  340          *
  341          * Old disk boot blocks:
  342          *      (*btext)(howto, bootdev, cyloffset, esym);
  343          *      [return address == 0, and can NOT be returned to]
  344          *      [cyloffset was not supported by the FreeBSD boot code
  345          *       and always passed in as 0]
  346          *      [esym is also known as total in the boot code, and
  347          *       was never properly supported by the FreeBSD boot code]
  348          *
  349          * Old diskless netboot code:
  350          *      (*btext)(0,0,0,0,&nfsdiskless,0,0,0);
  351          *      [return address != 0, and can NOT be returned to]
  352          *      If we are being booted by this code it will NOT work,
  353          *      so we are just going to halt if we find this case.
  354          *
  355          * New uniform boot code:
  356          *      (*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
  357          *      [return address != 0, and can be returned to]
  358          *
  359          * There may seem to be a lot of wasted arguments in here, but
  360          * that is so the newer boot code can still load very old kernels
  361          * and old boot code can load new kernels.
  362          */
  363 
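A hedged C sketch of the frame layout recover_bootinfo inspects and the
dispatch rules described above; the struct, enum and function names are
illustrative only.

#include <stdint.h>

struct btext_frame {
        uint32_t saved_ebp;     /* 0(%ebp)                                  */
        uint32_t ret_addr;      /* 4(%ebp): 0 => old disk boot blocks       */
        uint32_t howto;         /* 8(%ebp)                                  */
        uint32_t bootdev;       /* 12(%ebp)                                 */
        uint32_t cyloffset;     /* 16(%ebp): always 0 from FreeBSD boot     */
        uint32_t esym;          /* 20(%ebp)                                 */
        uint32_t arg5;          /* 24(%ebp): 0 => new uniform boot code     */
        uint32_t bootinfop;     /* 28(%ebp): &bootinfo (new code only)      */
};

enum boot_kind { OLD_DISKBOOT, OLD_NETBOOT, NEW_UNIFORM };

static enum boot_kind
classify_boot_sketch(const struct btext_frame *f)
{
        if (f->ret_addr == 0)
                return (OLD_DISKBOOT);  /* falls through to olddiskboot */
        if (f->arg5 == 0)
                return (NEW_UNIFORM);   /* handled at newboot */
        return (OLD_NETBOOT);           /* unsupported: the code halts */
}
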
  364         /*
  365          * The old-style disk boot blocks faked a frame on the stack and
  366          * did an lret to get here.  The frame on the stack has a return
  367          * address of 0.
  368          */
  369         cmpl    $0,4(%ebp)
  370         je      olddiskboot
  371 
  372         /*
  373          * We have some form of return address, so this is either the
  374          * old diskless netboot code, or the new uniform code.  That can
  375          * be detected by looking at the 5th argument, if it is 0
  376          * we are being booted by the new uniform boot code.
  377          */
  378         cmpl    $0,24(%ebp)
  379         je      newboot
  380 
  381         /*
  382          * It seems we have been loaded by the old diskless boot code.  We
  383          * don't stand a chance of running, as the diskless structure
  384          * changed considerably between the two, so just halt.
  385          */
  386          hlt
  387 
  388         /*
  389          * We have been loaded by the new uniform boot code.
  390          * Let's check the bootinfo version, and if we do not understand
  391          * it, we return to the loader with a status of 1 to indicate this error.
  392          */
  393 newboot:
  394         movl    28(%ebp),%ebx           /* &bootinfo.version */
  395         movl    BI_VERSION(%ebx),%eax
  396         cmpl    $1,%eax                 /* We only understand version 1 */
  397         je      1f
  398         movl    $1,%eax                 /* Return status */
  399         leave
  400         /*
  401          * XXX this returns to our caller's caller (as is required) since
  402          * we didn't set up a frame and our caller did.
  403          */
  404         ret
  405 
  406 1:
  407         /*
  408          * If we have a kernelname copy it in
  409          */
  410         movl    BI_KERNELNAME(%ebx),%esi
  411         cmpl    $0,%esi
  412         je      2f                      /* No kernelname */
  413         movl    $MAXPATHLEN,%ecx        /* Brute force!!! */
  414         movl    $R(kernelname),%edi
  415         cmpb    $'/',(%esi)             /* Make sure it starts with a slash */
  416         je      1f
  417         movb    $'/',(%edi)
  418         incl    %edi
  419         decl    %ecx
  420 1:
  421         cld
  422         rep
  423         movsb
  424 
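In C terms the copy above looks roughly like the sketch below; note that,
as the "Brute force" comment says, a full MAXPATHLEN-sized block is copied
rather than stopping at the NUL terminator.  The function name is made up.

#include <string.h>

#define MAXPATHLEN      1024            /* as in <sys/param.h> */

static char kernelname[MAXPATHLEN];

static void
copy_kernelname_sketch(const char *src)
{
        char *dst = kernelname;
        size_t n = MAXPATHLEN;

        if (src == NULL)
                return;                 /* no kernelname supplied */
        if (src[0] != '/') {
                *dst++ = '/';           /* force an absolute path */
                n--;
        }
        memcpy(dst, src, n);            /* brute force, as in the assembly */
}
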
  425 2:
  426         /*
  427          * Determine the size of the boot loader's copy of the bootinfo
  428          * struct.  This is impossible to do properly because old versions
  429          * of the struct don't contain a size field and there are 2 old
  430          * versions with the same version number.
  431          */
  432         movl    $BI_ENDCOMMON,%ecx      /* prepare for sizeless version */
  433         testl   $RB_BOOTINFO,8(%ebp)    /* bi_size (and bootinfo) valid? */
  434         je      got_bi_size             /* no, sizeless version */
  435         movl    BI_SIZE(%ebx),%ecx
  436 got_bi_size:
  437 
  438         /*
  439          * Copy the common part of the bootinfo struct
  440          */
  441         movl    %ebx,%esi
  442         movl    $R(bootinfo),%edi
  443         cmpl    $BOOTINFO_SIZE,%ecx
  444         jbe     got_common_bi_size
  445         movl    $BOOTINFO_SIZE,%ecx
  446 got_common_bi_size:
  447         cld
  448         rep
  449         movsb
  450 
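The size handling above, sketched in C (the struct here is an illustrative
subset, not the real struct bootinfo layout; RB_BOOTINFO is the howto flag
from <sys/reboot.h>):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RB_BOOTINFO     0x80000000U     /* bootinfo (and bi_size) is valid */

struct bootinfo_sketch {                /* illustrative subset only */
        uint32_t bi_version;
        /* ... common, pre-size-field members ... */
        uint32_t bi_size;               /* only present in newer versions */
        /* ... */
};

static void
copy_bootinfo_sketch(struct bootinfo_sketch *dst,
    const struct bootinfo_sketch *src, uint32_t howto, size_t endcommon)
{
        size_t n;

        /* Old loaders don't provide bi_size; assume only the common part. */
        n = (howto & RB_BOOTINFO) ? src->bi_size : endcommon;
        if (n > sizeof(*dst))
                n = sizeof(*dst);       /* clamp to our BOOTINFO_SIZE */
        memcpy(dst, src, n);
}
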
  451 #ifdef NFS_ROOT
  452 #ifndef BOOTP_NFSV3
  453         /*
  454          * If we have a nfs_diskless structure copy it in
  455          */
  456         movl    BI_NFS_DISKLESS(%ebx),%esi
  457         cmpl    $0,%esi
  458         je      olddiskboot
  459         movl    $R(nfs_diskless),%edi
  460         movl    $NFSDISKLESS_SIZE,%ecx
  461         cld
  462         rep
  463         movsb
  464         movl    $R(nfs_diskless_valid),%edi
  465         movl    $1,(%edi)
  466 #endif
  467 #endif
  468 
  469         /*
  470          * The old style disk boot.
  471          *      (*btext)(howto, bootdev, cyloffset, esym);
  472          * Note that the newer boot code just falls into here to pick
  473          * up howto and bootdev; cyloffset and esym are no longer used.
  474          */
  475 olddiskboot:
  476         movl    8(%ebp),%eax
  477         movl    %eax,R(boothowto)
  478         movl    12(%ebp),%eax
  479         movl    %eax,R(bootdev)
  480 
  481         ret
  482 
  483 
  484 /**********************************************************************
  485  *
  486  * Identify the CPU and initialize anything special about it
  487  *
  488  */
  489 ENTRY(identify_cpu)
  490 
  491         pushl   %ebx
  492 
  493         /* Try to toggle alignment check flag; does not exist on 386. */
  494         pushfl
  495         popl    %eax
  496         movl    %eax,%ecx
  497         orl     $PSL_AC,%eax
  498         pushl   %eax
  499         popfl
  500         pushfl
  501         popl    %eax
  502         xorl    %ecx,%eax
  503         andl    $PSL_AC,%eax
  504         pushl   %ecx
  505         popfl
  506 
  507         testl   %eax,%eax
  508         jnz     try486
  509 
  510         /* NexGen CPU does not have alignment check flag. */
  511         pushfl
  512         movl    $0x5555, %eax
  513         xorl    %edx, %edx
  514         movl    $2, %ecx
  515         clc
  516         divl    %ecx
  517         jz      trynexgen
  518         popfl
  519         movl    $CPU_386,R(cpu)
  520         jmp     3f
  521 
  522 trynexgen:
  523         popfl
  524         movl    $CPU_NX586,R(cpu)
  525         movl    $0x4778654e,R(cpu_vendor)       # store vendor string
  526         movl    $0x72446e65,R(cpu_vendor+4)
  527         movl    $0x6e657669,R(cpu_vendor+8)
  528         movl    $0,R(cpu_vendor+12)
  529         jmp     3f
  530 
  531 try486: /* Try to toggle identification flag; does not exist on early 486s. */
  532         pushfl
  533         popl    %eax
  534         movl    %eax,%ecx
  535         xorl    $PSL_ID,%eax
  536         pushl   %eax
  537         popfl
  538         pushfl
  539         popl    %eax
  540         xorl    %ecx,%eax
  541         andl    $PSL_ID,%eax
  542         pushl   %ecx
  543         popfl
  544 
  545         testl   %eax,%eax
  546         jnz     trycpuid
  547         movl    $CPU_486,R(cpu)
  548 
  549         /*
  550          * Check for a Cyrix CPU.
  551          * Cyrix CPUs do not change the undefined flags following
  552          * execution of the divide instruction, which divides 5 by 2.
  553          *
  554          * Note: CPUID is enabled on M2, so it passes another way.
  555          */
  556         pushfl
  557         movl    $0x5555, %eax
  558         xorl    %edx, %edx
  559         movl    $2, %ecx
  560         clc
  561         divl    %ecx
  562         jnc     trycyrix
  563         popfl
  564         jmp     3f              /* You may be using an Intel CPU. */
  565 
  566 trycyrix:
  567         popfl
  568         /*
  569          * The IBM Blue Lightning CPU also doesn't change the undefined flags.
  570          * Because IBM doesn't disclose the information for the Blue Lightning
  571          * CPU, we can't distinguish it from Cyrix CPUs (including the IBM
  572          * brand of Cyrix CPUs).
  573          */
  574         movl    $0x69727943,R(cpu_vendor)       # store vendor string
  575         movl    $0x736e4978,R(cpu_vendor+4)
  576         movl    $0x64616574,R(cpu_vendor+8)
  577         jmp     3f
  578 
  579 trycpuid:       /* Use the `cpuid' instruction. */
  580         xorl    %eax,%eax
  581         cpuid                                   # cpuid 0
  582         movl    %eax,R(cpu_high)                # highest capability
  583         movl    %ebx,R(cpu_vendor)              # store vendor string
  584         movl    %edx,R(cpu_vendor+4)
  585         movl    %ecx,R(cpu_vendor+8)
  586         movb    $0,R(cpu_vendor+12)
  587 
  588         movl    $1,%eax
  589         cpuid                                   # cpuid 1
  590         movl    %eax,R(cpu_id)                  # store cpu_id
  591         movl    %ebx,R(cpu_procinfo)            # store cpu_procinfo
  592         movl    %edx,R(cpu_feature)             # store cpu_feature
  593         movl    %ecx,R(cpu_feature2)            # store cpu_feature2
  594         rorl    $8,%eax                         # extract family type
  595         andl    $15,%eax
  596         cmpl    $5,%eax
  597         jae     1f
  598 
  599         /* less than Pentium; must be 486 */
  600         movl    $CPU_486,R(cpu)
  601         jmp     3f
  602 1:
  603         /* a Pentium? */
  604         cmpl    $5,%eax
  605         jne     2f
  606         movl    $CPU_586,R(cpu)
  607         jmp     3f
  608 2:
  609         /* Greater than Pentium...call it a Pentium Pro */
  610         movl    $CPU_686,R(cpu)
  611 3:
  612         popl    %ebx
  613         ret
  614 END(identify_cpu)
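
The family classification done in trycpuid above boils down to a few bits of
the "cpuid 1" result; a hedged C sketch, with illustrative enum names
standing in for the CPU_486/CPU_586/CPU_686 values from <machine/cputypes.h>:

#include <stdint.h>

enum { SK_CPU_486, SK_CPU_586, SK_CPU_686 };    /* illustrative stand-ins */

static int
classify_family_sketch(uint32_t cpuid1_eax)
{
        uint32_t family = (cpuid1_eax >> 8) & 0xf;      /* rorl $8 ; andl $15 */

        if (family < 5)
                return (SK_CPU_486);    /* less than Pentium: call it a 486 */
        if (family == 5)
                return (SK_CPU_586);    /* a Pentium */
        return (SK_CPU_686);            /* greater: call it a Pentium Pro */
}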
  615 
  616 
  617 /**********************************************************************
  618  *
  619  * Create the first page directory and its page tables.
  620  *
  621  */
  622 
  623 create_pagetables:
  624 
  625 /* Find end of kernel image (rounded up to a page boundary). */
  626         movl    $R(_end),%esi
  627 
  628 /* Include symbols, if any. */
  629         movl    R(bootinfo+BI_ESYMTAB),%edi
  630         testl   %edi,%edi
  631         je      over_symalloc
  632         movl    %edi,%esi
  633         movl    $KERNBASE,%edi
  634         addl    %edi,R(bootinfo+BI_SYMTAB)
  635         addl    %edi,R(bootinfo+BI_ESYMTAB)
  636 over_symalloc:
  637 
  638 /* If we are told where the end of the kernel space is, believe it. */
  639         movl    R(bootinfo+BI_KERNEND),%edi
  640         testl   %edi,%edi
  641         je      no_kernend
  642         movl    %edi,%esi
  643 no_kernend:
  644 
  645         addl    $PDRMASK,%esi           /* Play conservative for now, and */
  646         andl    $~PDRMASK,%esi          /*   ... round up to the next 4M. */
  647         movl    %esi,R(KERNend)         /* save end of kernel */
  648         movl    %esi,R(physfree)        /* next free page is at end of kernel */
  649 
  650 /* Allocate Kernel Page Tables */
  651         ALLOCPAGES(NKPT)
  652         movl    %esi,R(KPTphys)
  653         addl    $(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
  654         movl    %esi,R(KPTmap)
  655 
  656 /* Allocate Page Table Directory */
  657 #if defined(PAE) || defined(PAE_TABLES)
  658         /* XXX only need 32 bytes (easier for now) */
  659         ALLOCPAGES(1)
  660         movl    %esi,R(IdlePDPT)
  661 #endif
  662         ALLOCPAGES(NPGPTD)
  663         movl    %esi,R(IdlePTD)
  664 
  665 /* Allocate KSTACK */
  666         ALLOCPAGES(TD0_KSTACK_PAGES)
  667         movl    %esi,R(p0kpa)
  668         addl    $KERNBASE, %esi
  669         movl    %esi, R(proc0kstack)
  670 
  671         ALLOCPAGES(1)                   /* vm86/bios stack */
  672         movl    %esi,R(vm86phystk)
  673 
  674         ALLOCPAGES(3)                   /* pgtable + ext + IOPAGES */
  675         movl    %esi,R(vm86pa)
  676         addl    $KERNBASE, %esi
  677         movl    %esi, R(vm86paddr)
  678 
  679 /*
  680  * Enable PSE and PGE.
  681  */
  682 #ifndef DISABLE_PSE
  683         testl   $CPUID_PSE, R(cpu_feature)
  684         jz      1f
  685         movl    $PG_PS, R(pseflag)
  686         movl    %cr4, %eax
  687         orl     $CR4_PSE, %eax
  688         movl    %eax, %cr4
  689 1:
  690 #endif
  691 #ifndef DISABLE_PG_G
  692         testl   $CPUID_PGE, R(cpu_feature)
  693         jz      2f
  694         movl    $PG_G, R(pgeflag)
  695         movl    %cr4, %eax
  696         orl     $CR4_PGE, %eax
  697         movl    %eax, %cr4
  698 2:
  699 #endif
  700 
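A hedged C sketch of the two feature-gated CR4 updates above; the assembly
reads and writes %cr4 directly, so the helper below just models the bit
logic over a value passed in and returned.  The CPUID_*/CR4_* values mirror
the architectural bit positions.

#include <stdint.h>

#define CPUID_PSE       0x00000008U     /* CPUID.1:EDX bit 3: 4 MB pages    */
#define CPUID_PGE       0x00002000U     /* CPUID.1:EDX bit 13: global pages */
#define CR4_PSE         0x00000010U     /* CR4 bit 4 */
#define CR4_PGE         0x00000080U     /* CR4 bit 7 */

static uint32_t
enable_pse_pge_sketch(uint32_t cr4, uint32_t cpu_feature)
{
        if (cpu_feature & CPUID_PSE)
                cr4 |= CR4_PSE;         /* allow PG_PS (superpage) PDEs */
        if (cpu_feature & CPUID_PGE)
                cr4 |= CR4_PGE;         /* allow PG_G (global) mappings */
        return (cr4);
}
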
  701 /*
  702  * Initialize page table pages mapping physical address zero through the
  703  * end of the kernel.  All of the page table entries allow read and write
  704  * access.  Write access to the first physical page is required by bios32
  705  * calls, and write access to the first 1 MB of physical memory is required
  706  * by ACPI for implementing suspend and resume.  We do this even
  707  * if we've enabled PSE above; we'll just switch the corresponding kernel
  708  * PDEs before we turn on paging.
  709  *
  710  * XXX: We waste some pages here in the PSE case!
  711  */
  712         xorl    %eax, %eax
  713         movl    R(KERNend),%ecx
  714         shrl    $PAGE_SHIFT,%ecx
  715         fillkptphys($PG_RW)
  716 
  717 /* Map page table pages. */
  718         movl    R(KPTphys),%eax
  719         movl    $NKPT,%ecx
  720         fillkptphys($PG_RW)
  721 
  722 /* Map page directory. */
  723 #if defined(PAE) || defined(PAE_TABLES)
  724         movl    R(IdlePDPT), %eax
  725         movl    $1, %ecx
  726         fillkptphys($PG_RW)
  727 #endif
  728 
  729         movl    R(IdlePTD), %eax
  730         movl    $NPGPTD, %ecx
  731         fillkptphys($PG_RW)
  732 
  733 /* Map proc0's KSTACK in the physical way ... */
  734         movl    R(p0kpa), %eax
  735         movl    $(TD0_KSTACK_PAGES), %ecx
  736         fillkptphys($PG_RW)
  737 
  738 /* Map ISA hole */
  739         movl    $ISA_HOLE_START, %eax
  740         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  741         fillkptphys($PG_RW)
  742 
  743 /* Map space for the vm86 region */
  744         movl    R(vm86phystk), %eax
  745         movl    $4, %ecx
  746         fillkptphys($PG_RW)
  747 
  748 /* Map page 0 into the vm86 page table */
  749         movl    $0, %eax
  750         movl    $0, %ebx
  751         movl    $1, %ecx
  752         fillkpt(R(vm86pa), $PG_RW|PG_U)
  753 
  754 /* ...likewise for the ISA hole */
  755         movl    $ISA_HOLE_START, %eax
  756         movl    $ISA_HOLE_START>>PAGE_SHIFT, %ebx
  757         movl    $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
  758         fillkpt(R(vm86pa), $PG_RW|PG_U)
  759 
  760 /*
  761  * Create an identity mapping for low physical memory, including the kernel.
  762  * The part of this mapping that covers the first 1 MB of physical memory
  763  * becomes a permanent part of the kernel's address space.  The rest of this
  764  * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
  765  * pages are shared by the identity mapping and the kernel's native mapping.
  766  * However, the permanent identity mapping cannot contain PG_G mappings.
  767  * Thus, if the kernel is loaded within the permanent identity mapping, that
  768  * page table page must be duplicated and not shared.
  769  *
  770  * N.B. Due to errata concerning large pages and physical address zero,
  771  * a PG_PS mapping is not used.
  772  */
  773         movl    R(KPTphys), %eax
  774         xorl    %ebx, %ebx
  775         movl    $NKPT, %ecx
  776         fillkpt(R(IdlePTD), $PG_RW)
  777 #if KERNLOAD < (1 << PDRSHIFT)
  778         testl   $PG_G, R(pgeflag)
  779         jz      1f
  780         ALLOCPAGES(1)
  781         movl    %esi, %edi
  782         movl    R(IdlePTD), %eax
  783         movl    (%eax), %esi
  784         movl    %edi, (%eax)
  785         movl    $PAGE_SIZE, %ecx
  786         cld
  787         rep
  788         movsb
  789 1:      
  790 #endif
  791 
  792 /*
  793  * For the non-PSE case, install PDEs for PTs covering the KVA.
  794  * For the PSE case, do the same, but clobber the ones corresponding
  795  * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
  796  * PDEs immediately after.
  797  */
  798         movl    R(KPTphys), %eax
  799         movl    $KPTDI, %ebx
  800         movl    $NKPT, %ecx
  801         fillkpt(R(IdlePTD), $PG_RW)
  802         cmpl    $0,R(pseflag)
  803         je      done_pde
  804 
  805         movl    R(KERNend), %ecx
  806         movl    $KERNLOAD, %eax
  807         subl    %eax, %ecx
  808         shrl    $PDRSHIFT, %ecx
  809         movl    $(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
  810         shll    $PDESHIFT, %ebx
  811         addl    R(IdlePTD), %ebx
  812         orl     $(PG_V|PG_RW|PG_PS), %eax
  813 1:      movl    %eax, (%ebx)
  814         addl    $(1 << PDRSHIFT), %eax
  815         addl    $PDESIZE, %ebx
  816         loop    1b
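
A hedged C sketch of the PG_PS replacement loop above for the non-PAE case
(one PDE maps 4 MB); pde_t, NBPDR and the function name are illustrative
stand-ins for the kernel's definitions.

#include <stdint.h>

#define PDRSHIFT        22
#define NBPDR           (1UL << PDRSHIFT)       /* bytes mapped by one PDE */
#define PG_V            0x001U
#define PG_RW           0x002U
#define PG_PS           0x080U                  /* superpage PDE */

typedef uint32_t pde_t;

static void
install_pse_pdes_sketch(pde_t *idle_ptd, uint32_t kptdi, uintptr_t kernload,
    uintptr_t kernend)
{
        uintptr_t pa;
        uint32_t i;

        for (pa = kernload, i = kptdi + (uint32_t)(kernload / NBPDR);
            pa < kernend; pa += NBPDR, i++)
                idle_ptd[i] = (pde_t)(pa | PG_V | PG_RW | PG_PS);
}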
  817 
  818 done_pde:
  819 /* Install a PDE recursively mapping the page directory as a page table. */
  820         movl    R(IdlePTD), %eax
  821         movl    $PTDPTDI, %ebx
  822         movl    $NPGPTD,%ecx
  823         fillkpt(R(IdlePTD), $PG_RW)
  824 
  825 #if defined(PAE) || defined(PAE_TABLES)
  826         movl    R(IdlePTD), %eax
  827         xorl    %ebx, %ebx
  828         movl    $NPGPTD, %ecx
  829         fillkpt(R(IdlePDPT), $0x0)
  830 #endif
  831 
  832         ret
  833 
  834 #ifdef XENHVM
  835 /* Xen Hypercall page */
  836         .text
  837 .p2align PAGE_SHIFT, 0x90       /* Hypercall_page needs to be PAGE aligned */
  838 
  839 NON_GPROF_ENTRY(hypercall_page)
  840         .skip   0x1000, 0x90    /* Fill with "nop"s */
  841 #endif

This page is part of the FreeBSD/Linux Kernel Cross-Reference and was automatically generated using a modified version of the LXR engine.