FreeBSD/Linux Kernel Cross Reference
sys/i386/i386/locore.s
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)locore.s	7.3 (Berkeley) 5/13/91
 * $FreeBSD: releng/11.2/sys/i386/i386/locore.s 329199 2018-02-13 12:54:03Z kib $
 *
 *	originally from: locore.s, by William F. Jolitz
 *
 *	Substantially rewritten by David Greenman, Rod Grimes,
 *	Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp
 *	and many others.
 */

#include "opt_bootp.h"
#include "opt_compat.h"
#include "opt_nfsroot.h"
#include "opt_pmap.h"

#include <sys/reboot.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/psl.h>
#include <machine/pmap.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * XXX
 *
 * Note: This version was greatly munged to avoid various assembler errors
 * that may be fixed in newer versions of gas.  Perhaps newer versions will
 * have a more pleasant appearance.
 */

/*
 * PTmap is the recursive pagemap at the top of the virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 */
	.globl	PTmap,PTD,PTDpde
	.set	PTmap,(PTDPTDI << PDRSHIFT)
	.set	PTD,PTmap + (PTDPTDI * PAGE_SIZE)
	.set	PTDpde,PTD + (PTDPTDI * PDESIZE)

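/*
 * How the recursion works: one of the PDEs installed near the end of
 * create_pagetables (at done_pde below) points back at the page directory
 * itself, so the directory doubles as a page table.  The PTE describing a
 * virtual address va is then readable at
 * PTmap + (va >> PAGE_SHIFT) * PTESIZE, and the page directory entries
 * show up as the "PTEs" of the PTD window, which is what the PTD and
 * PTDpde addresses above rely on.
 */
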
/*
 * Compiled KERNBASE location and the kernel load address
 */
	.globl	kernbase
	.set	kernbase,KERNBASE
	.globl	kernload
	.set	kernload,KERNLOAD

/*
 * Globals
 */
	.data
	ALIGN_DATA			/* just to be sure */

	.space	0x2000			/* space for tmpstk - temporary stack */
tmpstk:

	.globl	bootinfo
bootinfo:	.space	BOOTINFO_SIZE	/* bootinfo that we can handle */

	.globl	KERNend
KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	IdlePTD
IdlePTD:	.long	0		/* phys addr of kernel PTD */

#if defined(PAE) || defined(PAE_TABLES)
	.globl	IdlePDPT
IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

	.globl	KPTmap
KPTmap:		.long	0		/* address of kernel page tables */

	.globl	KPTphys
KPTphys:	.long	0		/* phys addr of kernel page tables */

	.globl	proc0kstack
proc0kstack:	.long	0		/* address of proc 0 kstack space */
p0kpa:		.long	0		/* phys addr of proc0's STACK */

vm86phystk:	.long	0		/* PA of vm86/bios stack */

	.globl	vm86paddr, vm86pa
vm86paddr:	.long	0		/* address of vm86 region */
vm86pa:		.long	0		/* phys addr of vm86 region */

#ifdef PC98
	.globl	pc98_system_parameter
pc98_system_parameter:
	.space	0x240
#endif

/**********************************************************************
 *
 * Some handy macros
 *
 */

#define	R(foo)	((foo)-KERNBASE)
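
/*
 * R() background: this file starts executing with paging disabled, at the
 * kernel's physical load address, while all symbols are linked to run at
 * KERNBASE-relative virtual addresses.  Since the kernel is (and will be)
 * mapped so that va = pa + KERNBASE, R(foo) is the physical address where
 * symbol foo actually lives, usable before paging is turned on.
 */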

#define ALLOCPAGES(foo)					\
	movl	R(physfree), %esi ;			\
	movl	$((foo)*PAGE_SIZE), %eax ;		\
	addl	%esi, %eax ;				\
	movl	%eax, R(physfree) ;			\
	movl	%esi, %edi ;				\
	movl	$((foo)*PAGE_SIZE),%ecx ;		\
	xorl	%eax,%eax ;				\
	cld ;						\
	rep ;						\
	stosb
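
/*
 * In short: ALLOCPAGES(n) carves n pages off the bump allocator rooted at
 * physfree, leaving the old physfree (the allocation's physical address)
 * in %esi, advancing physfree past the allocation, and zeroing the new
 * pages with "rep stosb".
 */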

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define	fillkpt(base, prot)		  \
	shll	$PTESHIFT,%ebx		; \
	addl	base,%ebx		; \
	orl	$PG_V,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$PAGE_SIZE,%eax		; /* increment physical address */ \
	addl	$PTESIZE,%ebx		; /* next pte */ \
	loop	1b
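
/*
 * Worked example (values chosen for illustration only): with %eax =
 * 0x00400000, %ebx = 3 and %ecx = 2, fillkpt(R(KPTphys), $PG_RW) converts
 * the index to a byte offset (3 << PTESHIFT), then stores
 * 0x00400000|PG_V|PG_RW into entry 3 and 0x00401000|PG_V|PG_RW into entry
 * 4, i.e. %ecx consecutive pages are mapped starting at entry %ebx.
 */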

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$PAGE_SHIFT, %ebx	; \
	fillkpt(R(KPTphys), prot)
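
/*
 * Because the index is derived from the physical address itself
 * (%ebx = pa >> PAGE_SHIFT), fillkptphys() always creates the kernel's
 * standard va = pa + KERNBASE mapping: physical page N lands in slot N of
 * the KPTphys page tables, which are later installed at KPTDI to cover
 * KERNBASE.
 */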

	.text
/**********************************************************************
 *
 * This is where the bootblocks start us, set the ball rolling...
 *
 */
NON_GPROF_ENTRY(btext)

#ifdef PC98
	/* save SYSTEM PARAMETER for resume (NS/T or other) */
	movl	$0xa1400,%esi
	movl	$R(pc98_system_parameter),%edi
	movl	$0x0240,%ecx
	cld
	rep
	movsb
#else	/* IBM-PC */
	/* Tell the BIOS to warmboot next time */
	movw	$0x1234,0x472
#endif	/* PC98 */

/* Set up a real frame in case the double return in newboot is executed. */
	pushl	%ebp
	movl	%esp, %ebp

/* Don't trust what the BIOS gives for eflags. */
	pushl	$PSL_KERNEL
	popfl

/*
 * Don't trust what the BIOS gives for %fs and %gs.  Trust the bootstrap
 * to set %cs, %ds, %es and %ss.
 */
	mov	%ds, %ax
	mov	%ax, %fs
	mov	%ax, %gs

/*
 * Clear the bss.  Not all boot programs do it, and it is our job anyway.
 *
 * XXX we don't check that there is memory for our bss and page tables
 * before using it.
 *
 * Note: we must be careful to not overwrite an active gdt or idt.  They
 * are inactive from now until we switch to new ones, since we don't load
 * any more segment registers or permit interrupts until after the switch.
 */
	movl	$R(end),%ecx
	movl	$R(edata),%edi
	subl	%edi,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosb

	call	recover_bootinfo

/* Get onto a stack that we can trust. */
/*
 * XXX this step is delayed in case recover_bootinfo needs to return via
 * the old stack, but it need not be, since recover_bootinfo actually
 * returns via the old frame.
 */
	movl	$R(tmpstk),%esp

#ifdef PC98
	/* pc98_machine_type & M_EPSON_PC98 */
	testb	$0x02,R(pc98_system_parameter)+220
	jz	3f
	/* epson_machine_id <= 0x0b */
	cmpb	$0x0b,R(pc98_system_parameter)+224
	ja	3f

	/* count up memory */
	movl	$0x100000,%eax		/* next, tally remaining memory */
	movl	$0xFFF-0x100,%ecx
1:	movl	0(%eax),%ebx		/* save location to check */
	movl	$0xa55a5aa5,0(%eax)	/* write test pattern */
	cmpl	$0xa55a5aa5,0(%eax)	/* does not check yet for rollover */
	jne	2f
	movl	%ebx,0(%eax)		/* restore memory */
	addl	$PAGE_SIZE,%eax
	loop	1b
2:	subl	$0x100000,%eax
	shrl	$17,%eax
	movb	%al,R(pc98_system_parameter)+1
3:

	movw	R(pc98_system_parameter+0x86),%ax
	movw	%ax,R(cpu_id)
#endif

	call	identify_cpu
	call	create_pagetables

/*
 * If the CPU has support for VME, turn it on.
 */
	testl	$CPUID_VME, R(cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

/* Now enable paging */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	%eax, %cr3
	movl	%cr4, %eax
	orl	$CR4_PAE, %eax
	movl	%eax, %cr4
#else
	movl	R(IdlePTD), %eax
	movl	%eax,%cr3		/* load ptd addr into mmu */
#endif
	movl	%cr0,%eax		/* get control word */
	orl	$CR0_PE|CR0_PG,%eax	/* enable paging */
	movl	%eax,%cr0		/* and let's page NOW! */

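/*
 * Paging is on, but %eip still holds a low (physical) address; we keep
 * executing only because create_pagetables installed an identity mapping
 * for low memory as well.  Pushing the linked (high) address of "begin"
 * and executing ret is simply an indirect jump that lifts %eip into the
 * KERNBASE-relative mapping.
 */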
	pushl	$begin			/* jump to high virtualized address */
	ret

/* now running relocated at KERNBASE where the system is linked to run */
begin:
	/* set up bootstrap stack */
	movl	proc0kstack,%eax	/* location of in-kernel stack */

	/*
	 * Only use bottom page for init386().  init386() calculates the
	 * PCB + FPU save area size and returns the true top of stack.
	 */
	leal	PAGE_SIZE(%eax),%esp

	xorl	%ebp,%ebp		/* mark end of frames */

	pushl	physfree		/* value of first for init386(first) */
	call	init386			/* wire 386 chip for unix operation */

	/*
	 * Clean up the stack in a way that db_numargs() understands, so
	 * that backtraces in ddb don't underrun the stack.  Traps for
	 * inaccessible memory are more fatal than usual this early.
	 */
	addl	$4,%esp

	/* Switch to true top of stack. */
	movl	%eax,%esp

	call	mi_startup		/* autoconfiguration, mountroot etc */
	/* NOTREACHED */
	addl	$0,%esp			/* for db_numargs() again */

/**********************************************************************
 *
 * Recover the bootinfo passed to us from the boot program
 *
 */
recover_bootinfo:
	/*
	 * This code is called in different ways depending on what loaded
	 * and started the kernel.  It detects how we were loaded, where
	 * the arguments come from, and what to do with them.
	 *
	 * Old disk boot blocks:
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 *	[return address == 0, and can NOT be returned to]
	 *	[cyloffset was not supported by the FreeBSD boot code
	 *	 and always passed in as 0]
	 *	[esym is also known as total in the boot code, and
	 *	 was never properly supported by the FreeBSD boot code]
	 *
	 * Old diskless netboot code:
	 *	(*btext)(0,0,0,0,&nfsdiskless,0,0,0);
	 *	[return address != 0, and can NOT be returned to]
	 *	If we are being booted by this code it will NOT work,
	 *	so we are just going to halt if we find this case.
	 *
	 * New uniform boot code:
	 *	(*btext)(howto, bootdev, 0, 0, 0, &bootinfo)
	 *	[return address != 0, and can be returned to]
	 *
	 * There may seem to be a lot of wasted arguments in here, but
	 * that is so the newer boot code can still load very old kernels
	 * and old boot code can load new kernels.
	 */
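
	/*
	 * Frame layout: btext pushed %ebp and copied %esp to %ebp before
	 * calling us, so 4(%ebp) is the return address the boot program
	 * supplied, 8(%ebp) is the first argument (howto), 12(%ebp) the
	 * second (bootdev), 24(%ebp) the fifth, and 28(%ebp) the sixth
	 * (&bootinfo in the uniform case).  The tests below index the
	 * frame accordingly.
	 */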

	/*
	 * The old style disk boot blocks faked a frame on the stack and
	 * did an lret to get here.  The frame on the stack has a return
	 * address of 0.
	 */
	cmpl	$0,4(%ebp)
	je	olddiskboot

	/*
	 * We have some form of return address, so this is either the
	 * old diskless netboot code, or the new uniform code.  That can
	 * be detected by looking at the 5th argument: if it is 0, we
	 * are being booted by the new uniform boot code.
	 */
	cmpl	$0,24(%ebp)
	je	newboot

	/*
	 * It seems we have been loaded by the old diskless boot code; we
	 * don't stand a chance of running, as the diskless structure
	 * changed considerably between the two, so just halt.
	 */
	hlt

	/*
	 * We have been loaded by the new uniform boot code.
	 * Let's check the bootinfo version, and if we do not understand
	 * it we return to the loader with a status of 1 to indicate this
	 * error.
	 */
newboot:
	movl	28(%ebp),%ebx		/* &bootinfo.version */
	movl	BI_VERSION(%ebx),%eax
	cmpl	$1,%eax			/* We only understand version 1 */
	je	1f
	movl	$1,%eax			/* Return status */
	leave
	/*
	 * XXX this returns to our caller's caller (as is required) since
	 * we didn't set up a frame and our caller did.
	 */
	ret

1:
	/*
	 * If we have a kernelname, copy it in.
	 */
	movl	BI_KERNELNAME(%ebx),%esi
	cmpl	$0,%esi
	je	2f			/* No kernelname */
	movl	$MAXPATHLEN,%ecx	/* Brute force!!! */
	movl	$R(kernelname),%edi
	cmpb	$'/',(%esi)		/* Make sure it starts with a slash */
	je	1f
	movb	$'/',(%edi)
	incl	%edi
	decl	%ecx
1:
	cld
	rep
	movsb

2:
	/*
	 * Determine the size of the boot loader's copy of the bootinfo
	 * struct.  This is impossible to do properly because old versions
	 * of the struct don't contain a size field and there are 2 old
	 * versions with the same version number.
	 */
	movl	$BI_ENDCOMMON,%ecx	/* prepare for sizeless version */
	testl	$RB_BOOTINFO,8(%ebp)	/* bi_size (and bootinfo) valid? */
	je	got_bi_size		/* no, sizeless version */
	movl	BI_SIZE(%ebx),%ecx
got_bi_size:

	/*
	 * Copy the common part of the bootinfo struct
	 */
	movl	%ebx,%esi
	movl	$R(bootinfo),%edi
	cmpl	$BOOTINFO_SIZE,%ecx
	jbe	got_common_bi_size
	movl	$BOOTINFO_SIZE,%ecx
got_common_bi_size:
	cld
	rep
	movsb

#ifdef NFS_ROOT
#ifndef BOOTP_NFSV3
	/*
	 * If we have a nfs_diskless structure, copy it in.
	 */
	movl	BI_NFS_DISKLESS(%ebx),%esi
	cmpl	$0,%esi
	je	olddiskboot
	movl	$R(nfs_diskless),%edi
	movl	$NFSDISKLESS_SIZE,%ecx
	cld
	rep
	movsb
	movl	$R(nfs_diskless_valid),%edi
	movl	$1,(%edi)
#endif
#endif

	/*
	 * The old style disk boot.
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev; cyloffset and esym are no longer used.
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(bootdev)

	ret


/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:
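	/*
	 * Overview: a 386 is identified by its inability to toggle the
	 * EFLAGS alignment-check (AC) bit, and a pre-CPUID 486 by its
	 * inability to toggle the ID bit.  The divide tests below separate
	 * out CPUs (NexGen, Cyrix) whose DIV leaves the flags in a state
	 * that differs from Intel parts, as the comments note.
	 */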

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	try486

	/* NexGen CPU does not have alignment check flag. */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jz	trynexgen
	popfl
	movl	$CPU_386,R(cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(cpu)
	movl	$0x4778654e,R(cpu_vendor)	# store vendor string
	movl	$0x72446e65,R(cpu_vendor+4)
	movl	$0x6e657669,R(cpu_vendor+8)
	movl	$0,R(cpu_vendor+12)
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl

	testl	%eax,%eax
	jnz	trycpuid
	movl	$CPU_486,R(cpu)

	/*
	 * Check for a Cyrix CPU.
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 0x5555 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix
	popfl
	jmp	3f		/* You may be using an Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM's Blue Lightning CPU also doesn't change the undefined
	 * flags.  Because IBM doesn't disclose the information for the
	 * Blue Lightning CPU, we can't distinguish it from Cyrix's
	 * (including IBM-branded Cyrix CPUs).
	 */
	movl	$0x69727943,R(cpu_vendor)	# store vendor string
	movl	$0x736e4978,R(cpu_vendor+4)
	movl	$0x64616574,R(cpu_vendor+8)
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid					# cpuid 0
	movl	%eax,R(cpu_high)		# highest capability
	movl	%ebx,R(cpu_vendor)		# store vendor string
	movl	%edx,R(cpu_vendor+4)
	movl	%ecx,R(cpu_vendor+8)
	movb	$0,R(cpu_vendor+12)

	movl	$1,%eax
	cpuid					# cpuid 1
	movl	%eax,R(cpu_id)			# store cpu_id
	movl	%ebx,R(cpu_procinfo)		# store cpu_procinfo
	movl	%edx,R(cpu_feature)		# store cpu_feature
	movl	%ecx,R(cpu_feature2)		# store cpu_feature2
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

	/* Include symbols, if any. */
	movl	R(bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi
	movl	$KERNBASE,%edi
	addl	%edi,R(bootinfo+BI_SYMTAB)
	addl	%edi,R(bootinfo+BI_ESYMTAB)
over_symalloc:

	/* If we are told where the end of the kernel space is, believe it. */
	movl	R(bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PDRMASK,%esi		/* Play conservative for now, and */
	andl	$~PDRMASK,%esi		/*   ... round up to the next 4M. */
	movl	%esi,R(KERNend)		/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

	/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(KPTphys)
	addl	$(KERNBASE-(KPTDI<<(PDRSHIFT-PAGE_SHIFT+PTESHIFT))),%esi
	movl	%esi,R(KPTmap)
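
	/*
	 * The constant above positions KPTmap so that, once these NKPT
	 * page table pages are installed at page directory index KPTDI,
	 * the PTE for a kernel virtual address va sits at
	 * KPTmap + (va >> PAGE_SHIFT) * PTESIZE: it is KPTphys relocated
	 * into KVA (+KERNBASE) and biased down by KPTDI page-table pages.
	 */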

	/* Allocate Page Table Directory */
#if defined(PAE) || defined(PAE_TABLES)
	/* XXX only need 32 bytes (easier for now) */
	ALLOCPAGES(1)
	movl	%esi,R(IdlePDPT)
#endif
	ALLOCPAGES(NPGPTD)
	movl	%esi,R(IdlePTD)

	/* Allocate KSTACK */
	ALLOCPAGES(TD0_KSTACK_PAGES)
	movl	%esi,R(p0kpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(proc0kstack)

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(vm86paddr)

/*
 * Enable PSE and PGE.
 */
#ifndef DISABLE_PSE
	testl	$CPUID_PSE, R(cpu_feature)
	jz	1f
	movl	$PG_PS, R(pseflag)
	movl	%cr4, %eax
	orl	$CR4_PSE, %eax
	movl	%eax, %cr4
1:
#endif
#ifndef DISABLE_PG_G
	testl	$CPUID_PGE, R(cpu_feature)
	jz	2f
	movl	$PG_G, R(pgeflag)
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
2:
#endif

/*
 * Initialize page table pages mapping physical address zero through the
 * end of the kernel.  All of the page table entries allow read and write
 * access.  Write access to the first physical page is required by bios32
 * calls, and write access to the first 1 MB of physical memory is required
 * by ACPI for implementing suspend and resume.  We do this even if we've
 * enabled PSE above; we'll just switch the corresponding kernel PDEs
 * before we turn on paging.
 *
 * XXX: We waste some pages here in the PSE case!
 */
	xorl	%eax, %eax
	movl	R(KERNend),%ecx
	shrl	$PAGE_SHIFT,%ecx
	fillkptphys($PG_RW)

	/* Map page table pages. */
	movl	R(KPTphys),%eax
	movl	$NKPT,%ecx
	fillkptphys($PG_RW)

	/* Map page directory. */
#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePDPT), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)
#endif

	movl	R(IdlePTD), %eax
	movl	$NPGPTD, %ecx
	fillkptphys($PG_RW)

	/* Map proc0's KSTACK in the physical way ... */
	movl	R(p0kpa), %eax
	movl	$(TD0_KSTACK_PAGES), %ecx
	fillkptphys($PG_RW)

	/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

	/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx
	fillkptphys($PG_RW)

	/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

	/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(vm86pa), $PG_RW|PG_U)

/*
 * Create an identity mapping for low physical memory, including the kernel.
 * The part of this mapping that covers the first 1 MB of physical memory
 * becomes a permanent part of the kernel's address space.  The rest of this
 * mapping is destroyed in pmap_bootstrap().  Ordinarily, the same page table
 * pages are shared by the identity mapping and the kernel's native mapping.
 * However, the permanent identity mapping cannot contain PG_G mappings.
 * Thus, if the kernel is loaded within the permanent identity mapping, that
 * page table page must be duplicated and not shared.
 *
 * N.B. Due to errata concerning large pages and physical address zero,
 * a PG_PS mapping is not used.
 */
	movl	R(KPTphys), %eax
	xorl	%ebx, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
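
	/*
	 * The block below performs the duplication described above:
	 * allocate a fresh page, swap it into the first PDE slot in place
	 * of the shared page table page, and copy the old page's contents
	 * into it, so the permanent identity mapping of the low 1 MB stops
	 * sharing a page table page with the kernel's PG_G-capable mapping.
	 */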
#if KERNLOAD < (1 << PDRSHIFT)
	testl	$PG_G, R(pgeflag)
	jz	1f
	ALLOCPAGES(1)
	movl	%esi, %edi
	movl	R(IdlePTD), %eax
	movl	(%eax), %esi
	movl	%edi, (%eax)
	movl	$PAGE_SIZE, %ecx
	cld
	rep
	movsb
1:
#endif

/*
 * For the non-PSE case, install PDEs for PTs covering the KVA.
 * For the PSE case, do the same, but clobber the ones corresponding
 * to the kernel (from btext to KERNend) with 4M (2M for PAE) ('PS')
 * PDEs immediately after.
 */
	movl	R(KPTphys), %eax
	movl	$KPTDI, %ebx
	movl	$NKPT, %ecx
	fillkpt(R(IdlePTD), $PG_RW)
	cmpl	$0,R(pseflag)
	je	done_pde
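
	/*
	 * PSE path: compute how many superpages span [KERNLOAD, KERNend),
	 * point %ebx at the PDE slot covering the kernel's first superpage
	 * (KPTDI plus KERNLOAD's slot offset), then store one
	 * PG_V|PG_RW|PG_PS PDE per iteration, stepping the physical
	 * address by one superpage (1 << PDRSHIFT) each time.
	 */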

	movl	R(KERNend), %ecx
	movl	$KERNLOAD, %eax
	subl	%eax, %ecx
	shrl	$PDRSHIFT, %ecx
	movl	$(KPTDI+(KERNLOAD/(1 << PDRSHIFT))), %ebx
	shll	$PDESHIFT, %ebx
	addl	R(IdlePTD), %ebx
	orl	$(PG_V|PG_RW|PG_PS), %eax
1:	movl	%eax, (%ebx)
	addl	$(1 << PDRSHIFT), %eax
	addl	$PDESIZE, %ebx
	loop	1b

done_pde:
	/* install a pde recursively mapping page directory as a page table */
	movl	R(IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$NPGPTD,%ecx
	fillkpt(R(IdlePTD), $PG_RW)

#if defined(PAE) || defined(PAE_TABLES)
	movl	R(IdlePTD), %eax
	xorl	%ebx, %ebx
	movl	$NPGPTD, %ecx
	fillkpt(R(IdlePDPT), $0x0)
#endif

	ret

#ifdef XENHVM
/* Xen Hypercall page */
	.text
.p2align PAGE_SHIFT, 0x90	/* Hypercall_page needs to be PAGE aligned */

NON_GPROF_ENTRY(hypercall_page)
	.skip	0x1000, 0x90	/* Fill with "nop"s */
#endif