/*-
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/8.0/sys/i386/acpica/acpi_wakecode.S 159478 2006-06-10 08:20:03Z njl $
 */

#define LOCORE

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * Resume entry point.  The BIOS enters here in real mode after POST with
 * CS set to the page where we stored this code.  It should configure the
 * segment registers with a flat 4 GB address space and EFLAGS.IF = 0.
 * Depending on the previous sleep state, we may need to initialize more
 * of the system (i.e., S3 suspend-to-RAM vs. S4 suspend-to-disk).
 */
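/*
 * Note: before suspending, the kernel copies this code into a page of low
 * physical memory and points the ACPI firmware waking vector at it, so
 * everything here has to work regardless of where that page was placed
 * (hence the %cs-relative data addressing below).
 */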
	.align	4
	.code16
wakeup_16:
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode, a small stack for
	 * any calls we make, and clear any flags.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
	movw	$PAGE_SIZE,%sp
	pushl	$0
	popfl

	/* To debug resume hangs, beep the speaker if the user requested. */
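	/*
	 * The three outb's below load PIT channel 2 with divisor 0x04c0
	 * (low byte 0xc0, then high byte 0x04, roughly a 1 kHz tone) and
	 * then set bits 0-1 of port 0x61 to gate the timer output onto the
	 * PC speaker.  This assumes the BIOS left channel 2 in
	 * low-byte/high-byte access mode, which is normally the case.
	 */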
	cmpl	$1,resume_beep
	jne	nobeep
	movb	$0xc0,%al
	outb	%al,$0x42
	movb	$0x04,%al
	outb	%al,$0x42
	inb	$0x61,%al
	orb	$0x3,%al
	outb	%al,$0x61
nobeep:

	/* Re-initialize video BIOS if the reset_video tunable is set. */
	cmpl	$1,reset_video
	jne	nobiosreset
	lcall	$0xc000,$3
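	/*
	 * 0xc000:0003 is the standard entry point of the VGA option ROM,
	 * so this far call re-runs the video BIOS initialization routine
	 * to bring the display back up.
	 */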

	/*
	 * Set up segment registers for real mode again in case the
	 * previous BIOS call clobbers them.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
nobiosreset:

	/* Load GDT for real mode.  Use 32 bit prefix for addresses >16 MB. */
	lgdtl	physical_gdt

	/* Restore CR2, CR3 and CR4 */
	movl	previous_cr2,%eax
	movl	%eax,%cr2
	movl	previous_cr3,%eax
	movl	%eax,%cr3
	movl	previous_cr4,%eax
	movl	%eax,%cr4
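	/*
	 * CR3 (the page directory base) and CR4 are restored here while
	 * paging is still off; paging itself only comes back on when the
	 * saved CR0 is restored later in the 32 bit code.
	 */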

	/* Transfer some values to protected mode with an inline stack */
#define NVALUES	9
#define TRANSFER_STACK32(val, idx)	\
	movl	val,%eax;		\
	movl	%eax,wakeup_32stack+(idx+1)+(idx*4)
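	/*
	 * Each "pushl $imm32" at wakeup_32stack is 5 bytes long (opcode
	 * 0x68 followed by a 32 bit immediate), so the immediate of entry
	 * 'idx' lives at wakeup_32stack + idx*5 + 1, which is exactly what
	 * (idx+1)+(idx*4) computes.  The macro patches those immediates in
	 * place so the 32 bit code can simply execute the pushes.
	 */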

	TRANSFER_STACK32(previous_ss, (NVALUES - 9))
	TRANSFER_STACK32(previous_fs, (NVALUES - 8))
	TRANSFER_STACK32(previous_ds, (NVALUES - 7))
	TRANSFER_STACK32(physical_gdt+2, (NVALUES - 6))
	TRANSFER_STACK32(where_to_recover, (NVALUES - 5))
	TRANSFER_STACK32(previous_idt+2, (NVALUES - 4))
	TRANSFER_STACK32(previous_ldt, (NVALUES - 3))
	TRANSFER_STACK32(previous_gdt+2, (NVALUES - 2))
	TRANSFER_STACK32(previous_tr, (NVALUES - 1))
	TRANSFER_STACK32(previous_cr0, (NVALUES - 0))

	mov	physical_esp,%esi	/* to be used in 32 bit code */

	/* Enable protected mode */
	movl	%cr0,%eax
	orl	$(CR0_PE),%eax
	movl	%eax,%cr0

wakeup_sw32:
	/* Switch to protected mode by intersegmental jump */
	ljmpl	$KCSEL,$0x12345678	/* Code location, to be replaced */
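	/*
	 * The $0x12345678 offset above is only a placeholder: when the
	 * kernel installs this code in the wakeup page it patches the
	 * operand with the physical address of wakeup_32, so the far jump
	 * both loads a protected mode %cs and lands at the 32 bit entry.
	 */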

/*
 * Now switched to protected mode without paging enabled.
 *	%esi: KERNEL stack pointer (physical address)
 */
	.code32
wakeup_32:
	nop

	/* Set up segment registers for protected mode */
	movw	$KDSEL,%ax		/* KDSEL to segment registers */
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	$KPSEL,%ax		/* KPSEL to %fs */
	movw	%ax,%fs
	movl	%esi,%esp		/* physical address stack pointer */

wakeup_32stack:
	/* Operands are overwritten in 16 bit code by TRANSFER_STACK32 macro */
	pushl	$0xabcdef09		/* ss + dummy */
	pushl	$0xabcdef08		/* fs + gs */
	pushl	$0xabcdef07		/* ds + es */
	pushl	$0xabcdef06		/* gdt:base (physical address) */
	pushl	$0xabcdef05		/* recover address */
	pushl	$0xabcdef04		/* idt:base */
	pushl	$0xabcdef03		/* ldt + idt:limit */
	pushl	$0xabcdef02		/* gdt:base */
	pushl	$0xabcdef01		/* TR + gdt:limit */
	pushl	$0xabcdef00		/* CR0 */

	movl	%esp,%ebp
#define CR0_REGISTER		0(%ebp)
#define TASK_REGISTER		4(%ebp)
#define PREVIOUS_GDT		6(%ebp)
#define PREVIOUS_LDT		12(%ebp)
#define PREVIOUS_IDT		14(%ebp)
#define RECOVER_ADDR		20(%ebp)
#define PHYSICAL_GDT_BASE	24(%ebp)
#define PREVIOUS_DS		28(%ebp)
#define PREVIOUS_ES		30(%ebp)
#define PREVIOUS_FS		32(%ebp)
#define PREVIOUS_GS		34(%ebp)
#define PREVIOUS_SS		36(%ebp)
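/*
 * The offsets above index into the ten dwords just pushed, with %ebp
 * pointing at the last push (CR0).  They are word granular on purpose:
 * TASK_REGISTER is the low word of the "TR + gdt:limit" push and
 * PREVIOUS_GDT starts at its high word, so "lgdtl PREVIOUS_GDT" reads the
 * GDT limit from that word and the GDT base from the following push.
 * PREVIOUS_LDT/PREVIOUS_IDT overlap the "ldt + idt:limit" push the same way.
 */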

	/* Fixup TSS type field */
#define TSS_TYPEFIX_MASK	0xf9
	xorl	%esi,%esi
	movl	PHYSICAL_GDT_BASE,%ebx
	movw	TASK_REGISTER,%si
	leal	(%ebx,%esi),%eax	/* get TSS segment descriptor */
	andb	$TSS_TYPEFIX_MASK,5(%eax)
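	/*
	 * Byte 5 of the descriptor holds the type field; masking it with
	 * 0xf9 clears the "busy" bit of the previously active TSS (type
	 * 0xb back to 0x9), since the ltr below would fault on a
	 * descriptor still marked busy.
	 */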

	/* Prepare to return to sleep/wakeup code point */
	lgdtl	PREVIOUS_GDT
	lidtl	PREVIOUS_IDT
	/*
	 * Stash the saved selector values in registers; they are loaded
	 * into the segment registers once paging is back on.
	 */
	movl	PREVIOUS_DS,%ebx
	movl	PREVIOUS_FS,%ecx
	movl	PREVIOUS_SS,%edx
	movw	TASK_REGISTER,%si
	shll	$16,%esi
	movw	PREVIOUS_LDT,%si
	movl	RECOVER_ADDR,%edi

	/* Restore CR0, re-enabling paging and the rest of the saved bits. */
	movl	CR0_REGISTER,%eax
	movl	%eax,%cr0

	/* Flush the prefetch queue */
	jmp	1f
1:	jmp	1f
1:
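	/*
	 * The near jumps discard anything already in the prefetch queue,
	 * so the following instructions are fetched through the newly
	 * re-enabled paging translation.
	 */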

/*
 * Now we are in kernel virtual memory addressing with the following
 * original register values:
 *	%ebx: ds + es
 *	%ecx: fs + gs
 *	%edx: ss + dummy
 *	%esi: LDTR + TR
 *	%edi: recover address
 * We'll load these back into the segment registers now.
 */
	nop

	movl	%esi,%eax		/* LDTR + TR */
	lldt	%ax			/* load LDT register */
	shrl	$16,%eax
	ltr	%ax			/* load task register */

	/* Restore segment registers */
	movl	%ebx,%eax		/* ds + es */
	movw	%ax,%ds
	shrl	$16,%eax
	movw	%ax,%es
	movl	%ecx,%eax		/* fs + gs */
	movw	%ax,%fs
	shrl	$16,%eax
	movw	%ax,%gs
	movl	%edx,%eax		/* ss */
	movw	%ax,%ss

	/* Jump to acpi_restorecpu() */
	jmp	*%edi

	/* used in real mode */
physical_gdt:		.word 0
			.long 0
physical_esp:		.long 0
previous_cr2:		.long 0
previous_cr3:		.long 0
previous_cr4:		.long 0
resume_beep:		.long 0
reset_video:		.long 0

/*
 * Transfer from real mode to protected mode.  The order of these variables
 * is very important, DO NOT INSERT OR CHANGE unless you know why.
 */
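/*
 * Why the order matters: the 16 bit code above reads these with 32 bit
 * moves (TRANSFER_STACK32), so each movl also picks up the word variable
 * that follows, e.g. previous_tr together with previous_gdt's limit word,
 * previous_ldt with previous_idt's limit word, and previous_ss with dummy.
 * Reordering them would scramble those pairs.
 */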
previous_cr0:		.long 0
previous_tr:		.word 0
previous_gdt:		.word 0
			.long 0
previous_ldt:		.word 0
previous_idt:		.word 0
			.long 0
where_to_recover:	.long 0
previous_ds:		.word 0
previous_es:		.word 0
previous_fs:		.word 0
previous_gs:		.word 0
previous_ss:		.word 0
dummy:			.word 0