/*-
 * Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
 * Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/9.0/sys/i386/acpica/acpi_wakecode.S 215200 2010-11-12 20:55:14Z jkim $
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.s"

/*
 * Resume entry point.  The BIOS enters here in real mode after POST with
 * CS set to the page where we stored this code.  It should configure the
 * segment registers with a flat 4 GB address space and EFLAGS.IF = 0.
 * Depending on the previous sleep state, we may need to initialize more
 * of the system (i.e., S3 suspend-to-RAM vs. S4 suspend-to-disk).
 */
	.align	4
	.code16
wakeup_16:
	nop
	cli
	cld

	/*
	 * Set up segment registers for real mode, a small stack for
	 * any calls we make, and clear any flags.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
	movw	$PAGE_SIZE,%sp
	pushl	$0
	popfl

	/* To debug resume hangs, beep the speaker if the user requested it. */
	cmpl	$1,resume_beep
	jne	nobeep
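	/*
	 * The two bytes written to port 0x42 load PIT channel 2 with the
	 * count 0x04c0; setting bits 0-1 of port 0x61 then gates that
	 * channel onto the PC speaker so it produces an audible tone.
	 */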
	movb	$0xc0,%al
	outb	%al,$0x42
	movb	$0x04,%al
	outb	%al,$0x42
	inb	$0x61,%al
	orb	$0x3,%al
	outb	%al,$0x61
nobeep:

	/* Re-initialize video BIOS if the reset_video tunable is set. */
	cmpl	$1,reset_video
	jne	nobiosreset
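	/*
	 * C000:0003 is the entry point of the video adapter's option ROM,
	 * so this far call re-runs the adapter's own initialization.
	 */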
	lcall	$0xc000,$3

	/*
	 * Set up segment registers for real mode again in case the
	 * previous BIOS call clobbered them.
	 */
	movw	%cs,%ax
	movw	%ax,%ds
	movw	%ax,%ss
nobiosreset:

	/* Load GDT for real mode.  Use 32 bit prefix for addresses >16 MB. */
	lgdtl	physical_gdt

	/* Restore CR2, CR3 and CR4 */
	movl	previous_cr2,%eax
	movl	%eax,%cr2
	movl	previous_cr3,%eax
	movl	%eax,%cr3
	movl	previous_cr4,%eax
	movl	%eax,%cr4

	/* Transfer some values to protected mode with an inline stack */
#define NVALUES	9
#define TRANSFER_STACK32(val, idx)	\
	movl	val,%eax;		\
	movl	%eax,wakeup_32stack+(idx+1)+(idx*4)
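	/*
	 * Each "pushl $imm32" at wakeup_32stack below assembles to five
	 * bytes (a one-byte opcode plus the 32-bit immediate), so the
	 * immediate of entry idx lives at wakeup_32stack + idx*5 + 1,
	 * which is what (idx+1) + (idx*4) computes.
	 */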

	TRANSFER_STACK32(previous_ss,		(NVALUES - 9))
	TRANSFER_STACK32(previous_fs,		(NVALUES - 8))
	TRANSFER_STACK32(previous_ds,		(NVALUES - 7))
	TRANSFER_STACK32(physical_gdt+2,	(NVALUES - 6))
	TRANSFER_STACK32(where_to_recover,	(NVALUES - 5))
	TRANSFER_STACK32(previous_idt+2,	(NVALUES - 4))
	TRANSFER_STACK32(previous_ldt,		(NVALUES - 3))
	TRANSFER_STACK32(previous_gdt+2,	(NVALUES - 2))
	TRANSFER_STACK32(previous_tr,		(NVALUES - 1))
	TRANSFER_STACK32(previous_cr0,		(NVALUES - 0))

	mov	physical_esp,%esi	/* to be used in 32bit code */

	/* Enable protected mode */
	movl	%cr0,%eax
	orl	$(CR0_PE),%eax
	movl	%eax,%cr0

wakeup_sw32:
	/* Switch to protected mode by a far (intersegment) jump */
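	/*
	 * The 0x12345678 operand is only a placeholder; it is patched at
	 * run time with the physical address of wakeup_32 before the
	 * system is put to sleep.
	 */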
	ljmpl	$KCSEL,$0x12345678	/* Code location, to be replaced */

/*
 * We are now in protected mode, with paging still disabled.
 *	%esi: KERNEL stack pointer (physical address)
 */
	.code32
wakeup_32:
	nop

	/* Set up segment registers for protected mode */
	movw	$KDSEL,%ax		/* KDSEL to segment registers */
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movw	$KPSEL,%ax		/* KPSEL to %fs */
	movw	%ax,%fs
	movl	%esi,%esp		/* physical address stack pointer */

wakeup_32stack:
	/* Operands are overwritten in 16 bit code by TRANSFER_STACK32 macro */
	pushl	$0xabcdef09	/* ss + dummy */
	pushl	$0xabcdef08	/* fs + gs */
	pushl	$0xabcdef07	/* ds + es */
	pushl	$0xabcdef06	/* gdt:base (physical address) */
	pushl	$0xabcdef05	/* recover address */
	pushl	$0xabcdef04	/* idt:base */
	pushl	$0xabcdef03	/* ldt + idt:limit */
	pushl	$0xabcdef02	/* gdt:base */
	pushl	$0xabcdef01	/* TR + gdt:limit */
	pushl	$0xabcdef00	/* CR0 */

	movl	%esp,%ebp
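	/*
	 * %ebp now points at the block pushed above: the last value pushed
	 * (CR0) is at offset 0, the first (ss + dummy) at offset 36.  Note
	 * that PREVIOUS_GDT and PREVIOUS_IDT each name a 6-byte limit/base
	 * pair suitable for lgdtl/lidtl.
	 */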
#define CR0_REGISTER		0(%ebp)
#define TASK_REGISTER		4(%ebp)
#define PREVIOUS_GDT		6(%ebp)
#define PREVIOUS_LDT		12(%ebp)
#define PREVIOUS_IDT		14(%ebp)
#define RECOVER_ADDR		20(%ebp)
#define PHYSICAL_GDT_BASE	24(%ebp)
#define PREVIOUS_DS		28(%ebp)
#define PREVIOUS_ES		30(%ebp)
#define PREVIOUS_FS		32(%ebp)
#define PREVIOUS_GS		34(%ebp)
#define PREVIOUS_SS		36(%ebp)

	/* Fixup TSS type field */
#define TSS_TYPEFIX_MASK	0xf9
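	/*
	 * Clear the busy bit in the TSS descriptor's type field so that the
	 * ltr below can reload the saved task register without faulting on
	 * an already-busy TSS.
	 */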
	xorl	%esi,%esi
	movl	PHYSICAL_GDT_BASE,%ebx
	movw	TASK_REGISTER,%si
	leal	(%ebx,%esi),%eax	/* get TSS segment descriptor */
	andb	$TSS_TYPEFIX_MASK,5(%eax)

	/* Prepare to return to sleep/wakeup code point */
	lgdtl	PREVIOUS_GDT
	lidtl	PREVIOUS_IDT

	/* Pack the saved segment selectors and other values into registers. */
	movl	PREVIOUS_DS,%ebx
	movl	PREVIOUS_FS,%ecx
	movl	PREVIOUS_SS,%edx
	movw	TASK_REGISTER,%si
	shll	$16,%esi
	movw	PREVIOUS_LDT,%si
	movl	RECOVER_ADDR,%edi

	/* Restore CR0, re-enabling paging and the other saved control bits */
	movl	CR0_REGISTER,%eax
	movl	%eax,%cr0
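	/*
	 * Paging is enabled again, using the page tables whose root was
	 * restored into %cr3 while still in real mode; the recover address
	 * in %edi is a kernel virtual address.
	 */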

	/* Flush the prefetch queue */
	jmp	1f
1:	jmp	1f
1:

/*
 * Now we are in kernel virtual memory addressing with the following
 * original register values:
 *	%ebx: ds + es
 *	%ecx: fs + gs
 *	%edx: ss + dummy
 *	%esi: LDTR + TR
 *	%edi: recover address
 * We'll load these back into the segment registers now.
 */
	nop

	movl	%esi,%eax		/* LDTR + TR */
	lldt	%ax			/* load LDT register */
	shrl	$16,%eax
	ltr	%ax			/* load task register */

	/* Restore segment registers */
	movl	%ebx,%eax		/* ds + es */
	movw	%ax,%ds
	shrl	$16,%eax
	movw	%ax,%es
	movl	%ecx,%eax		/* fs + gs */
	movw	%ax,%fs
	shrl	$16,%eax
	movw	%ax,%gs
	movl	%edx,%eax		/* ss */
	movw	%ax,%ss

	/* Jump to acpi_restorecpu() */
	jmp	*%edi
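
/*
 * Data area.  These variables are filled in at run time by the ACPI wakeup
 * support code (acpi_wakeup.c) before the system is put to sleep.
 */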

/* used in real mode */
physical_gdt:		.word 0
			.long 0
physical_esp:		.long 0
previous_cr2:		.long 0
previous_cr3:		.long 0
previous_cr4:		.long 0
resume_beep:		.long 0
reset_video:		.long 0

/*
 * Values transferred from real mode to protected mode.  The order of these
 * variables is very important: TRANSFER_STACK32 reads them with 32-bit
 * loads, so adjacent 16-bit fields are deliberately paired (e.g. a load of
 * previous_ds also picks up previous_es), and each descriptor limit word
 * is immediately followed by its base.  DO NOT INSERT OR CHANGE entries
 * unless you know why.
 */
previous_cr0:		.long 0
previous_tr:		.word 0
previous_gdt:		.word 0
			.long 0
previous_ldt:		.word 0
previous_idt:		.word 0
			.long 0
where_to_recover:	.long 0
previous_ds:		.word 0
previous_es:		.word 0
previous_fs:		.word 0
previous_gs:		.word 0
previous_ss:		.word 0
dummy:			.word 0