/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/9.0/sys/amd64/amd64/cpu_switch.S 216673 2010-12-22 19:57:03Z jkim $
 */

#include <machine/asmacros.h>
#include <machine/specialreg.h>

#include "assym.s"
#include "opt_sched.h"

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

	.text

#ifdef SMP
#define	LK	lock ;
#else
#define	LK
#endif

#if defined(SCHED_ULE) && defined(SMP)
#define	SETLK	xchgq
#else
#define	SETLK	movq
#endif
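
/*
 * LK expands to a lock prefix on SMP kernels so the pm_active bit
 * operations below are atomic.  SETLK is the store that releases the
 * old thread's lock: with SCHED_ULE on SMP it must be an atomic swap
 * (xchgq asserts the lock prefix implicitly) so that it synchronizes
 * with the blocked_lock spin at sw1 below.  A rough C11 sketch of the
 * two flavors (an illustration, not the kernel's actual code):
 *
 *	atomic_exchange(&oldtd->td_lock, mtx);	/- SETLK == xchgq -/
 *	oldtd->td_lock = mtx;			/- SETLK == movq  -/
 */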

/*
 * cpu_throw()
 *
 * This is the second half of cpu_switch(). It is used when the current
 * thread is either a dummy or slated to die, and we no longer care
 * about its state.  This is only a slight optimization and is probably
 * not worth it anymore.  Note that we need to clear the pm_active bits so
 * we do need the old proc if it still exists.
 * %rdi = oldtd
 * %rsi = newtd
 */
ENTRY(cpu_throw)
	movl	PCPU(CPUID),%eax
	testq	%rdi,%rdi
	jz	1f
	/* release bit from old pm_active */
	movq	PCPU(CURPMAP),%rdx
	LK btrl	%eax,PM_ACTIVE(%rdx)		/* clear old */
1:
	movq	TD_PCB(%rsi),%r8		/* newtd->td_pcb */
	movq	PCB_CR3(%r8),%rdx
	movq	%rdx,%cr3			/* new address space */
	jmp	swact
END(cpu_throw)
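
/*
 * For reference, the C prototype (declared in <sys/proc.h>) is
 * roughly:
 *
 *	void cpu_throw(struct thread *old, struct thread *new) __dead2;
 *
 * sched_throw() passes old == NULL when an AP enters the scheduler
 * for the first time, hence the testq/jz guard on the pm_active
 * clearing above.
 */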

/*
 * cpu_switch(old, new, mtx)
 *
 * Save the current thread state, then select the next thread to run
 * and load its state.
 * %rdi = oldtd
 * %rsi = newtd
 * %rdx = mtx
 */
ENTRY(cpu_switch)
	/* Switch to new thread.  First, save context. */
	movq	TD_PCB(%rdi),%r8
	orl	$PCB_FULL_IRET,PCB_FLAGS(%r8)

	movq	(%rsp),%rax			/* Hardware registers */
	movq	%r15,PCB_R15(%r8)
	movq	%r14,PCB_R14(%r8)
	movq	%r13,PCB_R13(%r8)
	movq	%r12,PCB_R12(%r8)
	movq	%rbp,PCB_RBP(%r8)
	movq	%rsp,PCB_RSP(%r8)
	movq	%rbx,PCB_RBX(%r8)
	movq	%rax,PCB_RIP(%r8)

	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	store_dr			/* static predict not taken */
done_store_dr:

	/* have we used fp, and need a save? */
	cmpq	%rdi,PCPU(FPCURTHREAD)
	jne	1f
	movq	PCB_SAVEFPU(%r8),%r8
	clts
	fxsave	(%r8)
	smsw	%ax
	orb	$CR0_TS,%al
	lmsw	%ax
	xorl	%eax,%eax
	movq	%rax,PCPU(FPCURTHREAD)
1:
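	/*
	 * The block above is the lazy FPU save: fxsave runs only if
	 * the old thread owns the FPU (PCPU(FPCURTHREAD)), and CR0.TS
	 * is then set via smsw/lmsw (which touch only the low word of
	 * %cr0) so the next FPU instruction traps and reloads state
	 * on demand.
	 */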

	/* Save is done.  Now fire up new thread.  Leave old vmspace. */
	movq	TD_PCB(%rsi),%r8

	/* switch address space */
	movq	PCB_CR3(%r8),%rcx
	movq	%cr3,%rax
	cmpq	%rcx,%rax			/* Same address space? */
	jne	swinact
	SETLK	%rdx, TD_LOCK(%rdi)		/* Release the old thread */
	jmp	sw1
swinact:
	movq	%rcx,%cr3			/* new address space */
	movl	PCPU(CPUID), %eax
	/* Release bit from old pmap->pm_active */
	movq	PCPU(CURPMAP),%rcx
	LK btrl	%eax,PM_ACTIVE(%rcx)		/* clear old */
	SETLK	%rdx, TD_LOCK(%rdi)		/* Release the old thread */
swact:
	/* Set bit in new pmap->pm_active */
	movq	TD_PROC(%rsi),%rdx		/* newproc */
	movq	P_VMSPACE(%rdx), %rdx
	addq	$VM_PMAP,%rdx
	LK btsl	%eax,PM_ACTIVE(%rdx)		/* set new */
	movq	%rdx,PCPU(CURPMAP)
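	/*
	 * pm_active is a mask of the CPUs on which the pmap is
	 * active; keeping it current here lets the TLB shootdown
	 * code send IPIs only to CPUs that may hold stale entries
	 * for this address space.
	 */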

sw1:
#if defined(SCHED_ULE) && defined(SMP)
	/* Wait for the new thread to become unblocked */
	movq	$blocked_lock, %rdx
1:
	movq	TD_LOCK(%rsi),%rcx
	cmpq	%rcx, %rdx
	pause
	je	1b
#endif
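	/*
	 * A migrating thread has td_lock pointed at blocked_lock;
	 * spin (with pause, to play nicely with SMT siblings) until
	 * the previous CPU's SETLK store hands over a real lock.
	 */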
	/*
	 * At this point, we've switched address spaces and are ready
	 * to load up the rest of the next context.
	 */

	/* Skip loading user fsbase/gsbase for kthreads */
	testl	$TDP_KTHREAD,TD_PFLAGS(%rsi)
	jnz	do_kthread

	/*
	 * Load ldt register
	 */
	movq	TD_PROC(%rsi),%rcx
	cmpq	$0, P_MD+MD_LDT(%rcx)
	jne	do_ldt
	xorl	%eax,%eax
ld_ldt:	lldt	%ax

	/* Restore fs base in GDT */
	movl	PCB_FSBASE(%r8),%eax
	movq	PCPU(FS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)

	/* Restore gs base in GDT */
	movl	PCB_GSBASE(%r8),%eax
	movq	PCPU(GS32P),%rdx
	movw	%ax,2(%rdx)
	shrl	$16,%eax
	movb	%al,4(%rdx)
	shrl	$8,%eax
	movb	%al,7(%rdx)
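
	/*
	 * Both sequences above scatter a 32-bit base into the split
	 * base fields of a legacy GDT descriptor: bits 0-15 land in
	 * descriptor bytes 2-3, bits 16-23 in byte 4, and bits 24-31
	 * in byte 7.  A C sketch, assuming the user_segment_descriptor
	 * layout from <machine/segments.h>:
	 *
	 *	sd->sd_lobase = base & 0xffffff;  /- bytes 2..4 -/
	 *	sd->sd_hibase = base >> 24;	  /- byte 7 -/
	 *
	 * Only the 32-bit compatibility descriptors are patched here;
	 * the full 64-bit bases are loaded separately through the
	 * FSBASE/GSBASE MSRs on the way back to user mode.
	 */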

do_kthread:
	/* Do we need to reload tss ? */
	movq	PCPU(TSSP),%rax
	movq	PCB_TSSP(%r8),%rdx
	testq	%rdx,%rdx
	cmovzq	PCPU(COMMONTSSP),%rdx
	cmpq	%rax,%rdx
	jne	do_tss
done_tss:
	movq	%r8,PCPU(RSP0)
	movq	%r8,PCPU(CURPCB)
	/* Update the TSS_RSP0 pointer for the next interrupt */
	movq	%r8,COMMON_TSS_RSP0(%rdx)
	movq	%rsi,PCPU(CURTHREAD)		/* into next thread */

	/* Test if debug registers should be restored. */
	testl	$PCB_DBREGS,PCB_FLAGS(%r8)
	jnz	load_dr				/* static predict not taken */
done_load_dr:

	/* Restore context. */
	movq	PCB_R15(%r8),%r15
	movq	PCB_R14(%r8),%r14
	movq	PCB_R13(%r8),%r13
	movq	PCB_R12(%r8),%r12
	movq	PCB_RBP(%r8),%rbp
	movq	PCB_RSP(%r8),%rsp
	movq	PCB_RBX(%r8),%rbx
	movq	PCB_RIP(%r8),%rax
	movq	%rax,(%rsp)
	ret
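
/*
 * The ret above completes the switch: PCB_RIP holds the address the
 * incoming thread saved when it last entered cpu_switch() (or, for a
 * newly forked thread, presumably the fork_trampoline entry set up
 * by cpu_fork()), so patching the word at the new %rsp and returning
 * resumes execution there.
 */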

/*
 * We order these strangely for several reasons.
 * 1: I wanted to use static branch prediction hints
 * 2: Most athlon64/opteron cpus don't have them.  They define
 *    a forward branch as 'predict not taken'.  Intel cores have
 *    the 'rep' prefix to invert this.
 * So, to make it work on both forms of cpu we do the detour.
 * We use jumps rather than call in order to avoid the stack.
 */

store_dr:
	movq	%dr7,%rax			/* yes, do the save */
	movq	%dr0,%r15
	movq	%dr1,%r14
	movq	%dr2,%r13
	movq	%dr3,%r12
	movq	%dr6,%r11
	movq	%r15,PCB_DR0(%r8)
	movq	%r14,PCB_DR1(%r8)
	movq	%r13,PCB_DR2(%r8)
	movq	%r12,PCB_DR3(%r8)
	movq	%r11,PCB_DR6(%r8)
	movq	%rax,PCB_DR7(%r8)
	andq	$0x0000fc00, %rax		/* disable all watchpoints */
	movq	%rax,%dr7
	jmp	done_store_dr
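
/*
 * The mask 0x0000fc00 used above keeps only bits 10-15 of %dr7 (the
 * reserved bits, including must-be-one bit 10, and GD) while
 * clearing every breakpoint enable and R/W-length field, so no
 * stale watchpoint can fire while the kernel switches contexts.
 */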

load_dr:
	movq	%dr7,%rax
	movq	PCB_DR0(%r8),%r15
	movq	PCB_DR1(%r8),%r14
	movq	PCB_DR2(%r8),%r13
	movq	PCB_DR3(%r8),%r12
	movq	PCB_DR6(%r8),%r11
	movq	PCB_DR7(%r8),%rcx
	movq	%r15,%dr0
	movq	%r14,%dr1
	/* Preserve reserved bits in %dr7 */
	andq	$0x0000fc00,%rax
	andq	$~0x0000fc00,%rcx
	movq	%r13,%dr2
	movq	%r12,%dr3
	orq	%rcx,%rax
	movq	%r11,%dr6
	movq	%rax,%dr7
	jmp	done_load_dr

do_tss:	movq	%rdx,PCPU(TSSP)
	movq	%rdx,%rcx
	movq	PCPU(TSS),%rax
	movw	%cx,2(%rax)
	shrq	$16,%rcx
	movb	%cl,4(%rax)
	shrq	$8,%rcx
	movb	%cl,7(%rax)
	shrq	$8,%rcx
	movl	%ecx,8(%rax)
	movb	$0x89,5(%rax)			/* unset busy */
	movl	$TSSSEL,%eax
	ltr	%ax
	jmp	done_tss
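
/*
 * ltr requires an available (non-busy) TSS descriptor and marks it
 * busy as a side effect.  The stores above patch the new TSS base
 * into the descriptor, then byte 5 (the type/access byte) is reset
 * to 0x89 (present, 64-bit available TSS) before %tr is reloaded.
 */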

do_ldt:	movq	PCPU(LDT),%rax
	movq	P_MD+MD_LDT_SD(%rcx),%rdx
	movq	%rdx,(%rax)
	movq	P_MD+MD_LDT_SD+8(%rcx),%rdx
	movq	%rdx,8(%rax)
	movl	$LDTSEL,%eax
	jmp	ld_ldt
END(cpu_switch)
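
/*
 * For reference, the C prototype (declared in <sys/proc.h>) is
 * roughly:
 *
 *	void cpu_switch(struct thread *old, struct thread *new,
 *	    struct mtx *mtx);
 *
 * The mtx argument is what SETLK stores into old->td_lock once the
 * old context is fully saved; the scheduler typically passes the
 * run-queue lock, or blocked_lock for a migrating thread.
 */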

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* Save caller's return address. */
	movq	(%rsp),%rax
	movq	%rax,PCB_RIP(%rdi)

	movq	%rbx,PCB_RBX(%rdi)
	movq	%rsp,PCB_RSP(%rdi)
	movq	%rbp,PCB_RBP(%rdi)
	movq	%r12,PCB_R12(%rdi)
	movq	%r13,PCB_R13(%rdi)
	movq	%r14,PCB_R14(%rdi)
	movq	%r15,PCB_R15(%rdi)

	movq	%cr0,%rsi
	movq	%rsi,PCB_CR0(%rdi)
	movq	%cr2,%rax
	movq	%rax,PCB_CR2(%rdi)
	movq	%cr3,%rax
	movq	%rax,PCB_CR3(%rdi)
	movq	%cr4,%rax
	movq	%rax,PCB_CR4(%rdi)

	movq	%dr0,%rax
	movq	%rax,PCB_DR0(%rdi)
	movq	%dr1,%rax
	movq	%rax,PCB_DR1(%rdi)
	movq	%dr2,%rax
	movq	%rax,PCB_DR2(%rdi)
	movq	%dr3,%rax
	movq	%rax,PCB_DR3(%rdi)
	movq	%dr6,%rax
	movq	%rax,PCB_DR6(%rdi)
	movq	%dr7,%rax
	movq	%rax,PCB_DR7(%rdi)

	movl	$MSR_FSBASE,%ecx
	rdmsr
	movl	%eax,PCB_FSBASE(%rdi)
	movl	%edx,PCB_FSBASE+4(%rdi)
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movl	%eax,PCB_GSBASE(%rdi)
	movl	%edx,PCB_GSBASE+4(%rdi)
	movl	$MSR_KGSBASE,%ecx
	rdmsr
	movl	%eax,PCB_KGSBASE(%rdi)
	movl	%edx,PCB_KGSBASE+4(%rdi)

	sgdt	PCB_GDT(%rdi)
	sidt	PCB_IDT(%rdi)
	sldt	PCB_LDT(%rdi)
	str	PCB_TR(%rdi)

	clts
	fxsave	PCB_USERFPU(%rdi)
	movq	%rsi,%cr0	/* The previous %cr0 is saved in %rsi. */

	movl	$1,%eax
	ret
END(savectx)
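
/*
 * savectx() snapshots the full CPU state into a pcb without
 * switching threads; callers include the crash dump path, which
 * saves into dumppcb before dumping.  The constant return value of
 * 1 appears to follow the setjmp convention, leaving 0 free for a
 * context that is resumed rather than saved; treat that reading as
 * an editorial assumption.
 */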