FreeBSD/Linux Kernel Cross Reference
sys/arm/arm/swtch.S
1 /* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
2
3 /*-
4 * Copyright 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Steve C. Woodford for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37 /*-
38 * Copyright (c) 1994-1998 Mark Brinicombe.
39 * Copyright (c) 1994 Brini.
40 * All rights reserved.
41 *
42 * This code is derived from software written for Brini by Mark Brinicombe
43 *
44 * Redistribution and use in source and binary forms, with or without
45 * modification, are permitted provided that the following conditions
46 * are met:
47 * 1. Redistributions of source code must retain the above copyright
48 * notice, this list of conditions and the following disclaimer.
49 * 2. Redistributions in binary form must reproduce the above copyright
50 * notice, this list of conditions and the following disclaimer in the
51 * documentation and/or other materials provided with the distribution.
52 * 3. All advertising materials mentioning features or use of this software
53 * must display the following acknowledgement:
54 * This product includes software developed by Brini.
55 * 4. The name of the company nor the name of the author may be used to
56 * endorse or promote products derived from this software without specific
57 * prior written permission.
58 *
59 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
60 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
61 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
62 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
63 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
64 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
65 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
70 *
71 * RiscBSD kernel project
72 *
73 * cpuswitch.S
74 *
75 * cpu switching functions
76 *
77 * Created : 15/10/94
78 *
79 */
80
81 #include "assym.s"
82
83 #include <machine/asm.h>
84 #include <machine/asmacros.h>
85 #include <machine/armreg.h>
86 __FBSDID("$FreeBSD$");
87
88
/*
 * New experimental definitions of IRQdisable and IRQenable
 * These keep FIQ's enabled since FIQ's are special.
 *
 * All four macros clobber r14 (lr); do not use them while a live
 * return address is still in lr.
 */

#define DOMAIN_CLIENT	0x01

/* Disable IRQs only; FIQs stay enabled.  Clobbers r14. */
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

/* Re-enable IRQs; FIQ state untouched.  Clobbers r14. */
#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

/*
 * These are used for switching the translation table/DACR.
 * Since the vector page can be invalid for a short time, we must
 * disable both regular IRQs *and* FIQs.
 *
 * XXX: This is not necessary if the vector table is relocated.
 */
#define IRQdisableALL \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14

#define IRQenableALL \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit | F32_bit) ; \
	msr	cpsr_c, r14
121
/* Literal pool: addresses of kernel globals referenced below. */
.Lcurpcb:
	.word	_C_LABEL(__pcpu) + PC_CURPCB	/* &curpcb slot in per-CPU data */
.Lcpufuncs:
	.word	_C_LABEL(cpufuncs)		/* CPU function dispatch table */
.Lblock_userspace_access:
	.word	_C_LABEL(block_userspace_access)
.Lcpu_do_powersave:
	.word	_C_LABEL(cpu_do_powersave)
/*
 * NOTE(review): cpu_throw/cpu_switch below also load `.Lcurthread`,
 * which is not defined in this view -- presumably defined elsewhere
 * in the file; confirm it exists before reorganizing this pool.
 */
/*
 * void cpu_throw(struct thread *oldtd, struct thread *newtd)
 *
 * Switch to newtd, discarding the old thread's context entirely
 * (used when the outgoing thread is exiting: nothing is saved for it).
 * On entry: r1 = newtd; r0 (oldtd) is never read here.
 */
ENTRY(cpu_throw)
	mov	r5, r1			/* r5 = newtd */

	ldr	r7, [r5, #(TD_PCB)]	/* r7 = new thread's PCB */

	/*
	 * Write back and invalidate the whole I+D cache before the old
	 * address space goes away.  `mov lr, pc` sets the return address
	 * for the indirect call through the cpufuncs table.
	 */
	ldr	r9, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r9, #CF_IDCACHE_WBINV_ALL]
	ldr	r0, [r7, #(PCB_PL1VEC)]	/* L1 slot for vector_page, or NULL */
	ldr	r1, [r7, #(PCB_DACR)]	/* new thread's DACR */
	/*
	 * rem:	r0 = pointer to L1 slot for vector_page (or NULL)
	 *	r1 = new thread's DACR
	 *	r5 = newtd
	 *	r7 = new thread's PCB
	 *	r9 = cpufuncs
	 */

	/*
	 * Ensure the vector table is accessible by fixing up the new
	 * thread's L1 entry for vector_page, if one is needed.
	 */
	cmp	r0, #0			/* No need to fixup vector table? */
	ldrne	r3, [r0]		/* But if yes, fetch current value */
	ldrne	r2, [r7, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r1, c3, c0, 0	/* Update DACR for the new context */
	cmpne	r3, r2			/* Stuffing the same value? */
	strne	r2, [r0]		/* Store if not. */

#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.  r0 = address, r1 = length for the
	 * dcache_wb_range call; note this overwrites the DACR value
	 * in r1, which has already been written to cp15 above.
	 */
	movne	r1, #4
	movne	lr, pc
	ldrne	pc, [r9, #CF_DCACHE_WB_RANGE]
#endif /* PMAP_INCLUDE_PTE_SYNC */

	/*
	 * Note: We don't do the same optimisation as cpu_switch() with
	 * respect to avoiding flushing the TLB if we're switching to
	 * the same L1 since this process' VM space may be about to go
	 * away, so we don't want *any* turds left in the TLB.
	 */

	/* Switch the MMU over to the new thread's L1 table. */
	ldr	r0, [r7, #(PCB_PAGEDIR)]
	mov	lr, pc
	ldr	pc, [r9, #CF_CONTEXT_SWITCH]

	/* Restore all the saved registers from the new thread's PCB. */
#ifndef __XSCALE__
	add	r1, r7, #PCB_R8
	ldmia	r1, {r8-r13}
#else
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]	/* sp = new thread's kernel stack */
#endif

	/* We have a new curthread now so make a note of it. */
	ldr	r6, .Lcurthread
	str	r5, [r6]

	/* Publish the new thread's TLS pointer at the magic address. */
	ldr	r6, [r5, #(TD_MD + MD_TP)]
	mov	r5, #ARM_TP_ADDRESS
	strt	r6, [r5]		/* user-mode store (strt) to shared page */

	/* Hook in a new pcb. */
	ldr	r6, .Lcurpcb
	str	r7, [r6]

	/*
	 * Return through the frame the new thread pushed when it was
	 * last switched out (sp was reloaded from PCB_SP above).
	 */
	ldmfd	sp!, {r4-r7, pc}
214
/*
 * void cpu_switch(struct thread *oldtd, struct thread *newtd)
 *
 * Save the outgoing thread's context into its PCB, then load the
 * incoming thread's context: TLS pointer, undefined-mode banked sp,
 * DACR, L1 translation table (flushing caches/TLB only when the
 * address space actually changes), and callee-saved registers.
 *
 * On entry: r0 = oldtd, r1 = newtd.
 */
ENTRY(cpu_switch)
	stmfd	sp!, {r4-r7, lr}	/* popped when this thread resumes */

.Lswitch_resume:
	/* rem: r0 = old thread */
	/*
	 * NOTE(review): the original annotations said "interrupts are
	 * disabled" here but "enabled" a few lines below; they cannot
	 * both be right -- confirm against the callers.
	 */

#ifdef MULTIPROCESSOR
	/*
	 * XXX use curcpu().  NOTE(review): stale NetBSD code -- r6 is
	 * not set up at this point in this version of the routine.
	 */
	ldr	r2, .Lcpu_info_store
	str	r2, [r6, #(L_CPU)]
#endif

	/* Process is now on a processor. */

	/* We have a new curthread now so make a note of it. */
	ldr	r7, .Lcurthread
	str	r1, [r7]

	/* Hook in a new pcb. */
	ldr	r7, .Lcurpcb
	ldr	r2, [r1, #TD_PCB]
	str	r2, [r7]

	/* rem: r1 = new thread */

	/* Stage two : Save old context */

	/* Get the PCB of the old thread. */
	ldr	r2, [r0, #(TD_PCB)]

	/* Save the callee-saved registers in the old thread's PCB. */
#ifndef __XSCALE__
	add	r7, r2, #(PCB_R8)
	stmia	r7, {r8-r13}
#else
	/* strd stores an even/odd register pair in one instruction. */
	strd	r8, [r2, #(PCB_R8)]
	strd	r10, [r2, #(PCB_R10)]
	strd	r12, [r2, #(PCB_R12)]
#endif

	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */
	/* Store the old TLS pointer (read from the magic page). */
	mov	r3, #ARM_TP_ADDRESS
	ldrt	r9, [r3]		/* user-mode load (ldrt) */
	str	r9, [r0, #(TD_MD + MD_TP)]

	/* Set the new TLS pointer. */
	ldr	r9, [r1, #(TD_MD + MD_TP)]
	strt	r9, [r3]

	/* Get the PCB of the new thread in r9. */
	ldr	r9, [r1, #(TD_PCB)]

	/* r1 now free! */

	mrs	r3, cpsr
	/*
	 * Save the undefined-mode banked sp.  A simple orr suffices to
	 * enter undefined mode, since
	 * PSR_SVC32_MODE|PSR_UND32_MODE == PSR_UND32_MODE
	 */
	orr	r8, r3, #(PSR_UND32_MODE)
	msr	cpsr_c, r8

	str	sp, [r2, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */
	/* rem: r2 = old PCB (original comment wrongly said r8) */
	/* rem: r9 = new PCB */

	/* What else needs to be saved?  Only FPA stuff when supported. */

	/* Third phase : restore saved context */

	/* rem: r9 = new PCB */

	ldr	r5, [r9, #(PCB_DACR)]	/* r5 = new DACR */
	mov	r2, #DOMAIN_CLIENT
	cmp	r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
	beq	.Lcs_context_switched	/* Yup. Don't flush cache */
	mrc	p15, 0, r0, c3, c0, 0	/* r0 = old DACR */
	/*
	 * Get the new L1 table pointer into r11.  If we're switching to
	 * a thread with the same address space as the outgoing one, we can
	 * skip the cache purge and the TTB load.
	 *
	 * To avoid data dep stalls that would happen anyway, we try
	 * and get some useful work done in the mean time.
	 */
	mrc	p15, 0, r10, c2, c0, 0	/* r10 = old L1 */
	ldr	r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */

	teq	r10, r11		/* Same L1? */
	cmpeq	r0, r5			/* Same DACR? */
	beq	.Lcs_context_switched	/* yes! */

	/*
	 * Definitely need to flush the cache.
	 */

	ldr	r1, .Lcpufuncs
	mov	lr, pc
	ldr	pc, [r1, #CF_IDCACHE_WBINV_ALL]
.Lcs_cache_purge_skipped:
	/* rem: r9 = new PCB */
	/* rem: r10 = old L1 */
	/* rem: r11 = new L1 */
	/* (stale NetBSD notes about r4/r6 removed -- neither is set here) */

	mov	r2, #0x00000000
	ldr	r7, [r9, #(PCB_PL1VEC)]	/* L1 slot for vector_page, or NULL */

	/*
	 * Ensure the vector table is accessible by fixing up the L1
	 */
	cmp	r7, #0			/* No need to fixup vector table? */
	ldrne	r2, [r7]		/* But if yes, fetch current value */
	ldrne	r0, [r9, #(PCB_L1VEC)]	/* Fetch new vector_page value */
	mcr	p15, 0, r5, c3, c0, 0	/* Update DACR for new context */
	cmpne	r2, r0			/* Stuffing the same value? */
#ifndef PMAP_INCLUDE_PTE_SYNC
	strne	r0, [r7]		/* Nope, update it */
#else
	beq	.Lcs_same_vector
	str	r0, [r7]		/* Otherwise, update it */

	/*
	 * Need to sync the cache to make sure that last store is
	 * visible to the MMU.
	 */
	ldr	r2, .Lcpufuncs
	mov	r0, r7
	mov	r1, #4
	mov	lr, pc
	ldr	pc, [r2, #CF_DCACHE_WB_RANGE]

.Lcs_same_vector:
#endif /* PMAP_INCLUDE_PTE_SYNC */

	cmp	r10, r11		/* Switching to the same L1? */
	ldr	r10, .Lcpufuncs
	beq	.Lcs_same_l1		/* Yup. */
	/*
	 * Do a full context switch, including full TLB flush.
	 */
	mov	r0, r11
	mov	lr, pc
	ldr	pc, [r10, #CF_CONTEXT_SWITCH]

	b	.Lcs_context_switched

	/*
	 * We're switching to a different process in the same L1.
	 * In this situation, we only need to flush the TLB for the
	 * vector_page mapping, and even then only if r7 is non-NULL.
	 */
.Lcs_same_l1:
	cmp	r7, #0
	movne	r0, #0			/* We *know* vector_page's VA is 0x0 */
	movne	lr, pc
	ldrne	pc, [r10, #CF_TLB_FLUSHID_SE]

.Lcs_context_switched:

	/* XXXSCW: Safe to re-enable FIQs here */

	/* rem: r9 = new PCB */

	mrs	r3, cpsr
	/*
	 * Restore the undefined-mode banked sp.  As before,
	 * PSR_SVC32_MODE|PSR_UND32_MODE == PSR_UND32_MODE
	 */
	orr	r2, r3, #(PSR_UND32_MODE)
	msr	cpsr_c, r2

	ldr	sp, [r9, #(PCB_UND_SP)]

	msr	cpsr_c, r3		/* Restore the old mode */
	/* Restore all the saved registers. */
#ifndef __XSCALE__
	add	r7, r9, #PCB_R8
	ldmia	r7, {r8-r13}
	sub	r7, r7, #PCB_R8		/* restore PCB pointer */
#else
	mov	r7, r9
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
	ldr	r13, [r7, #(PCB_SP)]	/* sp = new thread's kernel stack */
#endif

	/* rem: r7 = new PCB */

#ifdef ARMFPE
	/* Split USER_SIZE into two adds: ARM immediates are 8-bit rotated. */
	add	r0, r7, #(USER_SIZE) & 0x00ff
	add	r0, r0, #(USER_SIZE) & 0xff00
	bl	_C_LABEL(arm_fpe_core_changecontext)
#endif

.Lswitch_return:

	/*
	 * Pull the registers that got pushed when either savectx() or
	 * cpu_switch() was called and return.
	 */
	ldmfd	sp!, {r4-r7, pc}
#ifdef DIAGNOSTIC
	/*
	 * Fatal scheduler inconsistency.  panic() should not return;
	 * spin forever if it somehow does.
	 */
.Lswitch_bogons:
	adr	r0, .Lswitch_panic_str
	bl	_C_LABEL(panic)
1:	nop
	b	1b			/* not reached */

.Lswitch_panic_str:
	.asciz	"cpu_switch: sched_qs empty with non-zero sched_whichqs!\n"
#endif
/*
 * savectx(pcb) -- stub.
 * NOTE(review): this returns immediately without saving any register
 * state into the PCB; callers must not rely on the PCB being
 * populated.  Confirm this is intentional for this platform.
 */
ENTRY(savectx)
	RET
/*
 * fork_trampoline()
 *
 * First code executed by a newly created thread when cpu_switch()
 * resumes it.  Expects (set up elsewhere -- confirm against the
 * thread-creation code): r4 = callout function, r5 = callout argument,
 * sp = trapframe.  Calls fork_exit(r4, r5, frame), then returns to
 * user mode through the trapframe.
 */
ENTRY(fork_trampoline)
	mov	r1, r5			/* arg1 = callout argument */
	mov	r2, sp			/* arg2 = trapframe pointer */
	mov	r0, r4			/* arg0 = callout function */
	mov	fp, #0			/* terminate the frame-pointer chain */
	bl	_C_LABEL(fork_exit)
	/* Disable IRQs before touching the frame. */
	mrs	r0, cpsr
	orr	r0, r0, #(I32_bit)
	msr	cpsr_c, r0
	DO_AST				/* handle any pending AST */
	PULLFRAME			/* restore user registers from frame */

	movs	pc, lr			/* Exit: movs pc also copies SPSR->CPSR */

AST_LOCALS
Cache object: 8c50c1c1a7a619e2bc446b8fab3322d5
|