1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28
29 /* Low level routines dealing with exception entry and exit.
30 * There are various types of exception:
31 *
 * Interrupt, trap, system call and debugger entry. Each has its own
33 * handler since the state save routine is different for each. The
34 * code is very similar (a lot of cut and paste).
35 *
36 * The code for the FPU disabled handler (lazy fpu) is in cswtch.s
37 */
38
39 #include <debug.h>
40 #include <mach_assert.h>
41 #include <mach/exception_types.h>
42 #include <mach/kern_return.h>
43 #include <mach/ppc/vm_param.h>
44
45 #include <assym.s>
46
47 #include <ppc/asm.h>
48 #include <ppc/proc_reg.h>
49 #include <ppc/trap.h>
50 #include <ppc/exception.h>
51 #include <ppc/savearea.h>
52 #include <ppc/spl.h>
53
54
55 #define VERIFYSAVE 0
56 #define FPVECDBG 0
57 #define INSTRUMENT 0
58
59 /*
60 * thandler(type)
61 *
62 * ENTRY: VM switched ON
63 * Interrupts OFF
64 * R3 contains exception code
65 * R4 points to the saved context (virtual address)
66 * Everything is saved in savearea
67 */
68
69 /*
70 * If pcb.ksp == 0 then the kernel stack is already busy,
71 * we make a stack frame
72 * leaving enough space for the 'red zone' in case the
73 * trapped thread was in the middle of saving state below
74 * its stack pointer.
75 *
 * otherwise we make a stack frame and take
 * the kernel stack (setting pcb.ksp to 0)
78 *
79 * on return, we do the reverse, the last state is popped from the pcb
80 * and pcb.ksp is set to the top of stack
81 */
82
83 /* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
84 * another trap is taken. We need at least enough space for a saved state
85 * structure plus two small backpointer frames, and we add a few
86 * hundred bytes for the space needed by the C (which may be less but
87 * may be much more). We're trying to catch kernel stack overflows :-)
88 */
89
90 #define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256
91
92 .text
93
94 .align 5
95 .globl EXT(thandler)
96 LEXT(thandler) ; Trap handler
97
98 mfsprg r25,0 ; Get the per_proc
99
100 lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
101
102 mfsprg r13,1 ; Get the current thread
103 cmpwi cr0,r1,0 ; Are we on interrupt stack?
104 lwz r6,ACT_THREAD(r13) ; Get the shuttle
105 beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
106 lwz r26,ACT_MACT_SPF(r13) ; Get special flags
107 lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used
108 rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active?
109 lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack
110 bnel- checkassist ; See if we should assist this
111 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
112 stw r8,SAVprev+4(r4) ; Queue the new save area in the front
113
114 #if VERIFYSAVE
115 bl versave ; (TEST/DEBUG)
116 #endif
117
118 lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start
119 cmpwi cr1,r1,0 ; Are we already on kernel stack?
120 stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation
121 lwz r26,saver1+4(r4) ; Get the stack at interrupt time
122
123 bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
124
125 subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
126
127 .L_kstackfree:
128 lwz r7,savesrr1+4(r4) ; Pick up the entry MSR
129 sub r9,r1,r9 ; Get displacment into the kernel stack
130 li r0,0 ; Make this 0
131 rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
132 cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
133 beq cr1,.L_state_on_kstack ; using above test for pcb/stack
134
135 stw r0,ACT_MACT_KSP(r13) ; Show that we have taken the stack
136
137 .L_state_on_kstack:
138 lwz r9,savevrsave(r4) ; Get the VRSAVE register
139 bne-- kernelStackUnaligned ; Stack is unaligned...
140 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
141 subi r1,r1,FM_SIZE ; Push a header onto the current stack
142 bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
143
144 kernelStackNotBad: ; Vector was off
145 beq++ tvecoff ; Vector off, do not save vrsave...
146 stw r9,liveVRS(r25) ; Set the live value
147
148 tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
149
150 #if DEBUG
151 /* If debugging, we need two frames, the first being a dummy
152 * which links back to the trapped routine. The second is
153 * that which the C routine below will need
154 */
155 lwz r3,savesrr0+4(r4) ; Get the point of interruption
156 stw r3,FM_LR_SAVE(r1) ; save old instr ptr as LR value
157 stwu r1, -FM_SIZE(r1) ; and make new frame
158 #endif /* DEBUG */
159
160
161 /* call trap handler proper, with
162 * ARG0 = type (not yet, holds pcb ptr)
163 * ARG1 = saved_state ptr (already there)
164 * ARG2 = dsisr (already there)
165 * ARG3 = dar (already there)
166 */
167
168
169 lwz r3,saveexception(r4) ; Get the exception code
170 lwz r0,ACT_MACT_SPF(r13) ; Get the special flags
171
172 addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range
173 rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes)
174 cmplwi cr2,r5,T_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not)
175
176 lwz r5,savedsisr(r4) ; Get the saved DSISR
177
178 crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes)
179 rlwinm. r0,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
180
181 cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
182
183 beq-- .L_check_VM
184 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
185 .L_check_VM:
186
187 crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)
188
189 lwz r6,savedar(r4) ; Get the DAR (top)
190 lwz r7,savedar+4(r4) ; Get the DAR (bottom)
191
192 beq- cr2,.L_call_trap ; Do not turn on interrupts for T_PREEMPT
193 beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM...
194
195 /* syscall exception might warp here if there's nothing left
196 * to do except generate a trap
197 */
198
199 .L_call_trap:
200
201 bl EXT(trap)
202
203 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
204 mfmsr r7 ; Get the MSR
205 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
206 andc r7,r7,r10 ; Turn off VEC, FP, and EE
207 mtmsr r7 ; Disable for interrupts
208 mfsprg r10,0 ; Restore the per_proc info
209 /*
210 * This is also the point where new threads come when they are created.
211 * The new thread is setup to look like a thread that took an
212 * interrupt and went immediatly into trap.
213 */
214
215 thread_return:
216 lwz r11,SAVflags(r3) ; Get the flags of the current savearea
217 lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
218 lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
219 mfsprg r8,1 ; Get the current thread
220 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
221 rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
222 lwz r1,ACT_THREAD(r8) ; Get the shuttle
223 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
224
225 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
226 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
227 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
228
229 beq-- chkfac ; We are not leaving the kernel yet...
230
231 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
232 b chkfac ; Go end it all...
233
234
235 ;
236 ; Here is where we go when we detect that the kernel stack is all messed up.
237 ; We just try to dump some info and get into the debugger.
238 ;
239
kernelStackBad:										; Kernel stack pointer was out of bounds; see
													; if we are legitimately on the debug stack,
													; otherwise abort the system

			lwz		r3,PP_DEBSTACK_TOP_SS(r25)		; Pick up debug stack top
			subi	r3,r3,KERNEL_STACK_SIZE-FM_SIZE	; Adjust to start of stack
			sub		r3,r1,r3						; Get displacement into debug stack
			cmplwi	cr2,r3,KERNEL_STACK_SIZE-FM_SIZE	; Check if we are on debug stack
			blt+	cr2,kernelStackNotBad			; Yeah, that is ok too...

			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failStack					; Bad stack code
			sc										; System ABEND
252
kernelStackUnaligned:								; Kernel stack was not 16-byte aligned; abort
			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failUnalignedStk				; Unaligned stack code
			sc										; System ABEND
258
259
260 /*
261 * shandler(type)
262 *
263 * ENTRY: VM switched ON
264 * Interrupts OFF
265 * R3 contains exception code
266 * R4 points to the saved context (virtual address)
267 * Everything is saved in savearea
268 */
269
270 /*
271 * If pcb.ksp == 0 then the kernel stack is already busy,
272 * this is an error - jump to the debugger entry
273 *
274 * otherwise depending upon the type of
275 * syscall, look it up in the kernel table
276 * or pass it to the server.
277 *
278 * on return, we do the reverse, the state is popped from the pcb
279 * and pcb.ksp is set to the top of stack.
280 */
281
282 /*
283 * NOTE:
284 * mach system calls are negative
285 * BSD system calls are low positive
286 * PPC-only system calls are in the range 0x6xxx
287 * PPC-only "fast" traps are in the range 0x7xxx
288 */
289
/*
 * shandler -- system call handler.
 *
 * On entry (VM on, interrupts off): r0/saver0 = syscall number,
 * r4 = savearea, per_proc in r25.  Dispatches to the Blue Box assist,
 * FAM/VMM exit, fast-path traps (0x7xxx), PPC-only calls (0x6xxx),
 * Mach calls (negative numbers) or UNIX calls (positive numbers),
 * after taking the kernel stack and marking the savearea as a syscall.
 */
			.align	5
			.globl	EXT(shandler)
LEXT(shandler)										; System call handler

			lwz		r7,savesrr1+4(r4)				; Get the SRR1 value
			mfsprg	r25,0							; Get the per proc area
			lwz		r0,saver0+4(r4)					; Get the original syscall number
			lwz		r17,PP_ISTACKPTR(r25)			; Get interrupt stack pointer
			mfsprg	r13,1							; Get the current thread
			rlwinm	r15,r0,0,0,19					; Clear the bottom of call number for fast check
			mr.		r17,r17							; Are we on interrupt stack?
			lwz		r9,savevrsave(r4)				; Get the VRsave register
			beq--	EXT(ihandler)					; On interrupt stack, not allowed...
			rlwinm.	r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT	; Was vector on?
			lwz		r16,ACT_THREAD(r13)				; Get the shuttle

			beq++	svecoff							; Vector off, do not save vrsave...
			stw		r9,liveVRS(r25)					; Set the live value
;
;			Check if SCs are being redirected for the BlueBox or to VMM
;

svecoff:	lwz		r6,ACT_MACT_SPF(r13)			; Pick up activation special flags
			mtcrf	0x40,r6							; Check special flags
			mtcrf	0x01,r6							; Check special flags
			crmove	cr6_eq,runningVMbit				; Remember if we are in VMM
			bne++	cr6,sVMchecked					; Not running VM
			lwz		r18,spcFlags(r25)				; Load per_proc special flags
			rlwinm.	r18,r18,0,FamVMmodebit,FamVMmodebit	; Is FamVMmodebit set?
			beq		sVMchecked						; Not in FAM
			cmpwi	r0,0x6004						; Is it vmm_dispatch syscall:
			bne		sVMchecked						; No, not vmm_dispatch
			lwz		r26,saver3+4(r4)				; Get the original syscall number
			cmpwi	cr6,r26,kvmmExitToHost			; vmm_exit_to_host request
sVMchecked:
			bf++	bbNoMachSCbit,noassist			; Take branch if SCs are not redirected
			lwz		r26,ACT_MACT_BEDA(r13)			; Pick up the pointer to the blue box exception area
			b		EXT(atomic_switch_syscall)		; Go to the assist...

noassist:	cmplwi	r15,0x7000						; Do we have a fast path trap?
			lwz		r14,ACT_MACT_PCB(r13)			; Now point to the PCB
			beql	fastpath						; We think it is a fastpath...

			lwz		r1,ACT_MACT_KSP(r13)			; Get the kernel stack pointer
#if DEBUG
			mr.		r1,r1							; Are we already on the kernel stack?
			li		r3,T_SYSTEM_CALL				; Yup, pretend we had an interrupt...
			beq-	EXT(ihandler)					; Bad boy, bad boy... What cha gonna do when they come for you?
#endif /* DEBUG */

			stw		r4,ACT_MACT_PCB(r13)			; Point to our savearea
			stw		r4,ACT_MACT_UPCB(r13)			; Store user savearea
			li		r0,0							; Clear this out
			stw		r14,SAVprev+4(r4)				; Queue the new save area in the front
			stw		r13,SAVact(r4)					; Point the savearea at its activation

#if VERIFYSAVE
			bl		versave							; (TEST/DEBUG)
#endif

			lwz		r15,saver1+4(r4)				; Grab interrupt time stack
			mr		r30,r4							; Save pointer to the new context savearea
			stw		r0,ACT_MACT_KSP(r13)			; Mark stack as busy with 0 val
			stw		r15,FM_BACKPTR(r1)				; Link stack frame backwards

#if DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
			lwz		r8,savesrr0+4(r30)				; Get the point of interruption
			stw		r8,FM_LR_SAVE(r1)				; Save old instr ptr as LR value
			stwu	r1, -FM_SIZE(r1)				; and make new frame
#endif /* DEBUG */

			lwz		r15,SAVflags(r30)				; Get the savearea flags
			lwz		r0,saver0+4(r30)				; Get R0 back
			mfmsr	r11								; Get the MSR
			stwu	r1,-(FM_SIZE+ARG_SIZE)(r1)		; Make a stack frame
			ori		r11,r11,lo16(MASK(MSR_EE))		; Turn on interruption enabled bit
			rlwinm	r10,r0,0,0,19					; Keep only the top part
			oris	r15,r15,SAVsyscall >> 16		; Mark that this is a syscall
			cmplwi	r10,0x6000						; Is it the special ppc-only guy?
			stw		r15,SAVflags(r30)				; Save syscall marker
			beq--	cr6,exitFromVM					; It is time to exit from alternate context...

			beq--	ppcscall						; Call the ppc-only system call handler...

			mr.		r0,r0							; What kind is it?
			mtmsr	r11								; Enable interruptions

			blt--	.L_kernel_syscall				; System call number if negative, this is a mach call...

			lwz		r8,ACT_TASK(r13)				; Get our task
			cmpwi	cr0,r0,0x7FFA					; Special blue box call?
			beq--	.L_notify_interrupt_syscall		; Yeah, call it...

			lwz		r7,TASK_SYSCALLS_UNIX(r8)		; Get the current count
			mr		r3,r30							; Get PCB/savearea
			mr		r4,r13							; current activation
			addi	r7,r7,1							; Bump it
			stw		r7,TASK_SYSCALLS_UNIX(r8)		; Save it
			bl		EXT(unix_syscall)				; Check out unix...

.L_call_server_syscall_exception:
			li		r3,EXC_SYSCALL					; doexception(EXC_SYSCALL, num, 1)

.L_call_server_exception:
			mr		r4,r0							; Set syscall selector
			li		r5,1							; Set code count of 1
			b		EXT(doexception)				; Go away, never to return...

.L_notify_interrupt_syscall:
			lwz		r3,saver3+4(r30)				; Get the new PC address to pass in
			bl		EXT(syscall_notify_interrupt)	; Tell the Blue Box about the interrupt
/*
 * Ok, return from C function, R3 = return value
 *
 * saved state is still in R30 and the active thread is in R16 .
 */
			mr		r31,r16							; Move the current thread pointer
			stw		r3,saver3+4(r30)				; Stash the return code
			b		.L_thread_syscall_ret_check_ast	; Take the AST-checking syscall return
413
414 ;
415 ; Handle PPC-only system call interface
416 ; These are called with interruptions disabled
417 ; and the savearea/pcb as the first parameter.
418 ; It is up to the callee to enable interruptions if
419 ; they should be. We are in a state here where
420 ; both interrupts and preemption is ok, but because we could
421 ; be calling diagnostic code we will not enable.
422 ;
423 ; Also, the callee is responsible for finding any parameters
424 ; in the savearea/pcb. It also must set saver3 with any return
425 ; code before returning.
426 ;
427 ; There are 3 possible return codes:
428 ; 0 the call is disabled or something, we treat this like it was bogus
429 ; + the call finished ok, check for AST
430 ; - the call finished ok, do not check for AST
431 ;
432 ; Note: the last option is intended for special diagnostics calls that
433 ; want the thread to return and execute before checking for preemption.
434 ;
435 ; NOTE: Both R16 (thread) and R30 (savearea) need to be preserved over this call!!!!
436 ;
437
			.align	5

ppcscall:	rlwinm	r11,r0,2,18,29					; Make an index into the table
			lis		r10,hi16(EXT(PPCcalls))			; Get PPC-only system call table
			cmplwi	r11,PPCcallmax					; See if we are too big
			ori		r10,r10,lo16(EXT(PPCcalls))		; Merge in low half
			bgt-	.L_call_server_syscall_exception	; Bogus call...
			lwzx	r11,r10,r11						; Get function address

;
;			Note: make sure we do not change the savearea in R30 to
;			a different register without checking.  Some of the PPCcalls
;			depend upon it being there.
;

			mr		r3,r30							; Pass the savearea
			mr		r4,r13							; Pass the activation
			mr.		r11,r11							; See if there is a function here
			mtctr	r11								; Set the function address
			beq-	.L_call_server_syscall_exception	; Disabled call...
#if INSTRUMENT
			mfspr	r4,pmc1							; Get stamp
			stw		r4,0x6100+(9*16)+0x0(0)			; Save it
			mfspr	r4,pmc2							; Get stamp
			stw		r4,0x6100+(9*16)+0x4(0)			; Save it
			mfspr	r4,pmc3							; Get stamp
			stw		r4,0x6100+(9*16)+0x8(0)			; Save it
			mfspr	r4,pmc4							; Get stamp
			stw		r4,0x6100+(9*16)+0xC(0)			; Save it
#endif
			bctrl									; Call it
469
			.globl	EXT(ppcscret)

LEXT(ppcscret)										; PPC-only syscall return; r3 = handler result:
													; >0 check ASTs, <0 no AST check, 0 bogus call
			mr.		r3,r3							; See what we should do
			mr		r31,r16							; Restore the current thread pointer
			bgt+	.L_thread_syscall_ret_check_ast	; Take normal AST checking return....
			mfsprg	r10,0							; Get the per_proc
			blt+	.L_thread_syscall_return		; Return, but no ASTs....
			lwz		r0,saver0+4(r30)				; Restore the system call number
			b		.L_call_server_syscall_exception	; Go to common exit...
480
481
482
483 /*
484 * we get here for mach system calls
485 * when kdebug tracing is enabled
486 */
487
ksystrace:											; Mach syscall path when kdebug tracing is on:
													; trace entry, dispatch, trace exit
			mr		r4,r30							; Pass in saved state
			bl		EXT(syscall_trace)				; Trace the entry of the system call

			cmplw	r31,r29							; Is this syscall in the table?
			add		r31,r27,r28						; Point right to the syscall table entry

			bge-	.L_call_server_syscall_exception	; The syscall number is invalid

			lwz		r0,MACH_TRAP_FUNCTION(r31)		; Pick up the function address
;
;	NOTE: We do not support more than 8 parameters for PPC.  The only
;	system call to use more than 8 is mach_msg_overwrite_trap and it
;	uses 9.  We pass a 0 in as number 9.
;
			lwz		r3,saver3+4(r30)				; Restore r3
			lwz		r4,saver4+4(r30)				; Restore r4
			mtctr	r0								; Set the function call address
			lwz		r5,saver5+4(r30)				; Restore r5
			lwz		r6,saver6+4(r30)				; Restore r6
			lwz		r7,saver7+4(r30)				; Restore r7
			li		r0,0							; Clear this out
			lwz		r8,saver8+4(r30)				; Restore r8
			lwz		r9,saver9+4(r30)				; Restore r9
			lwz		r10,saver10+4(r30)				; Restore r10
			stw		r0,FM_ARG0(r1)					; Clear that 9th parameter just in case some fool uses it
			bctrl									; perform the actual syscall

			mr		r4,r30							; Pass in the savearea
			bl		EXT(syscall_trace_end)			; Trace the exit of the system call
			b		.L_mach_return					; Join the common mach syscall return
519
520
521
522 /* Once here, we know that the syscall was -ve
523 * we should still have r1=ksp,
524 * r16 = pointer to current thread,
525 * r13 = pointer to top activation,
526 * r0 = syscall number
527 * r30 = pointer to saved state (in pcb)
528 */
529
			.align	5

.L_kernel_syscall:
;
; Mach (negative-numbered) syscall dispatch: bump the per-task count,
; branch to the tracing path if kdebug is enabled, bounds-check the
; number against mach_trap_table, then call through the table.
; Note that we don't care about any volatiles yet
;
			lwz		r10,ACT_TASK(r13)				; Get our task
			lwz		r0,saver0+4(r30)				; Reload the (negative) mach syscall number
			lis		r8,hi16(EXT(kdebug_enable))		; Get top of kdebug_enable
			lis		r28,hi16(EXT(mach_trap_table))	; Get address of table
			ori		r8,r8,lo16(EXT(kdebug_enable))	; Get bottom of kdebug_enable
			lwz		r8,0(r8)						; Get kdebug_enable

			lwz		r7,TASK_SYSCALLS_MACH(r10)		; Get the current count
			neg		r31,r0							; Make this positive
			slwi	r27,r31,MACH_TRAP_OFFSET_POW2	; Convert index to offset
			ori		r28,r28,lo16(EXT(mach_trap_table))	; Get address of table
			addi	r7,r7,1							; Bump TASK_SYSCALLS_MACH count
			cmplwi	r8,0							; Is kdebug_enable non-zero
			stw		r7,TASK_SYSCALLS_MACH(r10)		; Save count
			bne--	ksystrace						; yes, tracing enabled

			cmplwi	r31,MACH_TRAP_TABLE_COUNT		; Is this syscall in the table?
			add		r31,r27,r28						; Point right to the syscall table entry

			bge--	.L_call_server_syscall_exception	; The syscall number is invalid

			lwz		r0,MACH_TRAP_FUNCTION(r31)		; Pick up the function address

;
;	NOTE: We do not support more than 8 parameters for PPC.  The only
;	system call to use more than 8 is mach_msg_overwrite_trap and it
;	uses 9.  We pass a 0 in as number 9.
;
			lwz		r3,saver3+4(r30)				; Restore r3
			lwz		r4,saver4+4(r30)				; Restore r4
			lwz		r5,saver5+4(r30)				; Restore r5
			mtctr	r0								; Set the function call address
			lwz		r6,saver6+4(r30)				; Restore r6
			lwz		r7,saver7+4(r30)				; Restore r7
			lwz		r8,saver8+4(r30)				; Restore r8
			li		r0,0							; Clear this out
			lwz		r9,saver9+4(r30)				; Restore r9
			lwz		r10,saver10+4(r30)				; Restore r10
			stw		r0,FM_ARG0(r1)					; Clear that 9th parameter just in case some fool uses it
			bctrl									; perform the actual syscall
577
578 /*
579 * Ok, return from C function, R3 = return value
580 *
581 * get the active thread's PCB pointer and thus pointer to user state
582 * saved state is still in R30 and the active thread is in R16
583 */
584
.L_mach_return:										; Common return from a mach syscall; r3 = result
			mr		r31,r16							; Move the current thread pointer
			stw		r3,saver3+4(r30)				; Stash the return code
			cmpi	cr0,r3,KERN_INVALID_ARGUMENT	; deal with invalid system calls
			beq-	cr0,.L_mach_invalid_ret			; otherwise fall through into the normal return path
.L_mach_invalid_arg:


/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
 * the syscall may perform a thread_set_syscall_return
 * followed by a thread_exception_return, ending up
 * at thread_syscall_return below, with SS_R3 having
 * been set up already
 *
 * When we are here, r31 should point to the current thread,
 *                   r30 should point to the current pcb
 *                    r3 contains value that we're going to return to the user
 *                       which has already been stored back into the save area
 */

.L_thread_syscall_ret_check_ast:
			lis		r10,hi16(MASK(MSR_VEC))			; Get the vector enable
			mfmsr	r12								; Get the current MSR
			ori		r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE))	; Add in FP and EE
			andc	r12,r12,r10						; Turn off VEC, FP, and EE
			mtmsr	r12								; Turn interruptions off

			mfsprg	r10,0							; Get the per_processor block

/* Check to see if there's an outstanding AST */

			lwz		r4,PP_NEED_AST(r10)				; Get the pointer to the ast requests
			lwz		r4,0(r4)						; Get the flags
			cmpi	cr0,r4, 0						; Any pending asts?
			beq++	cr0,.L_syscall_no_ast			; Nope...

/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
 * ast_taken will save all state and bring us back here
 */

#if DEBUG
/* debug assert - make sure that we're not returning to kernel */
			lwz		r3,savesrr1+4(r30)				; Get the MSR we will return with
			andi.	r3,r3,MASK(MSR_PR)				; Isolate the problem-state (user) bit
			bne++	scrnotkern						; returning to user level, check

			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failContext					; Bad state code
			sc										; System ABEND

scrnotkern:
#endif	/* DEBUG */

			li		r3,AST_ALL						; Set ast flags
			li		r4,1							; Set interrupt allowed
			bl		EXT(ast_taken)					; Process the pending ast
			b		.L_thread_syscall_ret_check_ast	; Go see if there was another...
644
.L_mach_invalid_ret:
/*
 * need to figure out why we got a KERN_INVALID_ARGUMENT:
 * if it was due to a non-existent system call
 * then we want to throw an exception... otherwise
 * we want to pass the error code back to the caller
 */
			lwz		r0,saver0+4(r30)				; reload the original syscall number
			neg		r28,r0							; Make this positive
			slwi	r27,r28,MACH_TRAP_OFFSET_POW2	; Convert index to offset
			lis		r28,hi16(EXT(mach_trap_table))	; Get address of table
			ori		r28,r28,lo16(EXT(mach_trap_table))	; Get address of table
			add		r28,r27,r28						; Point right to the syscall table entry
			lwz		r27,MACH_TRAP_FUNCTION(r28)		; Pick up the function address
			lis		r28,hi16(EXT(kern_invalid))		; Get high half of invalid syscall function
			ori		r28,r28,lo16(EXT(kern_invalid))	; Get low half of invalid syscall function
			cmpw	cr0,r27,r28						; Check if this is an invalid system call
			beq--	.L_call_server_syscall_exception	; We have a bad system call
			b		.L_mach_invalid_arg				; a system call returned KERN_INVALID_ARG
664
665
666 /* thread_exception_return returns to here, almost all
667 * registers intact. It expects a full context restore
668 * of what it hasn't restored itself (ie. what we use).
669 *
670 * In particular for us,
671 * we still have r31 points to the current thread,
672 * r30 points to the current pcb
673 */
674
			.align	5

.L_syscall_no_ast:
.L_thread_syscall_return:
													; Common syscall exit: pop the savearea from
													; the activation, mark the kernel stack empty,
													; and restore the user context via chkfac
			mr		r3,r30							; Get savearea to the correct register for common exit

			lwz		r11,SAVflags(r30)				; Get the flags
			lwz		r5,THREAD_KERNEL_STACK(r31)		; Get the base pointer to the stack
			lwz		r4,SAVprev+4(r30)				; Get the previous save area
			rlwinm	r11,r11,0,15,13					; Clear the syscall flag
			mfsprg	r8,1							; Now find the current activation
			addi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE	; Reset to empty
			stw		r11,SAVflags(r30)				; Stick back the flags
			stw		r5,ACT_MACT_KSP(r8)				; Save the empty stack pointer
			stw		r4,ACT_MACT_PCB(r8)				; Save previous save area
			b		chkfac							; Go end it all...
692
693 /*
694 * thread_exception_return()
695 *
696 * Return to user mode directly from within a system call.
697 */
698
			.align	5
			.globl	EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)						; NOTE: THIS IS GOING AWAY IN A FEW DAYS....

			.globl	EXT(thread_exception_return)
LEXT(thread_exception_return)						; Directly return to user mode

.L_thread_exc_ret_check_ast:
			lis		r10,hi16(MASK(MSR_VEC))			; Get the vector enable
			mfmsr	r3								; Get the MSR
			ori		r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE))	; Add in FP and EE
			andc	r3,r3,r10						; Turn off VEC, FP, and EE
			mtmsr	r3								; Disable interrupts

/* Check to see if there's an outstanding AST */
/* We don't bother establishing a call frame even though CHECK_AST
   can invoke ast_taken(), because it can just borrow our caller's
   frame, given that we're not going to return.
*/

			mfsprg	r10,0							; Get the per_processor block
			lwz		r4,PP_NEED_AST(r10)				; Get the pointer to the ast requests
			lwz		r4,0(r4)						; Get the flags
			cmpi	cr0,r4, 0						; Any pending asts?
			beq+	cr0,.L_exc_ret_no_ast			; Nope...

/* Yes there is, call ast_taken
 * pretending that the user thread took an AST exception here,
 * ast_taken will save all state and bring us back here
 */

			li		r3,AST_ALL						; Set ast flags
			li		r4,1							; Set interrupt allowed

			bl		EXT(ast_taken)					; Process the pending ast
			b		.L_thread_exc_ret_check_ast		; check for a second AST (rare)

/* arriving here, interrupts should be disabled */
/* Get the active thread's PCB pointer to restore regs
 */
.L_exc_ret_no_ast:

			mfsprg	r30,1							; Get the current activation
			lwz		r31,ACT_THREAD(r30)				; Get the current thread

			lwz		r30,ACT_MACT_PCB(r30)			; Get the last savearea used
			mr.		r30,r30							; Is there any context yet?
			beq-	makeDummyCtx					; No, hack one up...
#if	DEBUG
/*
 * debug assert - make sure that we're not returning to kernel
 * get the active thread's PCB pointer and thus pointer to user state
 */

			lwz		r3,savesrr1+4(r30)				; Get the MSR we will return with
			andi.	r3,r3,MASK(MSR_PR)				; Isolate the problem-state (user) bit
			bne+	ret_user2						; We are ok...

			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failContext					; Bad state code
			sc										; System ABEND

ret_user2:
#endif	/* DEBUG */

/* If the system call flag isn't set, then we came from a trap,
 * so warp into the return_from_trap (thread_return) routine,
 * which takes PCB pointer in R3, not in r30!
 */
			lwz		r0,SAVflags(r30)				; Grab the savearea flags
			andis.	r0,r0,SAVsyscall>>16			; Are we returning from a syscall?
			mr		r3,r30							; Copy pcb pointer into r3 in case we need it
			beq--	cr0,thread_return				; Nope, must be a thread return...
			b		.L_thread_syscall_return		; Join up with the system call return...
774
775 ;
;			This is where we handle someone who did a thread_create followed
777 ; by a thread_resume with no intervening thread_set_state. Just make an
778 ; empty context, initialize it to trash and let em execute at 0...
779 ;
780
			.align	5

makeDummyCtx:										; Fabricate an empty user context: allocate a
													; savearea, zero it through savefpscr, and set
													; the default user MSR in its SRR1 slot
			bl		EXT(save_get)					; Get a save_area
			li		r4,SAVgeneral					; Get the general context type
			li		r0,0							; Get a 0
			stb		r4,SAVflags+2(r3)				; Set type
			addi	r2,r3,savefpscr+4				; Point past what we are clearing
			mr		r4,r3							; Save the start

cleardummy:	stw		r0,0(r4)						; Clear stuff
			addi	r4,r4,4							; Next word
			cmplw	r4,r2							; Still some more?
			blt+	cleardummy						; Yeah...

			lis		r2,hi16(MSR_EXPORT_MASK_SET)	; Set the high part of the user MSR
			ori		r2,r2,lo16(MSR_EXPORT_MASK_SET)	; And the low part
			stw		r2,savesrr1+4(r3)				; Set the default user MSR

			b		thread_return					; Go let em try to execute, hah!
801
802 /*
803 * ihandler(type)
804 *
805 * ENTRY: VM switched ON
806 * Interrupts OFF
807 * R3 contains exception code
808 * R4 points to the saved context (virtual address)
809 * Everything is saved in savearea
810 *
811 */
812
/*
 * ihandler -- interrupt handler.
 *
 * On entry (VM on, interrupts off): r3 = exception code, r4 = savearea.
 * Queues the savearea, switches to the interrupt stack (or verifies we
 * are validly on it / on the debug stack already), builds a backchain
 * frame, and calls interrupt().
 */
			.align	5
			.globl	EXT(ihandler)
LEXT(ihandler)										; Interrupt handler

/*
 * get the value of istackptr, if it's zero then we're already on the
 * interrupt stack.
 */

			lwz		r10,savesrr1+4(r4)				; Get SRR1
			lwz		r7,savevrsave(r4)				; Get the VRSAVE register
			mfsprg	r25,0							; Get the per_proc block
			li		r14,0							; Zero this for now
			rlwinm.	r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT	; Was vector on?
			lwz		r1,PP_ISTACKPTR(r25)			; Get the interrupt stack
			mfsprg	r13,1							; Get the current thread
			li		r16,0							; Zero this for now

			beq+	ivecoff							; Vector off, do not save vrsave...
			stw		r7,liveVRS(r25)					; Set the live value

ivecoff:	li		r0,0							; Get a constant 0
			rlwinm	r5,r10,0,MSR_PR_BIT,MSR_PR_BIT	; Are we trapping from supervisor state?
			mr.		r1,r1							; Is it active?
			cmplwi	cr2,r5,0						; cr2_eq == 1 if yes
			lwz		r16,ACT_THREAD(r13)				; Get the shuttle
			lwz		r14,ACT_MACT_PCB(r13)			; Now point to the PCB
			lwz		r9,saver1+4(r4)					; Pick up the rupt time stack
			stw		r14,SAVprev+4(r4)				; Queue the new save area in the front
			stw		r13,SAVact(r4)					; Point the savearea at its activation
			stw		r4,ACT_MACT_PCB(r13)			; Point to our savearea
			beq		cr2,ifromk						; From kernel, do not set the user savearea
			stw		r4,ACT_MACT_UPCB(r13)			; Store user savearea

ifromk:		bne		.L_istackfree					; Nope...

/* We're already on the interrupt stack, get back the old
 * stack pointer and make room for a frame
 */

			lwz		r10,PP_INTSTACK_TOP_SS(r25)		; Get the top of the interrupt stack
			addi	r5,r9,INTSTACK_SIZE-FM_SIZE		; Shift stack for bounds check
			subi	r1,r9,FM_REDZONE				; Back up beyond the red zone
			sub		r5,r5,r10						; Get displacement into stack
			cmplwi	r5,INTSTACK_SIZE-FM_SIZE		; Is the stack actually invalid?
			blt+	ihsetback						; The stack is ok...

			lwz		r5,PP_DEBSTACK_TOP_SS(r25)		; Pick up debug stack top
			subi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE	; Adjust to start of stack
			sub		r5,r1,r5						; Get displacement into debug stack
			cmplwi	cr2,r5,KERNEL_STACK_SIZE-FM_SIZE	; Check if we are on debug stack
			blt+	cr2,ihsetback					; Yeah, that is ok too...

			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failStack					; Bad stack code
			sc										; System ABEND

intUnalignedStk:									; Interrupt stack was not 16-byte aligned; abort
			lis		r0,hi16(Choke)					; Choke code
			ori		r0,r0,lo16(Choke)				; and the rest
			li		r3,failUnalignedStk				; Unaligned stack code
			sc										; System ABEND

			.align	5

.L_istackfree:
			rlwinm.	r0,r1,0,28,31					; Check if stack is aligned (and get 0)
			lwz		r10,SAVflags(r4)				; Get savearea flags
			bne--	intUnalignedStk					; Stack is unaligned...
			stw		r0,PP_ISTACKPTR(r25)			; Mark the stack in use
			oris	r10,r10,hi16(SAVrststk)			; Indicate we reset stack when we return from this one
			stw		r10,SAVflags(r4)				; Stick it back

/*
 * To summarize, when we reach here, the state has been saved and
 * the stack is marked as busy.  We now generate a small
 * stack frame with backpointers to follow the calling
 * conventions.  We set up the backpointers to the trapped
 * routine allowing us to backtrace.
 */

ihsetback:	subi	r1,r1,FM_SIZE					; Make a new frame
			stw		r9,FM_BACKPTR(r1)				; Point back to previous stackptr

#if VERIFYSAVE
			beq-	cr1,ihbootnover					; (TEST/DEBUG)
			bl		versave							; (TEST/DEBUG)
ihbootnover:										; (TEST/DEBUG)
#endif

#if	DEBUG
/* If debugging, we need two frames, the first being a dummy
 * which links back to the trapped routine. The second is
 * that which the C routine below will need
 */
			lwz		r5,savesrr0+4(r4)				; Get interrupt address
			stw		r5,FM_LR_SAVE(r1)				; save old instr ptr as LR value
			stwu	r1,-FM_SIZE(r1)					; Make another new frame for C routine
#endif /* DEBUG */

			lwz		r5,savedsisr(r4)				; Get the DSISR
			lwz		r6,savedar+4(r4)				; Get the DAR

			bl		EXT(interrupt)					; Call the C interrupt handler
918
919
/* interrupt() returns a pointer to the saved state in r3
 *
 * Ok, back from C. Disable interrupts while we restore things
 */
		.globl EXT(ihandler_ret)

LEXT(ihandler_ret)					; Marks our return point from debugger entry

		lis	r10,hi16(MASK(MSR_VEC))			; Get the vector enable
		mfmsr	r0					; Get our MSR
		ori	r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE))	; Add in FP and EE
		andc	r0,r0,r10				; Turn off VEC, FP, and EE
		mtmsr	r0					; Make sure interrupts are disabled
		mfsprg	r10,0					; Get the per_proc block

		lwz	r7,SAVflags(r3)				; Pick up the flags
		mfsprg	r8,1					; Get the current thread
		lwz	r9,SAVprev+4(r3)			; Get previous save area
		cmplwi	cr1,r8,0				; Are we still initializing?
		lwz	r12,savesrr1+4(r3)			; Get the MSR we will load on return
		lwz	r8,THREAD_TOP_ACT(r8)			; Pick up the active thread
		andis.	r11,r7,hi16(SAVrststk)			; Is this the first on the stack?
		stw	r9,ACT_MACT_PCB(r8)			; Point to previous context savearea
		mr	r4,r3					; Move the savearea pointer
		beq	.L_no_int_ast2				; Get going if not the top-o-stack...


/* We're the last frame on the stack. Restore istackptr to empty state.
 *
 * Check for ASTs if one of the below is true:
 *    returning to user mode
 *    returning to a kloaded server
 */
		lwz	r9,PP_INTSTACK_TOP_SS(r10)		; Get the empty stack value
		andc	r7,r7,r11				; Remove the stack reset bit in case we pass this one
		stw	r9,PP_ISTACKPTR(r10)			; Mark the interrupt stack free again
		lwz	r3,ACT_PREEMPT_CNT(r8)			; Get preemption level
		stw	r7,SAVflags(r4)				; Save the flags
		cmplwi	r3, 0					; Check for preemption
		bne	.L_no_int_ast				; Do not preempt if level is not zero
		andi.	r6,r12,MASK(MSR_PR)			; Will we return to user (privilege) mode?
		lwz	r11,PP_NEED_AST(r10)			; Get the AST request address
		lwz	r11,0(r11)				; Get the request
		beq-	.L_kernel_int_ast			; In kernel space, AST_URGENT check
		li	r3,T_AST				; Assume the worst
		mr.	r11,r11					; Are there any pending?
		beq	.L_no_int_ast				; Nope...
		b	.L_call_thandler

.L_kernel_int_ast:
		andi.	r11,r11,AST_URGENT			; Do we have AST_URGENT?
		li	r3,T_PREEMPT				; Assume the worst
		beq	.L_no_int_ast				; Nope...

/*
 * There is a pending AST. Massage things to make it look like
 * we took a trap and jump into the trap handler. To do this
 * we essentially pretend to return from the interrupt but
 * at the last minute jump into the trap handler with an AST
 * trap instead of performing an rfi.
 */

.L_call_thandler:
		stw	r3,saveexception(r4)			; Set the exception code to T_AST/T_PREEMPT
		b	EXT(thandler)				; We need to preempt so treat like a trap...

.L_no_int_ast:
		mr	r3,r4					; Get into the right register for common code

.L_no_int_ast2:
		rlwinm	r7,r7,0,15,13				; Clear the syscall flag
		li	r4,0					; Assume for a moment that we are in init
		stw	r7,SAVflags(r3)				; Set the flags with cleared syscall flag
		beq--	cr1,chkfac				; Jump away if we are in init...

		lwz	r4,ACT_MACT_PCB(r8)			; Get the new level marker
996
997
998 ;
999 ; This section is common to all exception exits. It throws away vector
1000 ; and floating point saveareas as the exception level of a thread is
1001 ; exited.
1002 ;
1003 ; It also enables the facility if its context is live
1004 ; Requires:
1005 ; R3 = Savearea to be released (virtual)
1006 ; R4 = New top of savearea stack (could be 0)
1007 ; R8 = pointer to activation
1008 ; R10 = per_proc block
1009 ;
;		Note that barring unforeseen crashes, there is no escape from this point
;		on. We WILL call exception_exit and launch this context. No worries
;		about preemption or interruptions here.
;
;		Note that we will set up R26 with whatever context we will be launching,
;		so it will indicate the current, or the deferred one if it is set and we
;		are going to user state. CR2_eq will be set to indicate deferred.
1017 ;
1018
chkfac:		lwz	r29,savesrr1+4(r3)			; Get the current MSR
		mr.	r28,r8					; Are we still in boot?
		mr	r31,r10					; Move per_proc address
		mr	r30,r4					; Preserve new level
		mr	r27,r3					; Save the old level
		beq--	chkenax					; Yeah, skip it all...

		rlwinm.	r0,r29,0,MSR_PR_BIT,MSR_PR_BIT		; Are we going into user state?

		lwz	r20,curctx(r28)				; Get our current context
		lwz	r26,deferctx(r28)			; Get any deferred context switch
		li	r0,1					; Get set to hold off quickfret
		rlwinm	r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1	; Turn off floating point for now
		lwz	r21,FPUlevel(r20)			; Get the facility level
		cmplwi	cr2,r26,0				; Are we going into a deferred context later?
		rlwinm	r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1	; Turn off vector for now
		crnor	cr2_eq,cr0_eq,cr2_eq			; Set cr2_eq if going to user state and there is deferred
		lhz	r19,PP_CPU_NUMBER(r31)			; Get our CPU number
		cmplw	r27,r21					; Are we returning from the active level?
		stw	r0,holdQFret(r31)			; Make sure we hold off releasing quickfret
		bne++	fpuchkena				; Nope...

;
;		First clean up any live context we are returning from
;

		lwz	r22,FPUcpu(r20)				; Get CPU this context was last dispatched on

		stw	r19,FPUcpu(r20)				; Claim context for us

		eieio						; Make sure this gets out before owner clear

#if ppSize != 4096
#error per_proc_info is not 4k in size
#endif

		lis	r23,hi16(EXT(per_proc_info))		; Set base per_proc
		slwi	r22,r22,12				; Find offset to the owner per_proc (each is 4k)
		ori	r23,r23,lo16(EXT(per_proc_info))	; Set base per_proc
		li	r24,FPUowner				; Displacement to FPU owner
		add	r22,r23,r22				; Point to the owner per_proc

;		Atomically clear the old owner's FPUowner field, but only if it
;		still points at our context (branch-free compare via xor trick).

fpuinvothr:	lwarx	r23,r24,r22				; Get the owner

		sub	r0,r23,r20				; Subtract one from the other
		sub	r21,r20,r23				; Subtract the other from the one
		or	r21,r21,r0				; Combine them
		srawi	r21,r21,31				; Get a 0 if equal or -1 of not
		and	r23,r23,r21				; Make 0 if same, unchanged if not
		stwcx.	r23,r24,r22				; Try to invalidate it
		bne--	fpuinvothr				; Try again if there was a collision...

		isync

;
;		Now if there is a savearea associated with the popped context, release it.
;		Either way, pop the level to the top stacked context.
;

		lwz	r22,FPUsave(r20)			; Get pointer to the first savearea
		li	r21,0					; Assume we popped all the way out
		mr.	r22,r22					; Is there anything there?
		beq++	fpusetlvl				; No, see if we need to enable...

		lwz	r21,SAVlevel(r22)			; Get the level of that savearea
		cmplw	r21,r27					; Is this the saved copy of the live stuff?
		bne	fpusetlvl				; No, leave as is...

		lwz	r24,SAVprev+4(r22)			; Pick up the previous area
		li	r21,0					; Assume we popped all the way out
		mr.	r24,r24					; Any more context stacked?
		beq--	fpuonlyone				; Nope...
		lwz	r21,SAVlevel(r24)			; Get the level associated with save

fpuonlyone:	stw	r24,FPUsave(r20)			; Dequeue this savearea

		rlwinm	r3,r22,0,0,19				; Find main savearea header

		; Push the released savearea (r22) onto the per_proc quickfret
		; list; the list holds PHYSICAL addresses, hence the vrswap xor.
		lwz	r8,quickfret(r31)			; Get the first in quickfret list (top)
		lwz	r9,quickfret+4(r31)			; Get the first in quickfret list (bottom)
		lwz	r2,SACvrswap(r3)			; Get the virtual to real conversion (top)
		lwz	r3,SACvrswap+4(r3)			; Get the virtual to real conversion (bottom)
		stw	r8,SAVprev(r22)				; Link the old in (top)
		stw	r9,SAVprev+4(r22)			; Link the old in (bottom)
		xor	r3,r22,r3				; Convert to physical
		stw	r2,quickfret(r31)			; Set the first in quickfret list (top)
		stw	r3,quickfret+4(r31)			; Set the first in quickfret list (bottom)

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)			; (TEST/DEBUG)
		li	r2,0x3301				; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)		; (TEST/DEBUG)
		sc						; (TEST/DEBUG)
#endif

fpusetlvl:	stw	r21,FPUlevel(r20)			; Save the level

;
;		Here we check if we are at the right level
;		We need to check the level we are entering, not the one we are exiting.
;		Therefore, we will use the defer level if it is non-zero and we are
;		going into user state.
;
1122
;		Decide whether to re-enable the FPU for the context being launched.
;		R26 = context to launch (deferred one if cr2_eq set).

fpuchkena:	bt--	cr2_eq,fpuhasdfrd			; Skip if deferred, R26 already set up...
		mr	r26,r20					; Use the non-deferred value

fpuhasdfrd:
#if 0
		rlwinm.	r0,r29,0,MSR_PR_BIT,MSR_PR_BIT		; (TEST/DEBUG) Going into user state?
		beq	fpunusrstt				; (TEST/DEBUG) Nope...
		lwz	r23,FPUlevel(r26)			; (TEST/DEBUG) Get the level ID
		lwz	r24,FPUsave(r26)			; (TEST/DEBUG) Get the first savearea
		mr.	r23,r23					; (TEST/DEBUG) Should be level 0
		beq++	fpulvl0					; (TEST/DEBUG) Yes...
		BREAKPOINT_TRAP					; (TEST/DEBUG)

fpulvl0:	mr.	r24,r24					; (TEST/DEBUG) Any context?
		beq	fpunusrstt				; (TEST/DEBUG) No...
		lwz	r23,SAVlevel(r24)			; (TEST/DEBUG) Get level of context
		lwz	r21,SAVprev+4(r24)			; (TEST/DEBUG) Get previous pointer
		mr.	r23,r23					; (TEST/DEBUG) Is this our user context?
		beq++	fpulvl0b				; (TEST/DEBUG) Yes...
		BREAKPOINT_TRAP					; (TEST/DEBUG)

fpulvl0b:	mr.	r21,r21					; (TEST/DEBUG) Is there a forward chain?
		beq++	fpunusrstt				; (TEST/DEBUG) Nope...
		BREAKPOINT_TRAP					; (TEST/DEBUG)

fpunusrstt:						; (TEST/DEBUG)
#endif

		; Enable only if this context is the live one, at the live level,
		; and was last dispatched on THIS cpu.
		lwz	r21,FPUowner(r31)			; Get the ID of the live context
		lwz	r23,FPUlevel(r26)			; Get the level ID
		lwz	r24,FPUcpu(r26)				; Get the CPU that the context was last dispatched on
		cmplw	cr3,r26,r21				; Do we have the live context?
		cmplw	r30,r23					; Are we about to launch the live level?
		bne--	cr3,chkvec				; No, can not possibly enable...
		cmplw	cr1,r19,r24				; Was facility used on this processor last?
		bne--	chkvec					; No, not live...
		bne--	cr1,chkvec				; No, wrong cpu, have to enable later....

		lwz	r24,FPUsave(r26)			; Get the first savearea
		mr.	r24,r24					; Any savearea?
		beq++	fpuena					; Nope...
		lwz	r25,SAVlevel(r24)			; Get the level of savearea
		lwz	r0,SAVprev+4(r24)			; Get the previous
		cmplw	r30,r25					; Is savearea for the level we are launching?
		bne++	fpuena					; No, just go enable...

		; The stale saved copy of the live level is discarded to quickfret.
		stw	r0,FPUsave(r26)				; Pop the chain

		rlwinm	r3,r24,0,0,19				; Find main savearea header

		lwz	r8,quickfret(r31)			; Get the first in quickfret list (top)
		lwz	r9,quickfret+4(r31)			; Get the first in quickfret list (bottom)
		lwz	r2,SACvrswap(r3)			; Get the virtual to real conversion (top)
		lwz	r3,SACvrswap+4(r3)			; Get the virtual to real conversion (bottom)
		stw	r8,SAVprev(r24)				; Link the old in (top)
		stw	r9,SAVprev+4(r24)			; Link the old in (bottom)
		xor	r3,r24,r3				; Convert to physical
		stw	r2,quickfret(r31)			; Set the first in quickfret list (top)
		stw	r3,quickfret+4(r31)			; Set the first in quickfret list (bottom)

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)			; (TEST/DEBUG)
		li	r2,0x3302				; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)		; (TEST/DEBUG)
		sc						; (TEST/DEBUG)
#endif

fpuena:		ori	r29,r29,lo16(MASK(MSR_FP))		; Enable facility
1191
;		Same dance as the FPU above, but for the vector (AltiVec) facility.

chkvec:

		lwz	r21,VMXlevel(r20)			; Get the facility level

		cmplw	r27,r21					; Are we returning from the active level?
		bne+	vmxchkena				; Nope...


;
;		First clean up any live context we are returning from
;

		lwz	r22,VMXcpu(r20)				; Get CPU this context was last dispatched on

		stw	r19,VMXcpu(r20)				; Claim context for us

		eieio						; Make sure this gets out before owner clear

		lis	r23,hi16(EXT(per_proc_info))		; Set base per_proc
		slwi	r22,r22,12				; Find offset to the owner per_proc
		ori	r23,r23,lo16(EXT(per_proc_info))	; Set base per_proc
		li	r24,VMXowner				; Displacement to VMX owner
		add	r22,r23,r22				; Point to the owner per_proc

;		Atomically clear the old owner's VMXowner if it still names us.

vmxinvothr:	lwarx	r23,r24,r22				; Get the owner

		sub	r0,r23,r20				; Subtract one from the other
		sub	r21,r20,r23				; Subtract the other from the one
		or	r21,r21,r0				; Combine them
		srawi	r21,r21,31				; Get a 0 if equal or -1 of not
		and	r23,r23,r21				; Make 0 if same, unchanged if not
		stwcx.	r23,r24,r22				; Try to invalidate it
		bne--	vmxinvothr				; Try again if there was a collision...

		isync

;
;		Now if there is a savearea associated with the popped context, release it.
;		Either way, pop the level to the top stacked context.
;

		lwz	r22,VMXsave(r20)			; Get pointer to the first savearea
		li	r21,0					; Assume we popped all the way out
		mr.	r22,r22					; Is there anything there?
		beq++	vmxsetlvl				; No, see if we need to enable...

		lwz	r21,SAVlevel(r22)			; Get the level of that savearea
		cmplw	r21,r27					; Is this the saved copy of the live stuff?
		bne	vmxsetlvl				; No, leave as is...

		lwz	r24,SAVprev+4(r22)			; Pick up the previous area
		li	r21,0					; Assume we popped all the way out
		mr.	r24,r24					; Any more context?
		beq--	vmxonlyone				; Nope...
		lwz	r21,SAVlevel(r24)			; Get the level associated with save
1247
;		Dequeue the popped vector savearea (r22) and push it onto the
;		per_proc quickfret list for lazy release. The quickfret chain
;		holds PHYSICAL addresses, so the address queued must be the
;		vrswap-converted address of the savearea being freed.
;
;		FIX: the physical conversion below previously xor'ed r24 (the
;		NEW top of the save chain, possibly 0) instead of r22 (the
;		savearea actually being released, whose SAVprev we just
;		linked). That queued a bogus physical address on quickfret and
;		leaked/corrupted the freed savearea. The FPU twin of this code
;		(fpuonlyone, above) correctly converts r22; match it.

vmxonlyone:	stw	r24,VMXsave(r20)			; Dequeue this savearea

		rlwinm	r3,r22,0,0,19				; Find main savearea header

		lwz	r8,quickfret(r31)			; Get the first in quickfret list (top)
		lwz	r9,quickfret+4(r31)			; Get the first in quickfret list (bottom)
		lwz	r2,SACvrswap(r3)			; Get the virtual to real conversion (top)
		lwz	r3,SACvrswap+4(r3)			; Get the virtual to real conversion (bottom)
		stw	r8,SAVprev(r22)				; Link the old in (top)
		stw	r9,SAVprev+4(r22)			; Link the old in (bottom)
		xor	r3,r22,r3				; Convert the released savearea to physical
		stw	r2,quickfret(r31)			; Set the first in quickfret list (top)
		stw	r3,quickfret+4(r31)			; Set the first in quickfret list (bottom)
1261
#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)			; (TEST/DEBUG)
		li	r2,0x3401				; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)		; (TEST/DEBUG)
		sc						; (TEST/DEBUG)
#endif

vmxsetlvl:	stw	r21,VMXlevel(r20)			; Save the level

;
;		Here we check if we are at the right level
;
1274
;		Enable vector only if R26 is the live context, at the live
;		level, and was last dispatched on THIS cpu (mirrors fpuchkena).

vmxchkena:	lwz	r21,VMXowner(r31)			; Get the ID of the live context
		lwz	r23,VMXlevel(r26)			; Get the level ID
		cmplw	r26,r21					; Do we have the live context?
		lwz	r24,VMXcpu(r26)				; Get the CPU that the context was last dispatched on
		bne--	setena					; No, can not possibly enable...
		cmplw	r30,r23					; Are we about to launch the live level?
		cmplw	cr1,r19,r24				; Was facility used on this processor last?
		bne--	setena					; No, not live...
		bne--	cr1,setena				; No, wrong cpu, have to enable later....

		lwz	r24,VMXsave(r26)			; Get the first savearea
		mr.	r24,r24					; Any savearea?
		beq++	vmxena					; Nope...
		lwz	r25,SAVlevel(r24)			; Get the level of savearea
		lwz	r0,SAVprev+4(r24)			; Get the previous
		cmplw	r30,r25					; Is savearea for the level we are launching?
		bne++	vmxena					; No, just go enable...

		; Discard the stale saved copy of the live level to quickfret.
		stw	r0,VMXsave(r26)				; Pop the chain

		rlwinm	r3,r24,0,0,19				; Find main savearea header

		lwz	r8,quickfret(r31)			; Get the first in quickfret list (top)
		lwz	r9,quickfret+4(r31)			; Get the first in quickfret list (bottom)
		lwz	r2,SACvrswap(r3)			; Get the virtual to real conversion (top)
		lwz	r3,SACvrswap+4(r3)			; Get the virtual to real conversion (bottom)
		stw	r8,SAVprev(r24)				; Link the old in (top)
		stw	r9,SAVprev+4(r24)			; Link the old in (bottom)
		xor	r3,r24,r3				; Convert to physical
		stw	r2,quickfret(r31)			; Set the first in quickfret list (top)
		stw	r3,quickfret+4(r31)			; Set the first in quickfret list (bottom)

#if FPVECDBG
		lis	r0,HIGH_ADDR(CutTrace)			; (TEST/DEBUG)
		li	r2,0x3402				; (TEST/DEBUG)
		oris	r0,r0,LOW_ADDR(CutTrace)		; (TEST/DEBUG)
		sc						; (TEST/DEBUG)
#endif

vmxena:		oris	r29,r29,hi16(MASK(MSR_VEC))		; Enable facility
1315
;		Commit the facility enables into the launch MSR and record which
;		facilities just came live in r20 (floatCngbit/vectorCngbit).
;
;		FIX: the two flag-building instructions were swapped. The first
;		must be a non-recording rlwinm that EXTRACTS the float flag into
;		r20 (r20 still holds the curctx pointer here, so an rlwimi would
;		merge the flag into pointer garbage and set cr0 on that garbage);
;		the second must be a recording rlwimi. that INSERTS the vector
;		flag and sets cr0 on the combined result, which is what the
;		"beq setenaa" below tests ("neither float nor vector enabled").

setena:		lwz	r18,cioSpace(r28)			; Get the space ID in case we are launching user
		rlwinm.	r0,r29,0,MSR_PR_BIT,MSR_PR_BIT		; Are we about to launch user state?
		li	r0,0					; Get set to release quickfret holdoff
		crmove	cr7_eq,cr0_eq				; Remember if we are going to user state
		rlwinm	r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit	; Set flag if we enabled floats
		lwz	r19,deferctx(r28)			; Get any deferred facility context switch
		rlwimi.	r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit	; Set flag if we enabled vector (cr0 = both flags)
		stw	r29,savesrr1+4(r27)			; Turn facility on or off
		stw	r0,holdQFret(r31)			; Release quickfret
		oris	r18,r18,hi16(cioSwitchAway)		; Set the switch-away bit in case we go to user

		beq	setenaa					; Neither float nor vector turned on....

		lwz	r5,ACT_MACT_SPF(r28)			; Get activation copy
		lwz	r6,spcFlags(r31)			; Get per_proc copy
		or	r5,r5,r20				; Set vector/float changed bits in activation
		or	r6,r6,r20				; Set vector/float changed bits in per_proc
		stw	r5,ACT_MACT_SPF(r28)			; Set activation copy
		stw	r6,spcFlags(r31)			; Set per_proc copy
1335
setenaa:	mfdec	r24					; Get decrementer
		bf+	cr2_eq,nodefer				; No deferred to switch to...

		li	r20,0					; Clear this
		stw	r26,curctx(r28)				; Make the facility context current
		stw	r20,deferctx(r28)			; Clear deferred context

nodefer:	lwz	r22,qactTimer(r28)			; Get high order quick activation timer
		mr.	r24,r24					; See if it has popped already...
		lwz	r23,qactTimer+4(r28)			; Get low order qact timer
		ble-	chkifuser				; We have popped or are just about to...

;		Read the 64-bit timebase; re-read if the upper half ticked
;		between the two mftbu's (standard PPC consistent-read loop).

segtb:		mftbu	r20					; Get the upper time base
		mftb	r21					; Get the low
		mftbu	r19					; Get upper again
		or.	r0,r22,r23				; Any time set?
		cmplw	cr1,r20,r19				; Did they change?
		beq++	chkifuser				; No time set....
		bne--	cr1,segtb				; Timebase ticked, get them again...

		; 64-bit subtract: qactTimer - timebase. If qact has already
		; passed, both halves are masked to 0 below.
		subfc	r6,r21,r23				; Subtract current from qact time
		li	r0,0					; Make a 0
		subfe	r5,r20,r22				; Finish subtract
		subfze	r0,r0					; Get a 0 if qact was bigger than current, -1 otherwise
		andc.	r12,r5,r0				; Set 0 if qact has passed
		andc	r13,r6,r0				; Set 0 if qact has passed
		bne	chkifuser				; If high order is non-zero, this is too big for a decrementer
		cmplw	r13,r24					; Is this earlier than the decrementer? (logical compare takes care of high bit on)
		bge++	chkifuser				; No, do not reset decrementer...

		mtdec	r13					; Set our value

chkifuser:	beq--	cr7,chkenax				; Skip this if we are going to kernel...
		stw	r18,cioSpace(r28)			; Half-invalidate to force MapUserAddressSpace to reload SRs

chkenax:


#if DEBUG
		; Sanity check: the savearea we are about to restore must belong
		; to the current thread (unless still initializing).
		lwz	r20,SAVact(r27)				; (TEST/DEBUG) Make sure our restore
		mfsprg	r21, 1					; (TEST/DEBUG) with the current act.
		cmpwi	r21,0					; (TEST/DEBUG)
		beq--	yeswereok				; (TEST/DEBUG)
		cmplw	r21,r20					; (TEST/DEBUG)
		beq++	yeswereok				; (TEST/DEBUG)

		lis	r0,hi16(Choke)				; (TEST/DEBUG) Choke code
		ori	r0,r0,lo16(Choke)			; (TEST/DEBUG) and the rest
		mr	r21,r27					; (TEST/DEBUG) Save the savearea address
		li	r3,failContext				; (TEST/DEBUG) Bad state code
		sc						; (TEST/DEBUG) System ABEND

yeswereok:
#endif

		mr	r3,r27					; Pass savearea back
		b	EXT(exception_exit)			; We are all done now...
1393
1394
1395
;
;		Null PPC call - performance testing, does absolutely nothing
;		Returns -1 in r3 so the syscall exit path skips AST checks.
;

		.align	5

		.globl	EXT(ppcNull)

LEXT(ppcNull)

		li	r3,-1					; Make sure we test no asts
		blr

;
;		Instrumented null PPC call - performance testing, does absolutely nothing
;		Forces various timestamps to be returned.
;		NOTE(review): currently identical to ppcNull; the instrumentation
;		presumably lives behind INSTRUMENT elsewhere - confirm before relying on it.
;

		.align	5

		.globl	EXT(ppcNullinst)

LEXT(ppcNullinst)

		li	r3,-1					; Make sure we test no asts
		blr
1423
1424
1425 /*
1426 * Here's where we handle the fastpath stuff
 * We'll do what we can here because registers are already
 * loaded and it will be less confusing than moving them around.
 * If we need to, though, we'll branch off somewhere else.
1430 *
1431 * Registers when we get here:
1432 *
1433 * r0 = syscall number
1434 * r4 = savearea/pcb
1435 * r13 = activation
1436 * r14 = previous savearea (if any)
1437 * r16 = thread
1438 * r25 = per_proc
1439 */
1440
		.align	5

;		Fast-path system call dispatch. Only two codes are handled:
;		0x7FF5 (null) and 0x7FF1 (cthread_set_self); anything else
;		returns to the normal syscall path via the bnelr.

fastpath:	cmplwi	cr3,r0,0x7FF5				; Is this a null fastpath?
		beq--	cr3,fastexutl				; Yes, bail fast...
		cmplwi	cr3,r0,0x7FF1				; Is it CthreadSetSelfNumber?
		bnelr--	cr3					; Not a fast path...

/*
 * void cthread_set_self(cproc_t p)
 *
 * Set the thread's "user_value" (cthread self pointer).
 *
 * This op is invoked as follows:
 *         li   r0, CthreadSetSelfNumber  // load the fast-trap number
 *         sc                             // invoke fast-trap
 *         blr
 *
 * Falls through to fastexit below.
 */

CthreadSetSelfNumber:

		lwz	r5,saver3+4(r4)				/* Retrieve the self number */
		stw	r5,CTHREAD_SELF(r13)			/* Remember it */
		stw	r5,UAW(r25)				/* Prime the per_proc_info with it */


		.globl	EXT(fastexit)
EXT(fastexit):
fastexutl:	mr	r3,r4					; Pass back savearea
		b	EXT(exception_exit)			; Go back to the caller...
1471
1472
/*
 * Here's where we check for a hit on the Blue Box Assist
 * Most registers are non-volatile, so be careful here. If we don't
 * recognize the trap instruction we go back for regular processing.
 * Otherwise we transfer to the assist code.
 */

		.align	5

checkassist:
		lwz	r0,saveexception(r4)			; Get the exception code
		lwz	r23,savesrr1+4(r4)			; Get the interrupted MSR
		lwz	r26,ACT_MACT_BEDA(r13)			; Get Blue Box Descriptor Area
		mtcrf	0x18,r23				; Check what SRR1 says
		lwz	r24,ACT_MACT_BTS(r13)			; Get the table start
		cmplwi	r0,T_AST				; Check for T_AST trap
		lwz	r27,savesrr0+4(r4)			; Get trapped address
		crnand	cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT	; We need both trap and user state
		sub	r24,r27,r24				; See how far into it we are
		cror	cr0_eq,cr0_eq,cr1_eq			; Need to bail if AST or not trap or not user state
		cmplwi	cr1,r24,BB_MAX_TRAP			; Do we fit in the list?
		cror	cr0_eq,cr0_eq,cr1_gt			; Also leave if trap not in range
		btlr-	cr0_eq					; No assist if AST or not trap or not user state or trap not in range
		b	EXT(atomic_switch_trap)			; Go to the assist...
1497
1498 ;
1499 ; Virtual Machine Monitor
1500 ; Here is where we exit from the emulated context
1501 ; Note that most registers get trashed here
;			R3 and R30 are preserved across the call and hold the activation
;			and savearea respectively.
1504 ;
1505
		.align	5

;		Exit from an emulated (virtual machine) context.
;		In: r4 = savearea, r13 = activation.

exitFromVM:	mr	r30,r4					; Get the savearea
		mr	r3,r13					; Get the activation

		b	EXT(vmm_exit)				; Do it to it

		.align	5
		.globl	EXT(retFromVM)

;		Return to the host after a VM exit; rejoins the common
;		exception-exit path at chkfac.

LEXT(retFromVM)
		mfsprg	r10,0					; Restore the per_proc info
		mr	r8,r3					; Get the activation
		lwz	r4,SAVprev+4(r30)			; Pick up the previous savearea
		mr	r3,r30					; Put savearea in proper register for common code
		lwz	r11,SAVflags(r30)			; Get the flags of the current savearea
		rlwinm	r11,r11,0,15,13				; Clear the syscall flag
		lwz	r1,ACT_THREAD(r8)			; and the active thread
		stw	r11,SAVflags(r3)			; Save back the flags (with reset stack cleared)

		stw	r4,ACT_MACT_PCB(r8)			; Point to the previous savearea (or 0 if none)

		lwz	r5,THREAD_KERNEL_STACK(r1)		; Get the base pointer to the stack
		addi	r5,r5,KERNEL_STACK_SIZE-FM_SIZE		; Reset to empty
		stw	r5,ACT_MACT_KSP(r8)			; Save the empty stack pointer
		b	chkfac					; Go end it all...
1532
1533
1534 ;
1535 ; chandler (note: not a candle maker or tallow merchant)
1536 ;
1537 ; Here is the system choke handler. This is where the system goes
1538 ; to die.
1539 ;
1540 ; We get here as a result of a T_CHOKE exception which is generated
1541 ; by the Choke firmware call or by lowmem_vectors when it detects a
1542 ; fatal error. Examples of where this may be used is when we detect
1543 ; problems in low-level mapping chains, trashed savearea free chains,
1544 ; or stack guardpage violations.
1545 ;
1546 ; Note that we can not set a back chain in the stack when we come
1547 ; here because we are probably here because the chain was corrupt.
1548 ;
1549
1550
		.align	5
		.globl	EXT(chandler)
LEXT(chandler)						; Choke handler

		li	r31,0					; Get a 0
		mfsprg	r25,0					; Get the per_proc
		stw	r31,traceMask(0)			; Force tracing off right now



		lwz	r1,PP_DEBSTACKPTR(r25)			; Get debug stack pointer
		cmpwi	r1,-1					; Are we already choking?
		bne	chokefirst				; Nope...

;		Double choke: just spin forever so an attached analyzer can look.

chokespin:	addi	r31,r31,1				; Spin and hope for an analyzer connection...
		addi	r31,r31,1				; Spin and hope for an analyzer connection...
		addi	r31,r31,1				; Spin and hope for an analyzer connection...
		addi	r31,r31,1				; Spin and hope for an analyzer connection...
		addi	r31,r31,1				; Spin and hope for an analyzer connection...
		addi	r31,r31,1				; Spin and hope for an analyzer connection...
		b	chokespin				; Spin and hope for an analyzer connection...

chokefirst:	li	r0,-1					; Set choke value
		mr.	r1,r1					; See if we are on debug stack yet
		lwz	r10,saver1+4(r4)			; Get the interrupted-time stack pointer
		stw	r0,PP_DEBSTACKPTR(r25)			; Show we are choking
		bne	chokestart				; We are not on the debug stack yet...

		lwz	r2,PP_DEBSTACK_TOP_SS(r25)		; Get debug stack top
		sub	r11,r2,r10				; Get stack depth

		cmplwi	r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED	; Check if stack pointer is ok
		bgt	chokespin				; Bad stack pointer or too little left, just die...

		subi	r1,r10,FM_REDZONE			; Make a red zone

chokestart:	li	r0,0					; Get a zero
		stw	r0,FM_BACKPTR(r1)			; We now have terminated the back chain

		bl	EXT(SysChoked)				; Call the "C" phase of this
		b	chokespin				; Should not be here so just go spin...
1592
1593
#if VERIFYSAVE
;
;		Savearea chain verification (debug only; both inner sections are
;		currently disabled with #if 0 and the whole block is compiled
;		out because VERIFYSAVE is 0).
;

versave:
#if 0
		lis	r22,hi16(EXT(DebugWork))		; (TEST/DEBUG)
		ori	r22,r22,lo16(EXT(DebugWork))		; (TEST/DEBUG)
		lwz	r23,0(r22)				; (TEST/DEBUG)
		mr.	r23,r23					; (TEST/DEBUG)
		beqlr-						; (TEST/DEBUG)
		mfsprg	r20,0					; (TEST/DEBUG)
		lwz	r21,pfAvailable(r20)			; (TEST/DEBUG)
		mr.	r21,r21					; (TEST/DEBUG)
		bnelr+						; (TEST/DEBUG)

		stw	r22,0(r22)				; (TEST/DEBUG) Lock out more checks
		BREAKPOINT_TRAP					; (TEST/DEBUG) Get into debugger
#endif

#if 0
		;; This code is broken and migration will make the matter even worse
;
;		Make sure that all savearea chains have the right type on them
;

		lis	r28,hi16(EXT(default_pset))		; (TEST/DEBUG)
		lis	r27,hi16(EXT(DebugWork))		; (TEST/DEBUG)
		ori	r28,r28,lo16(EXT(default_pset))		; (TEST/DEBUG)
		ori	r27,r27,lo16(EXT(DebugWork))		; (TEST/DEBUG)
		li	r20,0					; (TEST/DEBUG)
		lwz	r26,0(r27)				; (TEST/DEBUG)
		lwz	r27,psthreadcnt(r28)			; (TEST/DEBUG)
		mr.	r26,r26					; (TEST/DEBUG) Have we locked the test out?
		lwz	r28,psthreads(r28)			; (TEST/DEBUG)
		mflr	r31					; (TEST/DEBUG) Save return
		bnelr-						; (TEST/DEBUG) Test already triggered, skip...
		b	fckgo					; (TEST/DEBUG) Join up...

fcknext:	mr.	r27,r27					; (TEST/DEBUG) Any more threads?
		bne+	fckxxx					; (TEST/DEBUG) Yes...

		mtlr	r31					; (TEST/DEBUG) Restore return
		blr						; (TEST/DEBUG) Leave...

fckxxx:		lwz	r28,THREAD_PSTHRN(r28)			; (TEST/DEBUG) Get next thread

fckgo:		subi	r27,r27,1				; (TEST/DEBUG) Decrement thread count
		lwz	r24,THREAD_TOP_ACT(r28)			; (TEST/DEBUG) Get activation for the thread
		lwz	r20,ACT_MACT_PCB(r24)			; (TEST/DEBUG) Get the normal context
		li	r21,SAVgeneral				; (TEST/DEBUG) Make sure this is all general context
		bl	versavetype				; (TEST/DEBUG) Check the chain

		lwz	r20,facctx+FPUsave(r24)			; (TEST/DEBUG) Get regular floating point
		li	r21,SAVfloat				; (TEST/DEBUG) Make sure this is all floating point
		bl	versavetype				; (TEST/DEBUG) Check the chain

		lwz	r20,facctx+VMXsave(r24)			; (TEST/DEBUG) Get regular vector point
		li	r21,SAVvector				; (TEST/DEBUG) Make sure this is all vector
		bl	versavetype				; (TEST/DEBUG) Check the chain

		lwz	r29,vmmControl(r24)			; (TEST/DEBUG) Get the virtual machine control blocks
		mr.	r29,r29					; (TEST/DEBUG) Are there any?
		beq+	fcknext					; (TEST/DEBUG) Nope, next thread...

		li	r22,kVmmMaxContextsPerThread		; (TEST/DEBUG) Get the number of control blocks
		subi	r29,r29,vmmCEntrySize			; (TEST/DEBUG) Get running start

fcknvmm:	subi	r22,r22,1				; (TEST/DEBUG) Do all of them
		mr.	r22,r22					; (TEST/DEBUG) Are we all done?
		addi	r29,r29,vmmCEntrySize			; (TEST/DEBUG) Get the next entry
		blt-	fcknext					; (TEST/DEBUG) Yes, check next thread...

		lwz	r23,vmmFlags(r29)			; (TEST/DEBUG) Get entry flags
		rlwinm.	r23,r23,0,0,0				; (TEST/DEBUG) Is this in use?
		beq+	fcknvmm					; (TEST/DEBUG) Not in use...

		lwz	r20,vmmFacCtx+FPUsave(r29)		; (TEST/DEBUG) Get regular floating point
		li	r21,SAVfloat				; (TEST/DEBUG) Make sure this is all floating point
		bl	versavetype				; (TEST/DEBUG) Check the chain

		lwz	r20,vmmFacCtx+VMXsave(r29)		; (TEST/DEBUG) Get regular vector point
		li	r21,SAVvector				; (TEST/DEBUG) Make sure this is all vector
		bl	versavetype				; (TEST/DEBUG) Check the chain
		b	fcknvmm					; (TEST/DEBUG) Get then vmm block...

versavetype:
		mr.	r20,r20					; (TEST/DEBUG) Chain done?
		beqlr-						; (TEST/DEBUG) Yes...

		lwz	r23,SAVflags(r20)			; (TEST/DEBUG) Get the flags
		rlwinm	r23,r23,24,24,31			; (TEST/DEBUG) Position it
		cmplw	r23,r21					; (TEST/DEBUG) Are we the correct type?
		beq+	versvok					; (TEST/DEBUG) This one is ok...

		lis	r22,hi16(EXT(DebugWork))		; (TEST/DEBUG)
		ori	r22,r22,lo16(EXT(DebugWork))		; (TEST/DEBUG)
		stw	r22,0(r22)				; (TEST/DEBUG) Lock out more checks
		BREAKPOINT_TRAP					; (TEST/DEBUG) Get into debugger

versvok:	lwz	r20,SAVprev+4(r20)			; (TEST/DEBUG) Get the previous one
		b	versavetype				; (TEST/DEBUG) Go check its type...
#endif


#endif
Cache object: 42a83da14bbc0ae0b6b72265af79f4d2
|