FreeBSD/Linux Kernel Cross Reference
sys/ppc/l.s
1 #include "mem.h"
2
3 /* use of SPRG registers in save/restore */
4 #define SAVER0 SPRG0
5 #define SAVER1 SPRG1
6 #define SAVELR SPRG2
7 #define SAVEXX SPRG3
8
9 #ifdef ucuconf
10 /* These only exist on the PPC 755: */
11 #define SAVER4 SPRG4
12 #define SAVER5 SPRG5
13 #define SAVER6 SPRG6
14 #define SAVER7 SPRG7
15 #endif /* ucuconf */
16
17 /* special instruction definitions */
18 #define BDNZ BC 16, 0,	/* decrement CTR, branch if CTR != 0 */
19 #define BDNE BC 0, 2,	/* branch if CR0 EQ bit is clear */
20 #define MTCRF(r, crm) WORD $((31<<26)|((r)<<21)|(crm<<12)|(144<<1))	/* mtcrf crm, r: copy selected GPR fields into CR */
21
22 /* #define TLBIA WORD $((31<<26)|(370<<1)) Not implemented on the 603e */
23 #define TLBSYNC WORD $((31<<26)|(566<<1))
24 #define TLBLI(n) WORD $((31<<26)|((n)<<11)|(1010<<1))	/* tlbli: load ITLB entry for EA in GPR n (from IMISS/RPA) */
25 #define TLBLD(n) WORD $((31<<26)|((n)<<11)|(978<<1))	/* tlbld: load DTLB entry for EA in GPR n (from DMISS/RPA) */
26
27 /* on some models mtmsr doesn't synchronise enough (eg, 603e) */
28 #define MSRSYNC SYNC
29
/* stack space for a Ureg plus the 8 bytes below it used by the trap path */
30 #define UREGSPACE (UREGSIZE+8)
31
32 TEXT start(SB), $-4
33
34 /*
35 * setup MSR
36 * turn off interrupts
37 * use 0x000 as exception prefix
38 * enable machine check
39 */
/* NOTE(review): the ANDN below clears ME as well as EE and IP, so machine
 * check is actually disabled here despite the comment above — confirm. */
40 MOVW MSR, R3
41 MOVW $(MSR_ME|MSR_EE|MSR_IP), R4
42 ANDN R4, R3 /* clear ME, EE and IP in the MSR image */
43 SYNC
44 MOVW R3, MSR
45 MSRSYNC
46
47 /* except during trap handling, R0 is zero from now on */
48 MOVW $0, R0
49
50 /* setup SB for pre mmu */
51 MOVW $setSB(SB), R2
52 MOVW $KZERO, R3
53 ANDN R3, R2 /* SB = physical address of setSB while translation is off */
54
55 /* before this we're not running above KZERO */
56 BL mmuinit0(SB)
57 /* after this we are */
58
59 #ifdef ucuconf
/* flush the data cache over the first 32MB, one line at a time */
60 MOVW $0x2000000, R4 /* size */
61 MOVW $0, R3 /* base address */
62 RLWNM $0, R3, $~(CACHELINESZ-1), R5 /* R5 = base rounded down to a line */
63 CMP R4, $0
64 BLE _dcf1
65 SUB R5, R3 /* R3 = offset into the first line */
66 ADD R3, R4
67 ADD $(CACHELINESZ-1), R4
68 SRAW $CACHELINELOG, R4 /* R4 = number of lines to flush */
69 MOVW R4, CTR
70 _dcf0: DCBF (R5)
71 ADD $CACHELINESZ, R5
72 BDNZ _dcf0
73 _dcf1:
74 SYNC
75
76 /* BAT0, 3 unused, copy of BAT2 */
77 MOVW SPR(IBATL(2)), R3
78 MOVW R3, SPR(IBATL(0))
79 MOVW SPR(IBATU(2)), R3
80 MOVW R3, SPR(IBATU(0))
81 MOVW SPR(DBATL(2)), R3
82 MOVW R3, SPR(DBATL(0))
83 MOVW SPR(DBATU(2)), R3
84 MOVW R3, SPR(DBATU(0))
85
86 MOVW SPR(IBATL(2)), R3
87 MOVW R3, SPR(IBATL(3))
88 MOVW SPR(IBATU(2)), R3
89 MOVW R3, SPR(IBATU(3))
90 MOVW SPR(DBATL(2)), R3
91 MOVW R3, SPR(DBATL(3))
92 MOVW SPR(DBATU(2)), R3
93 MOVW R3, SPR(DBATU(3))
94 #endif /* ucuconf */
95
96 /* running with MMU on!! */
97
98 /* set R2 to correct value */
99 MOVW $setSB(SB), R2
100
101 /* set up Mach */
102 MOVW $MACHADDR, R(MACH)
103 ADD $(MACHSIZE-8), R(MACH), R1 /* set stack */
104
105 MOVW R0, R(USER) /* up-> set to zero */
106 MOVW R0, 0(R(MACH)) /* machno set to zero */
107
108 BL main(SB)
109
110 RETURN /* not reached */
111
112 /*
113 * on return from this function we will be running in virtual mode.
114 * We set up the Block Address Translation (BAT) registers thus:
115 * 1) first 3 BATs are 256M blocks, starting from KZERO->0
116 * 2) remaining BAT maps last 256M directly
117 */
118 TEXT mmuinit0(SB), $0
119 /* reset all the tlbs */
120 MOVW $64, R3 /* 64 TLB index values to invalidate */
121 MOVW R3, CTR
122 MOVW $0, R4
123
124 tlbloop:
125 TLBIE R4
126 SYNC
127 ADD $BIT(19), R4 /* step the EA field that selects the TLB index */
128 BDNZ tlbloop
129 TLBSYNC
130
131 #ifndef ucuconf
132 /* BATs 0 and 1 cover memory from 0x00000000 to 0x20000000 */
133
134 /* KZERO -> 0, IBAT and DBAT, 256 MB */
135 MOVW $(KZERO|(0x7ff<<2)|2), R3 /* BATU: BEPI=KZERO, BL=0x7ff (256MB), supervisor valid */
136 MOVW $(PTEVALID|PTEWRITE), R4 /* PTEVALID => Cache coherency on */
137 MOVW R3, SPR(IBATU(0))
138 MOVW R4, SPR(IBATL(0))
139 MOVW R3, SPR(DBATU(0))
140 MOVW R4, SPR(DBATL(0))
141
142 /* KZERO+256M -> 256M, IBAT and DBAT, 256 MB */
143 ADD $(1<<28), R3 /* advance effective base by 256MB */
144 ADD $(1<<28), R4 /* advance physical base by 256MB */
145 MOVW R3, SPR(IBATU(1))
146 MOVW R4, SPR(IBATL(1))
147 MOVW R3, SPR(DBATU(1))
148 MOVW R4, SPR(DBATL(1))
149
150 /* FPGABASE -> FPGABASE, DBAT, 16 MB */
151 MOVW $(FPGABASE|(0x7f<<2)|2), R3 /* BL=0x7f => 16MB block */
152 MOVW $(FPGABASE|PTEWRITE|PTEUNCACHED), R4 /* FPGA memory, don't cache */
153 MOVW R3, SPR(DBATU(2))
154 MOVW R4, SPR(DBATL(2))
155
156 /* IBAT 2 unused */
157 MOVW R0, SPR(IBATU(2))
158 MOVW R0, SPR(IBATL(2))
159
160 /* direct map last block, uncached, (not guarded, doesn't work for BAT), DBAT only */
161 MOVW $(INTMEM|(0x7ff<<2)|2), R3
162 MOVW $(INTMEM|PTEWRITE|PTEUNCACHED), R4 /* Don't set PTEVALID here */
163 MOVW R3, SPR(DBATU(3))
164 MOVW R4, SPR(DBATL(3))
165
166 /* IBAT 3 unused */
167 MOVW R0, SPR(IBATU(3))
168 MOVW R0, SPR(IBATL(3))
169 #else /* ucuconf */
170 /* BAT 2 covers memory from 0x00000000 to 0x10000000 */
171
172 /* KZERO -> 0, IBAT2 and DBAT2, 256 MB */
173 MOVW $(KZERO|(0x7ff<<2)|2), R3
174 MOVW $(PTEVALID|PTEWRITE), R4 /* PTEVALID => Cache coherency on */
175 MOVW R3, SPR(DBATU(2))
176 MOVW R4, SPR(DBATL(2))
177 MOVW R3, SPR(IBATU(2))
178 MOVW R4, SPR(IBATL(2))
179 #endif /* ucuconf */
180
181 /* enable MMU */
182 MOVW LR, R3
183 OR $KZERO, R3 /* caller's return address, lifted into the kernel map */
184 MOVW R3, SPR(SRR0) /* Stored PC for RFI instruction */
185 MOVW MSR, R4
186 OR $(MSR_IR|MSR_DR|MSR_RI|MSR_FP), R4 /* turn on translation (and FP) */
187 MOVW R4, SPR(SRR1)
188 RFI /* resume in kernel mode in caller */
189
190 RETURN /* not reached; the RFI above returns to the caller */
191
/* kfpinit: establish the kernel's initial FPU state: clear FPSCR (except
 * the enabled VE/OE/ZE exception bits), load the conventional constant
 * registers F27-F31, and zero F0-F26. */
192 TEXT kfpinit(SB), $0
193 MOVFL $0, FPSCR(7)
194 MOVFL $0xD, FPSCR(6) /* VE, OE, ZE */
195 MOVFL $0, FPSCR(5)
196 MOVFL $0, FPSCR(3) /* note: FPSCR field 4 is deliberately left untouched */
197 MOVFL $0, FPSCR(2)
198 MOVFL $0, FPSCR(1)
199 MOVFL $0, FPSCR(0)
200
201 FMOVD $4503601774854144.0, F27 /* 2^52 + 2^31 — presumably a float/int conversion bias; confirm against the FP support code */
202 FMOVD $0.5, F29
203 FSUB F29, F29, F28 /* F28 = 0.0 */
204 FADD F29, F29, F30 /* F30 = 1.0 */
205 FADD F30, F30, F31 /* F31 = 2.0 */
/* zero the remaining registers F0-F26 */
206 FMOVD F28, F0
207 FMOVD F28, F1
208 FMOVD F28, F2
209 FMOVD F28, F3
210 FMOVD F28, F4
211 FMOVD F28, F5
212 FMOVD F28, F6
213 FMOVD F28, F7
214 FMOVD F28, F8
215 FMOVD F28, F9
216 FMOVD F28, F10
217 FMOVD F28, F11
218 FMOVD F28, F12
219 FMOVD F28, F13
220 FMOVD F28, F14
221 FMOVD F28, F15
222 FMOVD F28, F16
223 FMOVD F28, F17
224 FMOVD F28, F18
225 FMOVD F28, F19
226 FMOVD F28, F20
227 FMOVD F28, F21
228 FMOVD F28, F22
229 FMOVD F28, F23
230 FMOVD F28, F24
231 FMOVD F28, F25
232 FMOVD F28, F26
233 RETURN
234
/* splhi: disable interrupts; returns the previous MSR (in R3) for splx */
235 TEXT splhi(SB), $0
236 MOVW LR, R31
237 MOVW R31, 4(R(MACH)) /* save PC in m->splpc */
238 MOVW MSR, R3 /* old MSR is the return value */
239 RLWNM $0, R3, $~MSR_EE, R4 /* clear EE: interrupts off */
240 SYNC
241 MOVW R4, MSR
242 MSRSYNC
243 RETURN
244
245 TEXT splx(SB), $0
246 /* fall though */
247
/* splxpc: restore the interrupt-enable state saved by splhi (R3 = old MSR) */
248 TEXT splxpc(SB), $0
249 MOVW LR, R31
250 MOVW R31, 4(R(MACH)) /* save PC in m->splpc */
251 MOVW MSR, R4
252 RLWMI $0, R3, $MSR_EE, R4 /* insert just the EE bit from the saved MSR */
253 SYNC
254 MOVW R4, MSR
255 MSRSYNC
256 RETURN
257
/* spllo: enable interrupts; returns the previous MSR (in R3) */
258 TEXT spllo(SB), $0
259 MOVW MSR, R3 /* old MSR is the return value */
260 OR $MSR_EE, R3, R4 /* set EE: interrupts on */
261 SYNC
262 MOVW R4, MSR
263 MSRSYNC
264 RETURN
265
266 TEXT spldone(SB), $0
267 RETURN
268
/* islo: non-zero iff interrupts are currently enabled */
269 TEXT islo(SB), $0
270 MOVW MSR, R3
271 RLWNM $0, R3, $MSR_EE, R3 /* isolate the EE bit */
272 RETURN
273
/* setlabel(Label*): record SP and return PC; returns 0 to the direct caller */
274 TEXT setlabel(SB), $-4
275 MOVW LR, R31
276 MOVW R31, 4(R3) /* label pc */
277 MOVW R1, 0(R3) /* label sp */
278 MOVW $0, R3
279 RETURN
280
/* gotolabel(Label*): resume at the saved PC/SP; appears to return 1 from setlabel */
281 TEXT gotolabel(SB), $-4
282 MOVW 4(R3), R31
283 MOVW R31, LR /* resume at label pc */
284 MOVW 0(R3), R1 /* restore label sp */
285 MOVW $1, R3
286 RETURN
287
/* touser(sp): drop to user mode at UTZERO+32 with the given user stack */
288 TEXT touser(SB), $-4
289 MOVW $(UTZERO+32), R5 /* header appears in text */
290 MOVW $(MSR_EE|MSR_PR|MSR_IR|MSR_DR|MSR_RI), R4 /* user mode, interrupts on, translation on */
291 MOVW R4, SPR(SRR1)
292 MOVW R3, R1 /* user stack pointer */
293 MOVW R5, SPR(SRR0)
294 RFI /* to user space; does not return */
295
296 TEXT dczap(SB), $-4 /* dczap(virtaddr, count) */
297 MOVW n+4(FP), R4 /* count */
298 RLWNM $0, R3, $~(CACHELINESZ-1), R5 /* R5 = addr rounded down to a line */
299 CMP R4, $0
300 BLE dcz1 /* nothing to do */
301 SUB R5, R3 /* R3 = offset into the first line */
302 ADD R3, R4
303 ADD $(CACHELINESZ-1), R4
304 SRAW $CACHELINELOG, R4 /* R4 = number of lines covering the range */
305 MOVW R4, CTR
306 dcz0:
307 DCBI (R5) /* invalidate the line, discarding any dirty data */
308 ADD $CACHELINESZ, R5
309 BDNZ dcz0
310 dcz1:
311 SYNC
312 RETURN
313
314 TEXT dcflush(SB), $-4 /* dcflush(virtaddr, count) */
315 MOVW n+4(FP), R4 /* count */
316 RLWNM $0, R3, $~(CACHELINESZ-1), R5 /* R5 = addr rounded down to a line */
317 CMP R4, $0
318 BLE dcf1
319 SUB R5, R3
320 ADD R3, R4
321 ADD $(CACHELINESZ-1), R4
322 SRAW $CACHELINELOG, R4 /* R4 = number of lines covering the range */
323 MOVW R4, CTR
324 dcf0: DCBST (R5) /* write the line back to memory, keeping it valid */
325 ADD $CACHELINESZ, R5
326 BDNZ dcf0
327 dcf1:
328 SYNC
329 RETURN
330
331 TEXT icflush(SB), $-4 /* icflush(virtaddr, count) */
332 MOVW n+4(FP), R4 /* count */
333 RLWNM $0, R3, $~(CACHELINESZ-1), R5 /* R5 = addr rounded down to a line */
334 CMP R4, $0
335 BLE icf1
336 SUB R5, R3
337 ADD R3, R4
338 ADD $(CACHELINESZ-1), R4
339 SRAW $CACHELINELOG, R4 /* R4 = number of lines covering the range */
340 MOVW R4, CTR
341 icf0: ICBI (R5) /* invalidate the instruction cache */
342 ADD $CACHELINESZ, R5
343 BDNZ icf0
344 ISYNC
345 icf1:
346 RETURN
347
/* tas(ulong*): atomic test-and-set via lwarx/stwcx.; returns the old value
 * (0 means the lock was free and is now held with the 0xdead marker). */
348 TEXT tas(SB), $0
349 MOVW R3, R4 /* R4 = lock address */
350 MOVW $0xdead, R5 /* lock-held marker value */
351 tas1:
352 DCBF (R4) /* fix for 603x bug */
353 SYNC
354 LWAR (R4), R3 /* load with reservation; old value is the return value */
355 CMP R3, $0
356 BNE tas0 /* already held: return the non-zero value */
357 STWCCC R5, (R4) /* conditional store of the marker */
358 BNE tas1 /* reservation lost: try again */
359 EIEIO
360 tas0:
361 SYNC
362 RETURN
363
364 TEXT _xinc(SB), $0 /* void _xinc(long *); */
365 MOVW R3, R4
366 xincloop:
367 DCBF (R4) /* fix for 603x bug */
368 LWAR (R4), R3 /* load with reservation */
369 ADD $1, R3
370 STWCCC R3, (R4)
371 BNE xincloop /* conditional store failed; retry */
372 RETURN
373
374 TEXT _xdec(SB), $0 /* long _xdec(long *); */
375 MOVW R3, R4
376 xdecloop:
377 DCBF (R4) /* fix for 603x bug */
378 LWAR (R4), R3 /* load with reservation */
379 ADD $-1, R3 /* decremented value is also the return value */
380 STWCCC R3, (R4)
381 BNE xdecloop /* conditional store failed; retry */
382 RETURN
383
/* tlbflushall: invalidate every TLB entry, one index at a time */
384 TEXT tlbflushall(SB), $0
385 MOVW $TLBENTRIES, R3
386 MOVW R3, CTR
387 MOVW $0, R4
388 ISYNC
389 tlbflushall0:
390 TLBIE R4
391 SYNC
392 ADD $BIT(19), R4 /* step the EA field that selects the TLB index */
393 BDNZ tlbflushall0
394 TLBSYNC
395 RETURN
396
/* tlbflush(va): invalidate the TLB entry for one virtual address */
397 TEXT tlbflush(SB), $0
398 ISYNC
399 TLBIE R3
400 SYNC
401 TLBSYNC
402 RETURN
403
/* gotopc(pc): jump to the given address; does not return */
404 TEXT gotopc(SB), $0
405 MOVW R3, CTR
406 MOVW LR, R31 /* for trace back */
407 BR (CTR)
408
409 /* On an imiss, we get here. If we can resolve it, we do.
410 * Otherwise take the real trap. The code at the vector is
411 * MOVW R0, SPR(SAVER0) No point to this, of course
412 * MOVW LR, R0
413 * MOVW R0, SPR(SAVELR)
414 * BL imiss(SB) or dmiss, as the case may be
415 * BL tlbvec(SB)
416 */
417 TEXT imiss(SB), $-4
418 /* Statistics */
419 MOVW $MACHPADDR, R1
420 MOVW 0xc(R1), R3 /* count m->tlbfault */
421 ADD $1, R3
422 MOVW R3, 0xc(R1)
423 MOVW 0x10(R1), R3 /* count m->imiss */
424 ADD $1, R3
425 MOVW R3, 0x10(R1)
426
427 /* Real work: search one PTEG for a PTE matching the iCMP pattern */
428 MOVW SPR(HASH1), R1 /* (phys) pointer into the hash table */
429 ADD $BY2PTEG, R1, R2 /* end pointer */
430 MOVW SPR(iCMP), R3 /* pattern to look for */
431 imiss1:
432 MOVW (R1), R0
433 CMP R3, R0
434 BEQ imiss2 /* found the entry */
435 ADD $8, R1 /* next PTE (8 bytes each) */
436 CMP R1, R2 /* test end of loop */
437 BNE imiss1 /* Loop */
438 /* Failed to find an entry; take the full trap */
439 MOVW SPR(SRR1), R1
440 MTCRF(1, 0x80) /* restore CR0 bits (they're auto saved in SRR1) */
441 RETURN /* back to the vector code, which then calls tlbvec */
442 imiss2:
443 /* Found the entry */
444 MOVW 4(R1), R2 /* Phys addr */
445 MOVW R2, SPR(RPA)
446 MOVW SPR(IMISS), R3 /* faulting effective address */
447 TLBLI(3) /* load the ITLB entry from IMISS/RPA */
448
449 /* Restore Registers */
450 MOVW SPR(SRR1), R1 /* Restore the CR0 field of the CR register from SRR1 */
451 MTCRF(1, 0x80)
452 MOVW SPR(SAVELR), R0
453 MOVW R0, LR
454 RFI /* resume the interrupted instruction */
455
456 /* On a data load or store miss, we get here. If we can resolve it, we do.
457 * Otherwise take the real trap
458 */
459 TEXT dmiss(SB), $-4
460 /* Statistics */
461 MOVW $MACHPADDR, R1
462 MOVW 0xc(R1), R3 /* count m->tlbfault */
463 ADD $1, R3
464 MOVW R3, 0xc(R1)
465 MOVW 0x14(R1), R3 /* count m->dmiss */
466 ADD $1, R3
467 MOVW R3, 0x14(R1)
468 /* Real work: search one PTEG for a PTE matching the DCMP pattern */
469 MOVW SPR(HASH1), R1 /* (phys) pointer into the hash table */
470 ADD $BY2PTEG, R1, R2 /* end pointer */
471 MOVW SPR(DCMP), R3 /* pattern to look for */
472 dmiss1:
473 MOVW (R1), R0
474 CMP R3, R0
475 BEQ dmiss2 /* found the entry */
476 ADD $8, R1 /* next PTE (8 bytes each) */
477 CMP R1, R2 /* test end of loop */
478 BNE dmiss1 /* Loop */
479 /* Failed to find an entry; take the full trap */
480 MOVW SPR(SRR1), R1
481 MTCRF(1, 0x80) /* restore CR0 bits (they're auto saved in SRR1) */
482 RETURN /* back to the vector code, which then calls tlbvec */
483 dmiss2:
484 /* Found the entry */
485 MOVW 4(R1), R2 /* Phys addr */
486 MOVW R2, SPR(RPA)
487 MOVW SPR(DMISS), R3 /* faulting effective address */
488 TLBLD(3) /* load the DTLB entry from DMISS/RPA */
489 /* Restore Registers */
490 MOVW SPR(SRR1), R1 /* Restore the CR0 field of the CR register from SRR1 */
491 MTCRF(1, 0x80)
492 MOVW SPR(SAVELR), R0
493 MOVW R0, LR
494 RFI /* resume the faulting instruction */
495
496 /*
497 * When a trap sets the TGPR bit (TLB miss traps do this),
498 * registers get remapped: R0-R31 are temporarily inaccessible,
499 * and Temporary Registers TR0-TR3 are mapped onto R0-R3.
500 * While this bit is set, R4-R31 cannot be used.
501 * The code at the vector has executed this code before
502 * coming to tlbvec:
503 * MOVW R0, SPR(SAVER0) No point to this, of course
504 * MOVW LR, R0
505 * MOVW R0, SPR(SAVELR)
506 * BL tlbvec(SB)
507 * SAVER0 can be reused. We're not interested in the value of TR0
508 */
509 TEXT tlbvec(SB), $-4
510 MOVW MSR, R1
511 RLWNM $0, R1, $~MSR_TGPR, R1 /* Clear the dreaded TGPR bit in the MSR */
512 SYNC
513 MOVW R1, MSR
514 MSRSYNC
515 /* Now the GPRs are what they're supposed to be, save R0 again */
516 MOVW R0, SPR(SAVER0)
517 /* Fall through to trapvec */
518
519 /*
520 * traps force memory mapping off.
521 * the following code has been executed at the exception
522 * vector location
523 * MOVW R0, SPR(SAVER0)
524 * MOVW LR, R0
525 * MOVW R0, SPR(SAVELR)
526 * bl trapvec(SB)
527 *
528 */
529 TEXT trapvec(SB), $-4
530 MOVW LR, R0
531 MOVW R1, SPR(SAVER1)
532 MOVW R0, SPR(SAVEXX) /* vector */
533
534 /* did we come from user space */
535 MOVW SPR(SRR1), R0
536 MOVW CR, R1
537 MOVW R0, CR /* view the saved MSR through CR to test a bit */
538 BC 4, 17, ktrap /* branch if PR clear: trapped from kernel mode */
539
540 /* switch to kernel stack */
541 MOVW R1, CR /* restore the interrupted CR */
542 MOVW $MACHPADDR, R1 /* PADDR(m->) */
543 MOVW 8(R1), R1 /* m->proc */
544 RLWNM $0, R1, $~KZERO, R1 /* PADDR(m->proc) */
545 MOVW 8(R1), R1 /* m->proc->kstack */
546 RLWNM $0, R1, $~KZERO, R1 /* PADDR(m->proc->kstack) */
547 ADD $(KSTACK-UREGSIZE), R1 /* make room on stack */
548
549 BL saveureg(SB)
550 BL trap(SB)
551 BR restoreureg
552
553 ktrap:
554 MOVW R1, CR /* restore the interrupted CR */
555 MOVW SPR(SAVER1), R1
556 RLWNM $0, R1, $~KZERO, R1 /* set stack pointer */
557 SUB $UREGSPACE, R1
558
559 BL saveureg(SB) /* addressed relative to PC */
560 BL trap(SB)
561 BR restoreureg
562
563 /*
564 * enter with stack set and mapped.
565 * on return, SB (R2) has been set, and R3 has the Ureg*,
566 * the MMU has been re-enabled, kernel text and PC are in KSEG,
567 * R(MACH) has been set, and R0 contains 0.
568 *
569 */
570 TEXT saveureg(SB), $-4
571 /*
572 * save state
573 */
574 MOVMW R2, 48(R1) /* save r2 .. r31 in 48(R1) .. 164(R1) */
575 MOVW $MACHPADDR, R(MACH) /* PADDR(m->) */
576 MOVW 8(R(MACH)), R(USER) /* up-> */
577 MOVW $MACHADDR, R(MACH) /* m-> */
578 MOVW SPR(SAVER1), R4
579 MOVW R4, 44(R1) /* interrupted R1 (stack pointer) */
580 MOVW SPR(SAVER0), R5
581 MOVW R5, 40(R1) /* interrupted R0 */
582 MOVW CTR, R6
583 MOVW R6, 36(R1)
584 MOVW XER, R4
585 MOVW R4, 32(R1)
586 MOVW CR, R5
587 MOVW R5, 28(R1)
588 MOVW SPR(SAVELR), R6 /* LR */
589 MOVW R6, 24(R1)
590 /* pad at 20(R1) */
591 MOVW SPR(SRR0), R0
592 MOVW R0, 16(R1) /* old PC */
593 MOVW SPR(SRR1), R0
594 MOVW R0, 12(R1) /* old status */
595 MOVW SPR(SAVEXX), R0
596 MOVW R0, 8(R1) /* cause/vector */
/* snapshot the TLB-miss SPRs into the tail of the Ureg for the fault code */
597 MOVW SPR(DCMP), R0
598 MOVW R0, (160+8)(R1)
599 MOVW SPR(iCMP), R0
600 MOVW R0, (164+8)(R1)
601 MOVW SPR(DMISS), R0
602 MOVW R0, (168+8)(R1)
603 MOVW SPR(IMISS), R0
604 MOVW R0, (172+8)(R1)
605 MOVW SPR(HASH1), R0
606 MOVW R0, (176+8)(R1)
607 MOVW SPR(HASH2), R0
608 MOVW R0, (180+8)(R1)
609 MOVW SPR(DAR), R0
610 MOVW R0, (184+8)(R1)
611 MOVW SPR(DSISR), R0
612 MOVW R0, (188+8)(R1)
613 ADD $8, R1, R3 /* Ureg* */
614 OR $KZERO, R3 /* fix ureg */
615 STWCCC R3, (R1) /* break any pending reservations */
616 MOVW $0, R0 /* compiler/linker expect R0 to be zero */
617 MOVW $setSB(SB), R2 /* SB register */
618
619 MOVW MSR, R5
620 OR $(MSR_IR|MSR_DR|MSR_FP|MSR_RI), R5 /* enable MMU */
621 MOVW R5, SPR(SRR1)
622 MOVW LR, R31
623 OR $KZERO, R31 /* return PC in KSEG0 */
624 MOVW R31, SPR(SRR0)
625 OR $KZERO, R1 /* fix stack pointer */
626 RFI /* returns to trap handler */
627
628 /*
629 * restore state from Ureg and return from trap/interrupt
630 */
631 TEXT forkret(SB), $0
632 BR restoreureg
633
634 restoreureg:
635 MOVMW 48(R1), R2 /* restore r2 through r31 */
636 /* defer R1 */
637 MOVW 40(R1), R0
638 MOVW R0, SPR(SAVER0) /* resave saved R0 */
639 MOVW 36(R1), R0
640 MOVW R0, CTR
641 MOVW 32(R1), R0
642 MOVW R0, XER
643 MOVW 28(R1), R0
644 MOVW R0, CR /* Condition register*/
645 MOVW 24(R1), R0
646 MOVW R0, LR
647 /* pad, skip */
648 MOVW 16(R1), R0
649 MOVW R0, SPR(SRR0) /* old PC */
650 MOVW 12(R1), R0
651 MOVW R0, SPR(SRR1) /* old MSR */
652 /* cause, skip */
653 MOVW 44(R1), R1 /* old SP */
654 MOVW SPR(SAVER0), R0 /* R0 restored last, after its last use as scratch */
655 RFI /* back to the saved PC with the saved MSR */
656
/* simple SPR / MSR / segment-register accessors for the C kernel */
657 TEXT getpvr(SB), $0
658 MOVW SPR(PVR), R3 /* processor version register */
659 RETURN
660
661 TEXT getdec(SB), $0
662 MOVW SPR(DEC), R3 /* decrementer */
663 RETURN
664
665 TEXT putdec(SB), $0
666 MOVW R3, SPR(DEC)
667 RETURN
668
669 TEXT getdar(SB), $0
670 MOVW SPR(DAR), R3 /* data fault address */
671 RETURN
672
673 TEXT getdsisr(SB), $0
674 MOVW SPR(DSISR), R3 /* data fault status */
675 RETURN
676
677 TEXT getmsr(SB), $0
678 MOVW MSR, R3
679 RETURN
680
681 TEXT putmsr(SB), $0
682 MOVW R3, MSR
683 MSRSYNC
684 RETURN
685
686 TEXT putsdr1(SB), $0
687 SYNC
688 MOVW R3, SPR(SDR1) /* page table base/mask */
689 ISYNC
690 RETURN
691
692 TEXT putsr(SB), $0 /* putsr(segno, value) */
693 MOVW 4(FP), R4
694 SYNC
695 MOVW R4, SEG(R3)
696 MSRSYNC
697 RETURN
698
699 TEXT getsr(SB), $0 /* getsr(segno) */
700 MOVW SEG(R3), R3
701 RETURN
702
703 TEXT gethid0(SB), $0
704 MOVW SPR(HID0), R3
705 RETURN
706
707 TEXT puthid0(SB), $0
708 SYNC
709 ISYNC
710 MOVW R3, SPR(HID0)
711 SYNC
712 RETURN
713
714 TEXT gethid1(SB), $0
715 MOVW SPR(HID1), R3
716 RETURN
717
718 TEXT gethid2(SB), $0
719 MOVW SPR(HID2), R3
720 RETURN
721
722 TEXT puthid2(SB), $0
723 MOVW R3, SPR(HID2)
724 RETURN
725
726 TEXT eieio(SB), $0
727 EIEIO /* order I/O stores */
728 RETURN
729
730 TEXT sync(SB), $0
731 SYNC
732 RETURN
733
734 /* Power PC 603e specials */
/* accessors for the 603e software TLB-miss SPRs and the save/restore SRRs */
735 TEXT getimiss(SB), $0
736 MOVW SPR(IMISS), R3
737 RETURN
738
739 TEXT geticmp(SB), $0
740 MOVW SPR(iCMP), R3
741 RETURN
742
743 TEXT puticmp(SB), $0
744 MOVW R3, SPR(iCMP)
745 RETURN
746
747 TEXT getdmiss(SB), $0
748 MOVW SPR(DMISS), R3
749 RETURN
750
751 TEXT getdcmp(SB), $0
752 MOVW SPR(DCMP), R3
753 RETURN
754
755 TEXT putdcmp(SB), $0
756 MOVW R3, SPR(DCMP)
757 RETURN
758
759 TEXT getsdr1(SB), $0
760 MOVW SPR(SDR1), R3
761 RETURN
762
763 TEXT gethash1(SB), $0
764 MOVW SPR(HASH1), R3
765 RETURN
766
767 TEXT puthash1(SB), $0
768 MOVW R3, SPR(HASH1)
769 RETURN
770
771 TEXT gethash2(SB), $0
772 MOVW SPR(HASH2), R3
773 RETURN
774
775 TEXT puthash2(SB), $0
776 MOVW R3, SPR(HASH2)
777 RETURN
778
779 TEXT getrpa(SB), $0
780 MOVW SPR(RPA), R3
781 RETURN
782
783 TEXT putrpa(SB), $0
784 MOVW R3, SPR(RPA)
785 RETURN
786
787 TEXT tlbli(SB), $0 /* load ITLB entry for EA in R3 from IMISS/RPA */
788 TLBLI(3)
789 ISYNC
790 RETURN
791
792 TEXT tlbld(SB), $0 /* load DTLB entry for EA in R3 from DMISS/RPA */
793 SYNC
794 TLBLD(3)
795 ISYNC
796 RETURN
797
798 TEXT getsrr1(SB), $0
799 MOVW SPR(SRR1), R3
800 RETURN
801
802 TEXT putsrr1(SB), $0
803 MOVW R3, SPR(SRR1)
804 RETURN
805
/* fpsave(FPsave*): store F0-F31 then FPSCR into a 33-double save area.
 * F0 is saved first so it can be reused to stage FPSCR at the end. */
806 TEXT fpsave(SB), $0
807 FMOVD F0, (0*8)(R3)
808 FMOVD F1, (1*8)(R3)
809 FMOVD F2, (2*8)(R3)
810 FMOVD F3, (3*8)(R3)
811 FMOVD F4, (4*8)(R3)
812 FMOVD F5, (5*8)(R3)
813 FMOVD F6, (6*8)(R3)
814 FMOVD F7, (7*8)(R3)
815 FMOVD F8, (8*8)(R3)
816 FMOVD F9, (9*8)(R3)
817 FMOVD F10, (10*8)(R3)
818 FMOVD F11, (11*8)(R3)
819 FMOVD F12, (12*8)(R3)
820 FMOVD F13, (13*8)(R3)
821 FMOVD F14, (14*8)(R3)
822 FMOVD F15, (15*8)(R3)
823 FMOVD F16, (16*8)(R3)
824 FMOVD F17, (17*8)(R3)
825 FMOVD F18, (18*8)(R3)
826 FMOVD F19, (19*8)(R3)
827 FMOVD F20, (20*8)(R3)
828 FMOVD F21, (21*8)(R3)
829 FMOVD F22, (22*8)(R3)
830 FMOVD F23, (23*8)(R3)
831 FMOVD F24, (24*8)(R3)
832 FMOVD F25, (25*8)(R3)
833 FMOVD F26, (26*8)(R3)
834 FMOVD F27, (27*8)(R3)
835 FMOVD F28, (28*8)(R3)
836 FMOVD F29, (29*8)(R3)
837 FMOVD F30, (30*8)(R3)
838 FMOVD F31, (31*8)(R3)
839 MOVFL FPSCR, F0 /* FPSCR can only be moved via an FPR */
840 FMOVD F0, (32*8)(R3)
841 RETURN
842
/* fprestore(FPsave*): inverse of fpsave — FPSCR first (via F0, which is
 * then overwritten by its own restore), then F0-F31. */
843 TEXT fprestore(SB), $0
844 FMOVD (32*8)(R3), F0
845 MOVFL F0, FPSCR
846 FMOVD (0*8)(R3), F0
847 FMOVD (1*8)(R3), F1
848 FMOVD (2*8)(R3), F2
849 FMOVD (3*8)(R3), F3
850 FMOVD (4*8)(R3), F4
851 FMOVD (5*8)(R3), F5
852 FMOVD (6*8)(R3), F6
853 FMOVD (7*8)(R3), F7
854 FMOVD (8*8)(R3), F8
855 FMOVD (9*8)(R3), F9
856 FMOVD (10*8)(R3), F10
857 FMOVD (11*8)(R3), F11
858 FMOVD (12*8)(R3), F12
859 FMOVD (13*8)(R3), F13
860 FMOVD (14*8)(R3), F14
861 FMOVD (15*8)(R3), F15
862 FMOVD (16*8)(R3), F16
863 FMOVD (17*8)(R3), F17
864 FMOVD (18*8)(R3), F18
865 FMOVD (19*8)(R3), F19
866 FMOVD (20*8)(R3), F20
867 FMOVD (21*8)(R3), F21
868 FMOVD (22*8)(R3), F22
869 FMOVD (23*8)(R3), F23
870 FMOVD (24*8)(R3), F24
871 FMOVD (25*8)(R3), F25
872 FMOVD (26*8)(R3), F26
873 FMOVD (27*8)(R3), F27
874 FMOVD (28*8)(R3), F28
875 FMOVD (29*8)(R3), F29
876 FMOVD (30*8)(R3), F30
877 FMOVD (31*8)(R3), F31
878 RETURN
879
/* dcacheenb: enable the L1 data cache via HID0 (without flash invalidate —
 * the R5 value with HID_DCFI is computed but its store is commented out). */
880 TEXT dcacheenb(SB), $0
881 SYNC
882 MOVW SPR(HID0), R4 /* Get HID0 and clear unwanted bits */
883 RLWNM $0, R4, $~(HID_DLOCK), R4
884 MOVW $(HID_DCFI|HID_DCE), R5 /* enable + flash-invalidate variant (unused) */
885 OR R4, R5
886 MOVW $HID_DCE, R3
887 OR R4, R3
888 SYNC
889 // MOVW R5, SPR(HID0) /* Cache enable and flash invalidate */
890 MOVW R3, SPR(HID0) /* Cache enable */
891 SYNC
892 RETURN
893
/* icacheenb: enable the L1 instruction cache, flash-invalidating it first
 * (write ICE|ICFI, then ICE alone to clear the invalidate bit). */
894 TEXT icacheenb(SB), $0
895 SYNC
896 MOVW SPR(HID0), R4 /* Get HID0 and clear unwanted bits */
897 RLWNM $0, R4, $~(HID_ILOCK), R4
898 MOVW $(HID_ICFI|HID_ICE), R5
899 OR R4, R5
900 MOVW $HID_ICE, R3
901 OR R4, R3
902 SYNC
903 MOVW R5, SPR(HID0) /* Cache enable and flash invalidate */
904 MOVW R3, SPR(HID0) /* Cache enable */
905 SYNC
906 RETURN
907
908 #ifdef ucuconf
909 TEXT getpll(SB), $0
910 MOVW SPR(1009), R3 /* SPR 1009 — presumably PLL/HID1 state on the 750/755 family; confirm */
911 ISYNC
912 RETURN
913
914 TEXT getl2pm(SB), $0
915 MOVW SPR(1016), R3 /* SPR 1016 — L2 private memory control; confirm against CPU manual */
916 RETURN
917
918 TEXT getl2cr(SB), $0
919 MOVW SPR(1017), R3 /* SPR 1017 = L2CR, L2 cache control */
920 RETURN
921
922 TEXT putl2cr(SB), $0
923 MOVW R3, SPR(1017)
924 RETURN
925
926 TEXT dcachedis(SB), $0
927 SYNC
/* NOTE(review): the next three source lines are dead — the comment opened
 * on the first of them runs to the first '*' '/' pair, so the L1 disable
 * never executes; only the L2 disable below runs. Confirm this is intended. */
928 /* MOVW SPR(HID0), R4
929 RLWNM $0, R4, $~(HID_DCE), R4
930 MOVW R4, SPR(HID0) /* L1 Cache disable */
931
932 MOVW SPR(1017), R4
933 RLWNM $0, R4, $~(0x80000000), R4 /* clear the L2 enable bit */
934 MOVW R4, SPR(1017) /* L2 Cache disable */
935
936 SYNC
937 RETURN
938
939 TEXT l2disable(SB), $0
940 SYNC
941 MOVW SPR(1017), R4
942 RLWNM $0, R4, $~(0x80000000), R4 /* clear the L2 enable bit */
943 MOVW R4, SPR(1017) /* L2 Cache disable */
944 SYNC
945 RETURN
946
947 TEXT getbats(SB), $0 /* getbats(ulong*): dump all 16 BAT registers into buf[0..15] */
948 MOVW SPR(DBATU(0)), R4
949 MOVW R4, 0(R3)
950 MOVW SPR(DBATL(0)), R4
951 MOVW R4, 4(R3)
952 MOVW SPR(IBATU(0)), R4
953 MOVW R4, 8(R3)
954 MOVW SPR(IBATL(0)), R4
955 MOVW R4, 12(R3)
956 MOVW SPR(DBATU(1)), R4
957 MOVW R4, 16(R3)
958 MOVW SPR(DBATL(1)), R4
959 MOVW R4, 20(R3)
960 MOVW SPR(IBATU(1)), R4
961 MOVW R4, 24(R3)
962 MOVW SPR(IBATL(1)), R4
963 MOVW R4, 28(R3)
964 MOVW SPR(DBATU(2)), R4
965 MOVW R4, 32(R3)
966 MOVW SPR(DBATL(2)), R4
967 MOVW R4, 36(R3)
968 MOVW SPR(IBATU(2)), R4
969 MOVW R4, 40(R3)
970 MOVW SPR(IBATL(2)), R4
971 MOVW R4, 44(R3)
972 MOVW SPR(DBATU(3)), R4
973 MOVW R4, 48(R3)
974 MOVW SPR(DBATL(3)), R4
975 MOVW R4, 52(R3)
976 MOVW SPR(IBATU(3)), R4
977 MOVW R4, 56(R3)
978 MOVW SPR(IBATL(3)), R4
979 MOVW R4, 60(R3)
980 RETURN
981
982 TEXT setdbat0(SB), $0 /* setdbat0(ulong*): load DBAT0 upper/lower from buf[0..1] */
983 MOVW 0(R3), R4
984 MOVW R4, SPR(DBATU(0))
985 MOVW 4(R3), R4
986 MOVW R4, SPR(DBATL(0))
987 RETURN
988 #endif /* ucuconf */
989
990 TEXT mmudisable(SB), $0
991 /* disable MMU */
992 MOVW LR, R4
993 MOVW $KZERO, R5
994 ANDN R5, R4 /* physical address of the caller's return point */
995 MOVW R4, SPR(SRR0) /* Stored PC for RFI instruction */
996
997 MOVW MSR, R4
998 MOVW $(MSR_IR|MSR_DR|MSR_RI|MSR_FP), R5
999 ANDN R5, R4 /* translation (and FP) off in the resumed MSR */
1000 MOVW R4, SPR(SRR1)
1001
1002 MOVW SPR(HID0), R4 /* Get HID0 and clear unwanted bits */
1003 MOVW $(HID_ICE|HID_DCE), R5
1004 ANDN R5, R4
1005 MOVW R4, SPR(HID0) /* Cache disable */
1006 RFI /* resume caller with MMU off */
1007 RETURN /* not reached */
1008
/* kreboot(entry): turn the MMU/caches off, then jump to the physical
 * entry point passed in R3 by "returning" to it. Does not come back. */
1009 TEXT kreboot(SB), $0
1010 BL mmudisable(SB)
1011 MOVW R3, LR
1012 RETURN /* branches to the entry point now in LR */
1013
/* mul64fract(uvlong* r, uvlong a, uvlong b): *r = (a*b) >> 32 —
 * the middle 64 bits of the 128-bit product.  The low word lo(a0*b0)
 * is dropped (below the binary point); the top word is never formed. */
1014 TEXT mul64fract(SB), $0
1015 MOVW a0+8(FP), R9 /* a0: low word of a */
1016 MOVW a1+4(FP), R10 /* a1: high word of a */
1017 MOVW b0+16(FP), R4 /* b0: low word of b */
1018 MOVW b1+12(FP), R5 /* b1: high word of b */
1019
1020 MULLW R10, R5, R13 /* c2 = lo(a1*b1) */
1021
1022 MULLW R10, R4, R12 /* c1 = lo(a1*b0) */
1023 MULHWU R10, R4, R7 /* hi(a1*b0) */
1024 ADD R7, R13 /* c2 += hi(a1*b0) */
1025
1026 MULLW R9, R5, R6 /* lo(a0*b1) */
1027 MULHWU R9, R5, R7 /* hi(a0*b1) */
1028 ADDC R6, R12 /* c1 += lo(a0*b1) */
1029 ADDE R7, R13 /* c2 += hi(a0*b1) + carry */
1030
1031 MULHWU R9, R4, R7 /* hi(a0*b0) */
1032 ADDC R7, R12 /* c1 += hi(a0*b0) */
1033 ADDE R0, R13 /* c2 += carry (R0 is zero) */
1034
1035 MOVW R12, 4(R3) /* low word of the result */
1036 MOVW R13, 0(R3) /* high word of the result */
1037 RETURN
Cache object: dc9c3597b355eefe00c4f32c7abaf3ff
|