1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <machine/asm.h>
28 __FBSDID("$FreeBSD: releng/6.0/sys/sparc64/sparc64/support.S 122464 2003-11-11 06:41:54Z jake $");
29
30 #include <machine/asi.h>
31 #include <machine/asmacros.h>
32 #include <machine/ktr.h>
33 #include <machine/pstate.h>
34
35 #include "assym.s"
36
	/*
	 * We scribble on %g2, %g3 and %g6 as scratch; tell the assembler
	 * so it does not warn about use of the application registers.
	 */
	.register %g2, #ignore
	.register %g3, #ignore
	.register %g6, #ignore

/* Empty macro argument: expands to nothing (see the LD()/ST() pasting). */
#define	E /* empty */
42
/*
 * Generate load and store instructions for the corresponding width and asi
 * (or not). Note that we want to evaluate the macro args before
 * concatenating, so that E really turns into nothing.
 *
 * LD(ub, a)/ST(b, a) paste into lduba/stba (the alternate-space forms),
 * while LD(ub, E)/ST(b, E) collapse into plain ldub/stb.
 */
#define	_LD(w, a)	ld ## w ## a
#define	_ST(w, a)	st ## w ## a

#define	LD(w, a)	_LD(w, a)
#define	ST(w, a)	_ST(w, a)
53
54 /*
55 * Common code for copy routines.
56 *
57 * We use large macros to generate functions for each of the copy routines.
58 * This allows the load and store instructions to be generated for the right
59 * operation, asi or not. It is possible to write an asi independent function
60 * but this would require 2 expensive wrs in the main loop to switch %asi.
61 * It would also screw up profiling (if we ever get it), but may save some I$.
62 * We assume that either one of dasi and sasi is empty, or that they are both
63 * the same (empty or non-empty). It is up to the caller to set %asi.
64 */
65
/*
 * ASI independent implementation of copystr(9).
 * Used to implement copyinstr() and copystr().
 *
 * Copy at most len bytes from src to dst, stopping after a NUL byte
 * has been copied.  If the buffer fills up before a NUL is found, the
 * result is ENAMETOOLONG.  If done is non-NULL, the number of bytes
 * transferred (including the NUL) is stored through it.
 *
 * Return value is in %g1: 0 on success, ENAMETOOLONG on overflow.
 * (For len == 0 we return immediately with %g1 untouched; callers are
 * expected to pass a non-zero len.)
 */
#define	_COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
	brz	len, 4f ; \
	mov	src, %g2 ; /* remember the start for the byte count */ \
1:	deccc	1, len ; \
	bl,a,pn	%xcc, 2f ; /* buffer exhausted: flag ENAMETOOLONG */ \
	nop ; \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	brz,pn	%g1, 3f ; /* copied the NUL: %g1 == 0 == success */ \
	inc	src ; \
	ba	%xcc, 1b ; \
	inc	dst ; \
2:	mov	ENAMETOOLONG, %g1 ; \
3:	sub	src, %g2, %g2 ; /* %g2 = bytes transferred */ \
	brnz,a	done, 4f ; \
	stx	%g2, [done] ; \
4:
89
/*
 * ASI independent implementation of memset(3).
 * Used to implement bzero(), memset() and aszero().
 *
 * If the pattern is non-zero, duplicate it to fill 64 bits.
 * Store bytes until dst is 8-byte aligned, then store 8 bytes.
 * It has yet to be determined how much unrolling is beneficial.
 * Could also read and compare before writing to minimize snoop traffic.
 *
 * Register usage: %g1 is scratch for the pattern replication; dst, pat
 * and len are consumed.  Loop 1 aligns dst bytewise, loop 2 stores 32
 * bytes per iteration, loop 3 stores 8, loop 4 mops up the tail.
 *
 * XXX bzero() should be implemented as
 * #define bzero(dst, len) (void)memset((dst), 0, (len))
 * if at all.
 */
#define	_MEMSET(dst, pat, len, da, dasi) \
	brlez,pn len, 5f ; /* nothing to do for len <= 0 */ \
	and	pat, 0xff, pat ; \
	brz,pt	pat, 1f ; /* zero pattern needs no replication */ \
	sllx	pat, 8, %g1 ; \
	or	pat, %g1, pat ; /* pattern fills bits 15..0 */ \
	sllx	pat, 16, %g1 ; \
	or	pat, %g1, pat ; /* pattern fills bits 31..0 */ \
	sllx	pat, 32, %g1 ; \
	or	pat, %g1, pat ; /* pattern fills all 64 bits */ \
	.align 16 ; \
1:	deccc	1, len ; \
	bl,pn	%xcc, 5f ; \
	btst	7, dst ; \
	bz,a,pt	%xcc, 2f ; /* dst is now 8-byte aligned */ \
	inc	1, len ; \
	ST(b, da) pat, [dst] dasi ; \
	ba	%xcc, 1b ; \
	inc	dst ; \
	.align 16 ; \
2:	deccc	32, len ; \
	bl,a,pn	%xcc, 3f ; \
	inc	32, len ; \
	ST(x, da) pat, [dst] dasi ; \
	ST(x, da) pat, [dst + 8] dasi ; \
	ST(x, da) pat, [dst + 16] dasi ; \
	ST(x, da) pat, [dst + 24] dasi ; \
	ba	%xcc, 2b ; \
	inc	32, dst ; \
	.align 16 ; \
3:	deccc	8, len ; \
	bl,a,pn	%xcc, 4f ; \
	inc	8, len ; \
	ST(x, da) pat, [dst] dasi ; \
	ba	%xcc, 3b ; \
	inc	8, dst ; \
	.align 16 ; \
4:	deccc	1, len ; \
	bl,a,pn	%xcc, 5f ; \
	nop ; \
	ST(b, da) pat, [dst] dasi ; \
	ba	%xcc, 4b ; \
	inc	1, dst ; \
5:
147
/*
 * ASI independent implementation of memcpy(3).
 * Used to implement bcopy(), copyin(), copyout(), memcpy(), ascopy(),
 * ascopyfrom() and ascopyto().
 *
 * Transfer bytes until dst is 8-byte aligned. If src is then also 8 byte
 * aligned, transfer 8 bytes, otherwise finish with bytes. The unaligned
 * case could be optimized, but it is expected that this is the uncommon
 * case and of questionable value. The code to do so is also rather large
 * and ugly. It has yet to be determined how much unrolling is beneficial.
 *
 * Register usage: %g1-%g4 are scratch; dst, src and len are consumed.
 * Loop 1 aligns dst bytewise, loop 3 moves 32 bytes per iteration,
 * loop 4 moves 8 bytes, loop 5 finishes bytewise.
 *
 * XXX bcopy() must also check for overlap. This is stupid.
 * XXX bcopy() should be implemented as
 * #define bcopy(src, dst, len) (void)memcpy((dst), (src), (len))
 * if at all.
 */
#define	_MEMCPY(dst, src, len, da, dasi, sa, sasi) \
1:	deccc	1, len ; \
	bl,pn	%xcc, 6f ; \
	btst	7, dst ; \
	bz,a,pt	%xcc, 2f ; /* dst is now 8-byte aligned */ \
	inc	1, len ; \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	inc	1, src ; \
	ba	%xcc, 1b ; \
	inc	1, dst ; \
	.align 16 ; \
2:	btst	7, src ; \
	bz,a,pt	%xcc, 3f ; \
	nop ; \
	ba,a	%xcc, 5f ; /* unaligned src: finish bytewise */ \
	.align 16 ; \
3:	deccc	32, len ; \
	bl,a,pn	%xcc, 4f ; \
	inc	32, len ; \
	LD(x, sa) [src] sasi, %g1 ; \
	LD(x, sa) [src + 8] sasi, %g2 ; \
	LD(x, sa) [src + 16] sasi, %g3 ; \
	LD(x, sa) [src + 24] sasi, %g4 ; \
	ST(x, da) %g1, [dst] dasi ; \
	ST(x, da) %g2, [dst + 8] dasi ; \
	ST(x, da) %g3, [dst + 16] dasi ; \
	ST(x, da) %g4, [dst + 24] dasi ; \
	inc	32, src ; \
	ba	%xcc, 3b ; \
	inc	32, dst ; \
	.align 16 ; \
4:	deccc	8, len ; \
	bl,a,pn	%xcc, 5f ; \
	inc	8, len ; \
	LD(x, sa) [src] sasi, %g1 ; \
	ST(x, da) %g1, [dst] dasi ; \
	inc	8, src ; \
	ba	%xcc, 4b ; \
	inc	8, dst ; \
	.align 16 ; \
5:	deccc	1, len ; \
	bl,a,pn	%xcc, 6f ; \
	nop ; \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	inc	src ; \
	ba	%xcc, 5b ; \
	inc	dst ; \
6:
214
/*
 * void ascopy(u_long asi, vm_offset_t src, vm_offset_t dst, size_t len)
 *
 * Copy between two buffers that are both addressed through the given
 * ASI; %asi is loaded from %o0 before the common copy body runs.
 */
ENTRY(ascopy)
	wr	%o0, 0, %asi
	_MEMCPY(%o2, %o1, %o3, a, %asi, a, %asi)
	retl
	nop
END(ascopy)
224
/*
 * void ascopyfrom(u_long sasi, vm_offset_t src, caddr_t dst, size_t len)
 *
 * Copy from an ASI-addressed source into a normally-addressed buffer.
 */
ENTRY(ascopyfrom)
	wr	%o0, 0, %asi
	_MEMCPY(%o2, %o1, %o3, E, E, a, %asi)
	retl
	nop
END(ascopyfrom)
234
/*
 * void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len)
 *
 * Copy from a normally-addressed buffer to an ASI-addressed destination.
 */
ENTRY(ascopyto)
	wr	%o1, 0, %asi
	_MEMCPY(%o2, %o0, %o3, a, %asi, E, E)
	retl
	nop
END(ascopyto)
244
/*
 * void aszero(u_long asi, vm_offset_t pa, size_t len)
 *
 * Zero len bytes at pa through the given ASI (%g0 supplies pattern 0).
 */
ENTRY(aszero)
	wr	%o0, 0, %asi
	_MEMSET(%o1, %g0, %o2, a, %asi)
	retl
	nop
END(aszero)
254
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Byte-by-byte compare; returns 0 iff the buffers are equal.  On a
 * mismatch the remaining length is returned, which is merely non-zero
 * (no sign convention like memcmp(3)).
 */
ENTRY(bcmp)
	brz,pn	%o2, 2f			! len == 0: equal (%o2 is 0)
	clr	%o3			! %o3 = byte index (delay slot)
1:	ldub	[%o0 + %o3], %o4
	ldub	[%o1 + %o3], %o5
	cmp	%o4, %o5
	bne,pn	%xcc, 2f		! mismatch: return remaining len
	inc	%o3
	deccc	%o2
	bne,pt	%xcc, 1b
	nop
2:	retl
	mov	%o2, %o0		! 0 iff all bytes matched
END(bcmp)
272
/*
 * void bcopy(const void *src, void *dst, size_t len)
 */
ENTRY(bcopy)
	/*
	 * Check for overlap, and copy backwards if so.
	 *
	 * (dst - src) compared unsigned against len: if dst lies inside
	 * [src, src + len) a forward copy would clobber not-yet-copied
	 * source bytes, so fall through to the backward byte loop.
	 */
	sub	%o1, %o0, %g1
	cmp	%g1, %o2
	bgeu,a,pt %xcc, 3f
	nop

	/*
	 * Copy backwards, one byte at a time, starting at the end.
	 */
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
1:	deccc	1, %o2
	bl,a,pn	%xcc, 2f
	nop
	dec	1, %o0
	ldub	[%o0], %g1
	dec	1, %o1
	ba	%xcc, 1b
	stb	%g1, [%o1]		! store in the delay slot
2:	retl
	nop

	/*
	 * Do the fast version (no harmful overlap).
	 */
3:	_MEMCPY(%o1, %o0, %o2, E, E, E, E)
	retl
	nop
END(bcopy)
308
/*
 * void bzero(void *b, size_t len)
 *
 * Zero len bytes at b; %g0 supplies the all-zero pattern.
 */
ENTRY(bzero)
	_MEMSET(%o0, %g0, %o1, E, E)
	retl
	nop
END(bzero)
317
/*
 * int copystr(const void *src, void *dst, size_t len, size_t *done)
 *
 * Kernel-to-kernel string copy; the status from _COPYSTR comes back
 * in %g1 and is returned to the caller.
 */
ENTRY(copystr)
	_COPYSTR(%o0, %o1, %o2, %o3, E, E, E, E)
	retl
	mov	%g1, %o0
END(copystr)
326
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 *
 * dst is kept intact in %o0 as the return value; the copy body
 * consumes a scratch copy of it in %o3.
 */
ENTRY(memcpy)
	mov	%o0, %o3
	_MEMCPY(%o3, %o1, %o2, E, E, E, E)
	retl
	nop
END(memcpy)
336
/*
 * void *memset(void *b, int c, size_t len)
 *
 * b is kept intact in %o0 as the return value; _MEMSET truncates the
 * pattern %o1 to a byte itself.
 */
ENTRY(memset)
	mov	%o0, %o3
	_MEMSET(%o3, %o1, %o2, E, E)
	retl
	nop
END(memset)
346
	/*
	 * Start of the PC range in which faults are recoverable;
	 * presumably the trap code redirects faults between
	 * copy_nofault_begin and copy_nofault_end to copy_fault —
	 * the matching checks live elsewhere, not in this file.
	 */
	.globl	copy_nofault_begin
copy_nofault_begin:
	nop
350
/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Loads go through ASI_AIUP ("as if user, primary") so uaddr is
 * translated in the user context.  Returns 0 on success; a fault in
 * this range is expected to be redirected to copy_fault (EFAULT).
 */
ENTRY(copyin)
	wr	%g0, ASI_AIUP, %asi
	_MEMCPY(%o1, %o0, %o2, E, E, a, %asi)
	retl
	clr	%o0
END(copyin)
360
/*
 * int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
 *
 * String copy from user space; returns 0 or ENAMETOOLONG from
 * _COPYSTR (in %g1), or EFAULT via copy_fault on a bad user address.
 */
ENTRY(copyinstr)
	wr	%g0, ASI_AIUP, %asi
	_COPYSTR(%o0, %o1, %o2, %o3, a, %asi, E, E)
	retl
	mov	%g1, %o0
END(copyinstr)
370
/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Stores go through ASI_AIUP so uaddr is translated in the user
 * context.  Returns 0 on success; faults land in copy_fault (EFAULT).
 */
ENTRY(copyout)
	wr	%g0, ASI_AIUP, %asi
	_MEMCPY(%o1, %o0, %o2, a, %asi, E, E)
	retl
	clr	%o0
END(copyout)
380
	.globl	copy_nofault_end
copy_nofault_end:
	nop

/*
 * Recovery point for faults taken in the copy_nofault region:
 * return EFAULT to the caller of the interrupted copy routine.
 */
ENTRY(copy_fault)
	retl
	mov	EFAULT, %o0
END(copy_fault)
389
	/* Start of the recoverable-fault range for the fetch/store ops. */
	.globl	fs_nofault_begin
fs_nofault_begin:
	nop

/*
 * Chatty aliases for fetch, store functions.
 */
	.globl	fubyte, fusword, fuword, subyte, susword, suword
	.set	fubyte, fuword8
	.set	fusword, fuword16
	.set	fuword, fuword64
	.set	subyte, suword8
	.set	susword, suword16
	.set	suword, suword64

/* Pointer-sized (64-bit on sparc64) variants. */
	.globl	casuptr, fuptr, suptr
	.set	casuptr, casuword64
	.set	fuptr, fuword64
	.set	suptr, suword64
409
/*
 * int32_t casuword32(volatile int32_t *p, int32_t e, int32_t s)
 *
 * Atomic compare-and-swap on a user-space word: if *p == e then
 * *p = s; the previous value of *p is returned either way.
 */
ENTRY(casuword32)
	casa	[%o0] ASI_AIUP, %o1, %o2
	retl
	mov	%o2, %o0
END(casuword32)
418
/*
 * int64_t casuword64(volatile int64_t *p, int64_t e, int64_t s)
 *
 * 64-bit variant of casuword32 (casxa instead of casa).
 */
ENTRY(casuword64)
	casxa	[%o0] ASI_AIUP, %o1, %o2
	retl
	mov	%o2, %o0
END(casuword64)
427
/*
 * int fuword8(const void *base)
 *
 * Fetch an unsigned byte from a user address via ASI_AIUP; the load
 * executes in the delay slot of retl.  A fault here is presumably
 * turned into a -1 return by the trap code (fs_nofault range).
 */
ENTRY(fuword8)
	retl
	lduba	[%o0] ASI_AIUP, %o0
END(fuword8)
435
/*
 * int fuword16(const void *base)
 *
 * Fetch an unsigned 16-bit word from a user address via ASI_AIUP.
 */
ENTRY(fuword16)
	retl
	lduha	[%o0] ASI_AIUP, %o0
END(fuword16)
443
/*
 * int32_t fuword32(const void *base)
 *
 * Fetch an unsigned 32-bit word from a user address via ASI_AIUP.
 */
ENTRY(fuword32)
	retl
	lduwa	[%o0] ASI_AIUP, %o0
END(fuword32)
451
/*
 * int64_t fuword64(const void *base)
 *
 * Fetch a 64-bit word from a user address via ASI_AIUP.
 */
ENTRY(fuword64)
	retl
	ldxa	[%o0] ASI_AIUP, %o0
END(fuword64)
459
/*
 * int suword8(const void *base, int word)
 *
 * Store a byte to a user address via ASI_AIUP; returns 0 on success
 * (a fault presumably yields -1 via the fs_nofault machinery).
 */
ENTRY(suword8)
	stba	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0
END(suword8)
468
/*
 * int suword16(const void *base, int word)
 *
 * Store a 16-bit word to a user address via ASI_AIUP; returns 0.
 */
ENTRY(suword16)
	stha	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0
END(suword16)
477
/*
 * int suword32(const void *base, int32_t word)
 *
 * Store a 32-bit word to a user address via ASI_AIUP; returns 0.
 */
ENTRY(suword32)
	stwa	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0
END(suword32)
486
/*
 * int suword64(const void *base, int64_t word)
 *
 * Store a 64-bit word to a user address via ASI_AIUP; returns 0.
 */
ENTRY(suword64)
	stxa	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0
END(suword64)
495
	/*
	 * The interrupt-time fetch/store ops live in their own nofault
	 * range, presumably so the trap code can distinguish them from
	 * the normal (sleepable) variants above.
	 */
	.globl	fs_nofault_intr_begin
fs_nofault_intr_begin:
	nop

/*
 * int fuswintr(const void *base)
 *
 * Interrupt-safe fetch of a 16-bit user word via ASI_AIUP.
 */
ENTRY(fuswintr)
	retl
	lduha	[%o0] ASI_AIUP, %o0
END(fuswintr)

/*
 * int suswintr(const void *base, int word)
 *
 * Interrupt-safe store of a 16-bit user word via ASI_AIUP; returns 0.
 */
ENTRY(suswintr)
	stha	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0
END(suswintr)
516
	/* End of the interrupt-time fetch/store nofault range. */
	.globl	fs_nofault_intr_end
fs_nofault_intr_end:
	nop

	/* End of the whole fetch/store nofault range. */
	.globl	fs_nofault_end
fs_nofault_end:
	nop
524
/*
 * Recovery point for faults taken in the fs_nofault range (the
 * fuword/suword family): return -1 to signal failure.
 */
ENTRY(fs_fault)
	retl
	mov	-1, %o0
END(fs_fault)
529
	/* Start of the recoverable-fault range for the fasword ops. */
	.globl	fas_nofault_begin
fas_nofault_begin:

/*
 * int fasword8(u_long asi, uint64_t addr, uint8_t *val)
 *
 * Fetch a byte from addr through the caller-supplied ASI and store it
 * through the kernel pointer val; returns 0.  A fault is presumably
 * redirected to fas_fault (-1) by the trap code.  The membars order
 * the alternate-space access against surrounding memory operations.
 */
ENTRY(fasword8)
	wr	%o0, 0, %asi
	membar	#Sync
	lduba	[%o1] %asi, %o3
	membar	#Sync
	stb	%o3, [%o2]
	retl
	clr	%o0
END(fasword8)
545
/*
 * int fasword16(u_long asi, uint64_t addr, uint16_t *val)
 *
 * 16-bit variant of fasword8; returns 0, or -1 via fas_fault.
 */
ENTRY(fasword16)
	wr	%o0, 0, %asi
	membar	#Sync
	lduha	[%o1] %asi, %o3
	membar	#Sync
	sth	%o3, [%o2]
	retl
	clr	%o0
END(fasword16)
558
/*
 * int fasword32(u_long asi, uint64_t addr, uint32_t *val)
 *
 * 32-bit variant of fasword8; returns 0, or -1 via fas_fault.
 */
ENTRY(fasword32)
	wr	%o0, 0, %asi
	membar	#Sync
	lduwa	[%o1] %asi, %o3
	membar	#Sync
	stw	%o3, [%o2]
	retl
	clr	%o0
END(fasword32)
571
	.globl	fas_nofault_end
fas_nofault_end:
	nop

/*
 * Recovery point for faults in the fas_nofault range: return -1.
 */
	.globl	fas_fault
ENTRY(fas_fault)
	retl
	mov	-1, %o0
END(fas_fault)
581
	/* Start of the FPU block-operation fault range (see fpu_fault_size). */
	.globl	fpu_fault_begin
fpu_fault_begin:
	nop
585
/*
 * void spitfire_block_copy(void *src, void *dst, size_t len)
 *
 * Copy using 64-byte FP block loads/stores (ASI_BLK_S).  The block
 * ASI requires 64-byte-aligned addresses, and the pipelined loop
 * consumes two 64-byte blocks per iteration — assumes len is sized
 * accordingly (page-sized transfers); TODO confirm with the callers.
 */
ENTRY(spitfire_block_copy)
	rdpr	%pil, %o3		! save the PIL ...
	wrpr	%g0, PIL_TICK, %pil	! ... and block interrupts

	wr	%g0, ASI_BLK_S, %asi	! 64-byte block transfers
	wr	%g0, FPRS_FEF, %fprs	! enable the FPU

	/*
	 * If the current trapframe shows the FPU enabled, spill the
	 * user's FP registers into the pcb and flag them for reload.
	 */
	sub	PCB_REG, TF_SIZEOF, %o4
	ldx	[%o4 + TF_FPRS], %o5
	andcc	%o5, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f
	nop
	stda	%f0, [PCB_REG + PCB_UFP + (0 * 64)] %asi
	stda	%f16, [PCB_REG + PCB_UFP + (1 * 64)] %asi
	stda	%f32, [PCB_REG + PCB_UFP + (2 * 64)] %asi
	stda	%f48, [PCB_REG + PCB_UFP + (3 * 64)] %asi
	membar	#Sync

	andn	%o5, FPRS_FEF, %o5	! FPU no longer live in the frame
	stx	%o5, [%o4 + TF_FPRS]
	ldx	[PCB_REG + PCB_FLAGS], %o4
	or	%o4, PCB_FEF, %o4	! pcb now holds FP state to restore
	stx	%o4, [PCB_REG + PCB_FLAGS]

1:	wrpr	%o3, 0, %pil		! state saved; interrupts ok again

	/*
	 * Software-pipelined loop: %f0-%f14 and %f16-%f30 alternate as
	 * the incoming block while %f32-%f46 stages the outgoing one.
	 */
	ldda	[%o0] %asi, %f0
	add	%o0, 64, %o0
	sub	%o2, 64, %o2

2:	ldda	[%o0] %asi, %f16
	fsrc1	%f0, %f32
	fsrc1	%f2, %f34
	fsrc1	%f4, %f36
	fsrc1	%f6, %f38
	fsrc1	%f8, %f40
	fsrc1	%f10, %f42
	fsrc1	%f12, %f44
	fsrc1	%f14, %f46
	stda	%f32, [%o1] %asi
	add	%o0, 64, %o0
	subcc	%o2, 64, %o2
	bz,pn	%xcc, 3f		! done: last block is in %f16-%f30
	add	%o1, 64, %o1
	ldda	[%o0] %asi, %f0
	fsrc1	%f16, %f32
	fsrc1	%f18, %f34
	fsrc1	%f20, %f36
	fsrc1	%f22, %f38
	fsrc1	%f24, %f40
	fsrc1	%f26, %f42
	fsrc1	%f28, %f44
	fsrc1	%f30, %f46
	stda	%f32, [%o1] %asi
	add	%o0, 64, %o0
	sub	%o2, 64, %o2
	ba	%xcc, 2b
	add	%o1, 64, %o1

3:	membar	#Sync			! drain the pending block stores

	stda	%f16, [%o1] %asi	! write the final block
	membar	#Sync

	wr	%g0, 0, %fprs		! disable the FPU again

	retl
	nop
END(spitfire_block_copy)
658
/*
 * void spitfire_block_zero(void *dst, size_t len)
 *
 * Zero len bytes using FP block stores of a zeroed register block.
 * Each loop iteration clears 256 bytes, so len is assumed to be a
 * non-zero multiple of 256 — TODO confirm with the callers.
 */
ENTRY(spitfire_block_zero)
	rdpr	%pil, %o3		! save the PIL ...
	wrpr	%g0, PIL_TICK, %pil	! ... and block interrupts

	wr	%g0, ASI_BLK_S, %asi	! 64-byte block stores
	wr	%g0, FPRS_FEF, %fprs	! enable the FPU

	/*
	 * If the current trapframe shows the FPU enabled, spill the
	 * user's FP registers into the pcb and flag them for reload.
	 */
	sub	PCB_REG, TF_SIZEOF, %o4
	ldx	[%o4 + TF_FPRS], %o5
	andcc	%o5, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f
	nop
	stda	%f0, [PCB_REG + PCB_UFP + (0 * 64)] %asi
	stda	%f16, [PCB_REG + PCB_UFP + (1 * 64)] %asi
	stda	%f32, [PCB_REG + PCB_UFP + (2 * 64)] %asi
	stda	%f48, [PCB_REG + PCB_UFP + (3 * 64)] %asi
	membar	#Sync

	andn	%o5, FPRS_FEF, %o5	! FPU no longer live in the frame
	stx	%o5, [%o4 + TF_FPRS]
	ldx	[PCB_REG + PCB_FLAGS], %o4
	or	%o4, PCB_FEF, %o4	! pcb now holds FP state to restore
	stx	%o4, [PCB_REG + PCB_FLAGS]

1:	wrpr	%o3, 0, %pil		! state saved; interrupts ok again

	fzero	%f0			! build a 64-byte block of zeros
	fzero	%f2
	fzero	%f4
	fzero	%f6
	fzero	%f8
	fzero	%f10
	fzero	%f12
	fzero	%f14

2:	stda	%f0, [%o0] %asi		! four block stores per iteration
	stda	%f0, [%o0 + 64] %asi
	stda	%f0, [%o0 + 128] %asi
	stda	%f0, [%o0 + 192] %asi
	sub	%o1, 256, %o1
	brnz	%o1, 2b
	add	%o0, 256, %o0
	membar	#Sync			! drain the pending block stores

	wr	%g0, 0, %fprs		! disable the FPU again

	retl
	nop
END(spitfire_block_zero)
711
	.globl	fpu_fault_end
fpu_fault_end:
	nop

	/*
	 * Size of the FPU block-operation region, presumably used by
	 * the trap code for its recoverable-fault range check.
	 */
	.globl	fpu_fault_size
	.set	fpu_fault_size, fpu_fault_end - fpu_fault_begin
718
/*
 * Kernel longjmp(env, val): pop register windows with restore until
 * %fp reaches the frame saved by setjmp(), reload %sp and the return
 * address, and return in the setjmp() caller's context.
 *
 * NOTE(review): %g3 starts as 1, and movrz replaces it with %o1 when
 * %o1 (val) is zero — i.e. any non-zero val returns 1 and val == 0
 * returns 0.  Verify the intended sense; kernel callers appear to
 * always pass a non-zero val.
 */
ENTRY(longjmp)
	set	1, %g3
	movrz	%o1, %o1, %g3
	mov	%o0, %g1
	ldx	[%g1 + _JB_FP], %g2
1:	cmp	%fp, %g2		! unwind until the saved frame
	bl,a,pt	%xcc, 1b
	restore
	bne,pn	%xcc, 2f		! overshot the frame: stale buffer
	ldx	[%g1 + _JB_SP], %o2
	cmp	%o2, %sp
	blt,pn	%xcc, 2f		! saved %sp below ours: botch
	movge	%xcc, %o2, %sp
	ldx	[%g1 + _JB_PC], %o7
	retl
	mov	%g3, %o0
2:	PANIC("longjmp botch", %l1)
END(longjmp)
737
/*
 * Kernel setjmp(env): record %sp, the return address (%o7) and %fp.
 * The direct return (here) yields 0; returns via longjmp() yield the
 * value cooked up there.
 */
ENTRY(setjmp)
	stx	%sp, [%o0 + _JB_SP]
	stx	%o7, [%o0 + _JB_PC]
	stx	%fp, [%o0 + _JB_FP]
	retl
	clr	%o0			! direct return value is 0
END(setjmp)
745
/*
 * void openfirmware(cell_t args[])
 *
 * Trampoline into the OpenFirmware client interface: raise the PIL so
 * the tick interrupt cannot fire while firmware runs, call *ofw_vec
 * with the argument array, then restore the PIL.  The firmware's
 * return value is propagated in %o0 through the restore.
 */
ENTRY(openfirmware)
	save	%sp, -CCFSZ, %sp
	SET(ofw_vec, %l7, %l6)
	ldx	[%l6], %l6
	rdpr	%pil, %l7		! save the PIL ...
	wrpr	%g0, PIL_TICK, %pil	! ... and block interrupts
	call	%l6
	mov	%i0, %o0		! args pointer (delay slot)
	wrpr	%l7, 0, %pil
	ret
	restore %o0, %g0, %o0		! propagate the firmware's %o0
END(openfirmware)
761
/*
 * void ofw_exit(cell_t args[])
 *
 * Call into OpenFirmware for the last time (e.g. to exit/reboot):
 * restore the firmware trap table, switch to a stack in the locked
 * kstack0 page, force context 0 and trap level 0, then invoke the
 * firmware entry vector.  Does not return.
 */
ENTRY(openfirmware_exit)
	save	%sp, -CCFSZ, %sp
	flushw				! spill register windows to the stack
	wrpr	%g0, PIL_TICK, %pil	! block interrupts
	SET(ofw_tba, %l7, %l5)
	ldx	[%l5], %l5
	wrpr	%l5, 0, %tba		! restore the ofw trap table
	SET(ofw_vec, %l7, %l6)
	ldx	[%l6], %l6
	SET(kstack0 + KSTACK_PAGES * PAGE_SIZE - PCB_SIZEOF, %l7, %l0)
	sub	%l0, SPOFF, %fp		! setup a stack in a locked page
	sub	%l0, SPOFF + CCFSZ, %sp
	mov	AA_DMMU_PCXR, %l3	! set context 0
	stxa	%g0, [%l3] ASI_DMMU
	membar	#Sync
	wrpr	%g0, 0, %tl		! force trap level 0
	call	%l6
	mov	%i0, %o0
	! never to return
END(openfirmware_exit)
785
#ifdef GPROF

/*
 * Profiling marker entries: these exist only as labels delimiting
 * address ranges for the profiler; they deliberately have no END().
 */
ENTRY(user)
	nop

ENTRY(btrap)
	nop

ENTRY(etrap)
	nop

ENTRY(bintr)
	nop

ENTRY(eintr)
	nop


/*
 * XXX including sys/gmon.h in genassym.c is not possible due to uintfptr_t
 * badness.
 */
#define	GM_STATE	0x0
#define	GMON_PROF_OFF	3
#define	GMON_PROF_HIRES	4

	.globl	_mcount
	.set	_mcount, __cyg_profile_func_enter

/*
 * Compiler-inserted entry hook: tail-calls mcount unless profiling is
 * switched off in _gmonparam.state.
 */
ENTRY(__cyg_profile_func_enter)
	SET(_gmonparam, %o3, %o2)
	lduw	[%o2 + GM_STATE], %o3
	cmp	%o3, GMON_PROF_OFF
	be,a,pn	%icc, 1f		! profiling off: do nothing
	nop
	SET(mcount, %o3, %o2)
	jmpl	%o2, %g0		! tail call; mcount returns for us
	nop
1:	retl
	nop
END(__cyg_profile_func_enter)

#ifdef GUPROF

/*
 * Compiler-inserted exit hook for high-resolution profiling.
 *
 * NOTE(review): the branch skips mexitcount when state equals
 * GMON_PROF_HIRES — verify the intended sense against sys/gmon.h.
 */
ENTRY(__cyg_profile_func_exit)
	SET(_gmonparam, %o3, %o2)
	lduw	[%o2 + GM_STATE], %o3
	cmp	%o3, GMON_PROF_HIRES
	be,a,pn	%icc, 1f
	nop
	SET(mexitcount, %o3, %o2)
	jmpl	%o2, %g0		! tail call
	nop
1:	retl
	nop
END(__cyg_profile_func_exit)

#endif /* GUPROF */

#endif /* GPROF */
/* Cache object: f73b26e40e75a7887ee8228dfe075ed1 */