1 /*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27 #include <machine/asm.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/errno.h>
31
32 #include <machine/asi.h>
33 #include <machine/asmacros.h>
34 #include <machine/fsr.h>
35 #include <machine/intr_machdep.h>
36 #include <machine/ktr.h>
37 #include <machine/pcb.h>
38 #include <machine/pstate.h>
39
40 #include "assym.s"
41
42 .register %g2, #ignore
43 .register %g3, #ignore
44 .register %g6, #ignore
45
46 #define E /* empty */
47
48 /*
49 * Generate load and store instructions for the corresponding width and asi
50 * (or not). Note that we want to evaluate the macro args before
51 * concatenating, so that E really turns into nothing.
52 */
53 #define _LD(w, a) ld ## w ## a
54 #define _ST(w, a) st ## w ## a
55
56 #define LD(w, a) _LD(w, a)
57 #define ST(w, a) _ST(w, a)
58
59 /*
60 * Common code for copy routines.
61 *
62 * We use large macros to generate functions for each of the copy routines.
63 * This allows the load and store instructions to be generated for the right
64 * operation, asi or not. It is possible to write an asi independent function
65 * but this would require 2 expensive wrs in the main loop to switch %asi.
66 * It would also screw up profiling (if we ever get it), but may save some I$.
67 * We assume that either one of dasi and sasi is empty, or that they are both
68 * the same (empty or non-empty). It is up to the caller to set %asi.
69 */
70
/*
 * ASI independent implementation of copystr(9).
 * Used to implement copyinstr() and copystr().
 *
 * Copies bytes from src to dst until a NUL byte has been transferred
 * or len bytes have been copied.  If the buffer is exhausted before a
 * NUL is seen, the result is ENAMETOOLONG.  If done is non-NULL, the
 * number of bytes transferred (including the NUL, if any) is stored
 * through it.
 *
 * Return value is in %g1 (0 on success, ENAMETOOLONG on overflow).
 */
#define	_COPYSTR(src, dst, len, done, sa, sasi, da, dasi) \
	brz	len, 4f ; \
	mov	src, %g2 ; /* remember start of src for the byte count */ \
1:	deccc	1, len ; \
	bl,a,pn	%xcc, 2f ; /* buffer exhausted: return ENAMETOOLONG */ \
	nop ; \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	brz,pn	%g1, 3f ; /* NUL copied: %g1 == 0 is the success code */ \
	inc	src ; \
	ba	%xcc, 1b ; \
	inc	dst ; \
2:	mov	ENAMETOOLONG, %g1 ; \
3:	sub	src, %g2, %g2 ; /* %g2 = number of bytes transferred */ \
	brnz,a	done, 4f ; \
	stx	%g2, [done] ; /* annulled delay slot: only if done != NULL */ \
4:
94
/*
 * ASI independent implementation of memset(3).
 * Used to implement bzero(), memset() and aszero().
 *
 * If the pattern is non-zero, duplicate it to fill 64 bits.
 * Store bytes until dst is 8-byte aligned, then store 8 bytes.
 * It has yet to be determined how much unrolling is beneficial.
 * Could also read and compare before writing to minimize snoop traffic.
 *
 * XXX bzero() should be implemented as
 * #define bzero(dst, len) (void)memset((dst), 0, (len))
 * if at all.
 */
#define	_MEMSET(dst, pat, len, da, dasi) \
	brlez,pn len, 5f ; /* nothing to do for len <= 0 */ \
	and	pat, 0xff, pat ; /* only the low byte of the pattern is used */ \
	brz,pt	pat, 1f ; /* a zero pattern needs no replication */ \
	sllx	pat, 8, %g1 ; \
	or	pat, %g1, pat ; /* pattern now fills 16 bits */ \
	sllx	pat, 16, %g1 ; \
	or	pat, %g1, pat ; /* ... 32 bits */ \
	sllx	pat, 32, %g1 ; \
	or	pat, %g1, pat ; /* ... 64 bits */ \
	.align 16 ; \
1:	deccc	1, len ; /* byte stores until dst is 8-byte aligned */ \
	bl,pn	%xcc, 5f ; \
	btst	7, dst ; \
	bz,a,pt	%xcc, 2f ; \
	inc	1, len ; /* aligned: undo the decrement, go do 8-byte stores */ \
	ST(b, da) pat, [dst] dasi ; \
	ba	%xcc, 1b ; \
	inc	dst ; \
	.align 16 ; \
2:	deccc	32, len ; /* unrolled loop: 4 x 8-byte stores per pass */ \
	bl,a,pn	%xcc, 3f ; \
	inc	32, len ; /* restore len before the smaller loop */ \
	ST(x, da) pat, [dst] dasi ; \
	ST(x, da) pat, [dst + 8] dasi ; \
	ST(x, da) pat, [dst + 16] dasi ; \
	ST(x, da) pat, [dst + 24] dasi ; \
	ba	%xcc, 2b ; \
	inc	32, dst ; \
	.align 16 ; \
3:	deccc	8, len ; /* single 8-byte stores */ \
	bl,a,pn	%xcc, 4f ; \
	inc	8, len ; \
	ST(x, da) pat, [dst] dasi ; \
	ba	%xcc, 3b ; \
	inc	8, dst ; \
	.align 16 ; \
4:	deccc	1, len ; /* trailing bytes */ \
	bl,a,pn	%xcc, 5f ; \
	nop ; \
	ST(b, da) pat, [dst] dasi ; \
	ba	%xcc, 4b ; \
	inc	1, dst ; \
5:
152
/*
 * ASI independent implementation of memcpy(3).
 * Used to implement bcopy(), copyin(), copyout(), memcpy(), ascopy(),
 * ascopyfrom() and ascopyto().
 *
 * Transfer bytes until dst is 8-byte aligned.  If src is then also 8 byte
 * aligned, transfer 8 bytes, otherwise finish with bytes.  The unaligned
 * case could be optimized, but it is expected that this is the uncommon
 * case and of questionable value.  The code to do so is also rather large
 * and ugly.  It has yet to be determined how much unrolling is beneficial.
 *
 * Clobbers %g1-%g4.
 *
 * XXX bcopy() must also check for overlap.  This is stupid.
 * XXX bcopy() should be implemented as
 * #define bcopy(src, dst, len) (void)memcpy((dst), (src), (len))
 * if at all.
 */
#define	_MEMCPY(dst, src, len, da, dasi, sa, sasi) \
1:	deccc	1, len ; /* byte copies until dst is 8-byte aligned */ \
	bl,pn	%xcc, 6f ; \
	btst	7, dst ; \
	bz,a,pt	%xcc, 2f ; \
	inc	1, len ; /* aligned: undo the decrement */ \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	inc	1, src ; \
	ba	%xcc, 1b ; \
	inc	1, dst ; \
	.align 16 ; \
2:	btst	7, src ; /* is src 8-byte aligned as well? */ \
	bz,a,pt	%xcc, 3f ; \
	nop ; \
	ba,a	%xcc, 5f ; /* no: finish with byte copies */ \
	.align 16 ; \
3:	deccc	32, len ; /* unrolled loop: 4 x 8-byte transfers per pass */ \
	bl,a,pn	%xcc, 4f ; \
	inc	32, len ; /* restore len before the smaller loop */ \
	LD(x, sa) [src] sasi, %g1 ; \
	LD(x, sa) [src + 8] sasi, %g2 ; \
	LD(x, sa) [src + 16] sasi, %g3 ; \
	LD(x, sa) [src + 24] sasi, %g4 ; \
	ST(x, da) %g1, [dst] dasi ; \
	ST(x, da) %g2, [dst + 8] dasi ; \
	ST(x, da) %g3, [dst + 16] dasi ; \
	ST(x, da) %g4, [dst + 24] dasi ; \
	inc	32, src ; \
	ba	%xcc, 3b ; \
	inc	32, dst ; \
	.align 16 ; \
4:	deccc	8, len ; /* single 8-byte transfers */ \
	bl,a,pn	%xcc, 5f ; \
	inc	8, len ; \
	LD(x, sa) [src] sasi, %g1 ; \
	ST(x, da) %g1, [dst] dasi ; \
	inc	8, src ; \
	ba	%xcc, 4b ; \
	inc	8, dst ; \
	.align 16 ; \
5:	deccc	1, len ; /* trailing bytes */ \
	bl,a,pn	%xcc, 6f ; \
	nop ; \
	LD(ub, sa) [src] sasi, %g1 ; \
	ST(b, da) %g1, [dst] dasi ; \
	inc	src ; \
	ba	%xcc, 5b ; \
	inc	dst ; \
6:
219
/*
 * void ascopy(u_long asi, vm_offset_t src, vm_offset_t dst, size_t len)
 *
 * Copy between two buffers that are both accessed through the
 * caller-supplied ASI.
 */
ENTRY(ascopy)
	wr	%o0, 0, %asi		! load the caller's ASI into %asi
	_MEMCPY(%o2, %o1, %o3, a, %asi, a, %asi)
	retl
	nop
END(ascopy)
229
/*
 * void ascopyfrom(u_long sasi, vm_offset_t src, caddr_t dst, size_t len)
 *
 * Copy from a buffer accessed through the given ASI to a normally
 * addressed buffer.
 */
ENTRY(ascopyfrom)
	wr	%o0, 0, %asi		! source accesses use the caller's ASI
	_MEMCPY(%o2, %o1, %o3, E, E, a, %asi)
	retl
	nop
END(ascopyfrom)
239
/*
 * void ascopyto(caddr_t src, u_long dasi, vm_offset_t dst, size_t len)
 *
 * Copy from a normally addressed buffer to a buffer accessed through
 * the given ASI.
 */
ENTRY(ascopyto)
	wr	%o1, 0, %asi		! destination accesses use the caller's ASI
	_MEMCPY(%o2, %o0, %o3, a, %asi, E, E)
	retl
	nop
END(ascopyto)
249
/*
 * void aszero(u_long asi, vm_offset_t pa, size_t len)
 *
 * Zero a buffer accessed through the given ASI.
 */
ENTRY(aszero)
	wr	%o0, 0, %asi
	_MEMSET(%o1, %g0, %o2, a, %asi)	! %g0 supplies the all-zero pattern
	retl
	nop
END(aszero)
259
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Simple byte-wise compare: returns 0 if the buffers are equal over
 * len bytes, non-zero otherwise.
 */
ENTRY(bcmp)
	brz,pn	%o2, 2f			! len == 0: compare equal
	clr	%o3			! %o3 = byte index
1:	ldub	[%o0 + %o3], %o4
	ldub	[%o1 + %o3], %o5
	cmp	%o4, %o5
	bne,pn	%xcc, 2f		! mismatch: %o2 (remaining) is still > 0
	inc	%o3
	deccc	%o2
	bne,pt	%xcc, 1b
	nop
2:	retl
	mov	%o2, %o0		! 0 iff all bytes matched
END(bcmp)
277
/*
 * void bcopy(const void *src, void *dst, size_t len)
 *
 * Overlap-safe copy: picks a backwards byte copy when a forward copy
 * would clobber not-yet-read source bytes.
 */
ENTRY(bcopy)
	/*
	 * Check for overlap, and copy backwards if so.
	 * (dst - src) >= len (unsigned) implies a forward copy is safe;
	 * this also covers dst < src, where the subtraction wraps to a
	 * large unsigned value.
	 */
	sub	%o1, %o0, %g1
	cmp	%g1, %o2
	bgeu,a,pt %xcc, 3f
	nop

	/*
	 * Copy backwards, one byte at a time, starting at the end of
	 * both buffers.
	 */
	add	%o0, %o2, %o0
	add	%o1, %o2, %o1
1:	deccc	1, %o2
	bl,a,pn	%xcc, 2f
	nop
	dec	1, %o0
	ldub	[%o0], %g1
	dec	1, %o1
	ba	%xcc, 1b
	stb	%g1, [%o1]		! delay slot: store the byte
2:	retl
	nop

	/*
	 * Do the fast version.
	 */
3:	_MEMCPY(%o1, %o0, %o2, E, E, E, E)
	retl
	nop
END(bcopy)
313
/*
 * void bzero(void *b, size_t len)
 */
ENTRY(bzero)
	_MEMSET(%o0, %g0, %o1, E, E)	! %g0 supplies the all-zero pattern
	retl
	nop
END(bzero)
322
/*
 * int copystr(const void *src, void *dst, size_t len, size_t *done)
 *
 * Kernel-to-kernel string copy; no ASI overrides involved.
 */
ENTRY(copystr)
	_COPYSTR(%o0, %o1, %o2, %o3, E, E, E, E)
	retl
	mov	%g1, %o0		! delay slot: 0 or ENAMETOOLONG from _COPYSTR
END(copystr)
331
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 */
ENTRY(memcpy)
	mov	%o0, %o3		! work on a scratch copy; %o0 is the return value
	_MEMCPY(%o3, %o1, %o2, E, E, E, E)
	retl
	nop				! %o0 still holds dst
END(memcpy)
341
/*
 * void *memset(void *b, int c, size_t len)
 */
ENTRY(memset)
	mov	%o0, %o3		! work on a scratch copy; %o0 is the return value
	_MEMSET(%o3, %o1, %o2, E, E)
	retl
	nop				! %o0 still holds b
END(memset)
351
352 .globl copy_nofault_begin
353 copy_nofault_begin:
354 nop
355
/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * Returns 0 on success.  This code lies between copy_nofault_begin and
 * copy_nofault_end; NOTE(review): faults presumably vector to
 * copy_fault (EFAULT) -- confirm against the trap handling code.
 */
ENTRY(copyin)
	wr	%g0, ASI_AIUP, %asi	! source reads via as-if-user-primary ASI
	_MEMCPY(%o1, %o0, %o2, E, E, a, %asi)
	retl
	clr	%o0			! success
END(copyin)
365
/*
 * int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done)
 *
 * String copy from user space into the kernel.
 */
ENTRY(copyinstr)
	wr	%g0, ASI_AIUP, %asi	! source reads via as-if-user-primary ASI
	_COPYSTR(%o0, %o1, %o2, %o3, a, %asi, E, E)
	retl
	mov	%g1, %o0		! 0 or ENAMETOOLONG from _COPYSTR
END(copyinstr)
375
/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Returns 0 on success.
 */
ENTRY(copyout)
	wr	%g0, ASI_AIUP, %asi	! destination writes via as-if-user-primary ASI
	_MEMCPY(%o1, %o0, %o2, a, %asi, E, E)
	retl
	clr	%o0			! success
END(copyout)
385
386 .globl copy_nofault_end
387 copy_nofault_end:
388 nop
389
/*
 * Common fault recovery for the copy routines above: returns EFAULT.
 * NOTE(review): entry here is presumably arranged by the trap handler
 * for faults between copy_nofault_begin and copy_nofault_end --
 * confirm against the MMU trap code.
 */
ENTRY(copy_fault)
	retl
	mov	EFAULT, %o0		! delay slot: error return
END(copy_fault)
394
395 .globl fs_nofault_begin
396 fs_nofault_begin:
397 nop
398
399 /*
400 * Chatty aliases for fetch, store functions.
401 */
402 .globl fubyte, fusword, fuword, subyte, susword, suword
403 .set fubyte, fuword8
404 .set fusword, fuword16
405 .set fuword, fuword64
406 .set subyte, suword8
407 .set susword, suword16
408 .set suword, suword64
409
410 .globl casuword32, casuword, fuptr, suptr
411 .set casuword, casuword64
412 .set fuptr, fuword64
413 .set suptr, suword64
414
/*
 * int32_t casuword32(volatile int32_t *p, int32_t e, int32_t s)
 *
 * Compare-and-swap on a 32-bit user-space word: if *p == e then
 * *p = s.  Always returns the previous value of *p.
 */
ENTRY(casuword32)
	casa	[%o0] ASI_AIUP, %o1, %o2
	retl
	mov	%o2, %o0		! old value
END(casuword32)
423
/*
 * int64_t casuword64(volatile int64_t *p, int64_t e, int64_t s)
 *
 * Compare-and-swap on a 64-bit user-space word: if *p == e then
 * *p = s.  Always returns the previous value of *p.
 */
ENTRY(casuword64)
	casxa	[%o0] ASI_AIUP, %o1, %o2
	retl
	mov	%o2, %o0		! old value
END(casuword64)
432
/*
 * int fuword8(const void *base)
 *
 * Fetch an unsigned byte from the user address space (ASI_AIUP).
 */
ENTRY(fuword8)
	retl
	lduba	[%o0] ASI_AIUP, %o0	! delay slot: zero-extending user load
END(fuword8)
440
/*
 * int fuword16(const void *base)
 *
 * Fetch an unsigned halfword from the user address space (ASI_AIUP).
 */
ENTRY(fuword16)
	retl
	lduha	[%o0] ASI_AIUP, %o0	! delay slot: zero-extending user load
END(fuword16)
448
/*
 * int32_t fuword32(const void *base)
 *
 * Fetch a 32-bit word from the user address space (ASI_AIUP).
 */
ENTRY(fuword32)
	retl
	lduwa	[%o0] ASI_AIUP, %o0	! delay slot: zero-extending user load
END(fuword32)
456
/*
 * int64_t fuword64(const void *base)
 *
 * Fetch a 64-bit word from the user address space (ASI_AIUP).
 */
ENTRY(fuword64)
	retl
	ldxa	[%o0] ASI_AIUP, %o0	! delay slot: user load
END(fuword64)
464
/*
 * int suword8(void *base, int word)
 *
 * Store a byte into the user address space (ASI_AIUP); returns 0 on
 * success.
 */
ENTRY(suword8)
	stba	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0			! success
END(suword8)
473
/*
 * int suword16(void *base, int word)
 *
 * Store a halfword into the user address space (ASI_AIUP); returns 0
 * on success.
 */
ENTRY(suword16)
	stha	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0			! success
END(suword16)
482
/*
 * int suword32(void *base, int32_t word)
 *
 * Store a 32-bit word into the user address space (ASI_AIUP); returns
 * 0 on success.
 */
ENTRY(suword32)
	stwa	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0			! success
END(suword32)
491
/*
 * int suword64(void *base, int64_t word)
 *
 * Store a 64-bit word into the user address space (ASI_AIUP); returns
 * 0 on success.
 */
ENTRY(suword64)
	stxa	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0			! success
END(suword64)
500
501 .globl fs_nofault_intr_begin
502 fs_nofault_intr_begin:
503 nop
504
/*
 * int fuswintr(const void *base)
 *
 * Fetch an unsigned halfword from user space.  Lies inside the
 * fs_nofault_intr region; NOTE(review): presumably safe to call from
 * interrupt context -- confirm against the trap handling code.
 */
ENTRY(fuswintr)
	retl
	lduha	[%o0] ASI_AIUP, %o0	! delay slot: user load
END(fuswintr)
512
/*
 * int suswintr(const void *base, int word)
 *
 * Store a halfword into user space; returns 0 on success.  Lies
 * inside the fs_nofault_intr region (see fuswintr()).
 */
ENTRY(suswintr)
	stha	%o1, [%o0] ASI_AIUP
	retl
	clr	%o0			! success
END(suswintr)
521
522 .globl fs_nofault_intr_end
523 fs_nofault_intr_end:
524 nop
525
526 .globl fs_nofault_end
527 fs_nofault_end:
528 nop
529
/*
 * Common fault recovery for the fetch/store functions above: returns
 * -1 to signal failure.  NOTE(review): entry here is presumably
 * arranged by the trap handler for faults between fs_nofault_begin
 * and fs_nofault_end -- confirm against the MMU trap code.
 */
ENTRY(fs_fault)
	retl
	mov	-1, %o0			! delay slot: error return
END(fs_fault)
534
535 .globl fas_nofault_begin
536 fas_nofault_begin:
537
/*
 * int fasword8(u_long asi, uint64_t addr, uint8_t *val)
 *
 * Fetch a byte through an arbitrary caller-supplied ASI and store it
 * through val.  Returns 0 on success.
 */
ENTRY(fasword8)
	wr	%o0, 0, %asi
	membar	#Sync			! order against surrounding accesses
	lduba	[%o1] %asi, %o3
	membar	#Sync
	stb	%o3, [%o2]		! *val = fetched byte
	retl
	clr	%o0			! success
END(fasword8)
550
/*
 * int fasword16(u_long asi, uint64_t addr, uint16_t *val)
 *
 * Fetch a halfword through an arbitrary caller-supplied ASI and store
 * it through val.  Returns 0 on success.
 */
ENTRY(fasword16)
	wr	%o0, 0, %asi
	membar	#Sync			! order against surrounding accesses
	lduha	[%o1] %asi, %o3
	membar	#Sync
	sth	%o3, [%o2]		! *val = fetched halfword
	retl
	clr	%o0			! success
END(fasword16)
563
/*
 * int fasword32(u_long asi, uint64_t addr, uint32_t *val)
 *
 * Fetch a 32-bit word through an arbitrary caller-supplied ASI and
 * store it through val.  Returns 0 on success.
 */
ENTRY(fasword32)
	wr	%o0, 0, %asi
	membar	#Sync			! order against surrounding accesses
	lduwa	[%o1] %asi, %o3
	membar	#Sync
	stw	%o3, [%o2]		! *val = fetched word
	retl
	clr	%o0			! success
END(fasword32)
576
577 .globl fas_nofault_end
578 fas_nofault_end:
579 nop
580
	/*
	 * Fault recovery for the fasword*() functions above: returns -1.
	 */
	.globl fas_fault
ENTRY(fas_fault)
	retl
	mov	-1, %o0			! delay slot: error return
END(fas_fault)
586
587 .globl fpu_fault_begin
588 fpu_fault_begin:
589 nop
590
/*
 * void spitfire_block_copy(void *src, void *dst, size_t len)
 *
 * Copy using 64-byte FP block loads/stores (ASI_BLK_S).
 * NOTE(review): src/dst alignment and len being a non-zero multiple
 * of 64 appear to be preconditions -- the loop only moves whole
 * 64-byte lines and terminates when len reaches exactly 0.
 */
ENTRY(spitfire_block_copy)
	rdpr	%pil, %o3
	wrpr	%g0, PIL_TICK, %pil	! raise PIL while taking over the FPU

	wr	%g0, ASI_BLK_S, %asi
	wr	%g0, FPRS_FEF, %fprs	! enable the FPU

	/*
	 * If the interrupted context had live FP state (FPRS_FEF set in
	 * the trap frame), save it to the PCB and flag it (PCB_FEF) so
	 * that it can be restored later.
	 */
	sub	PCB_REG, TF_SIZEOF, %o4
	ldx	[%o4 + TF_FPRS], %o5
	andcc	%o5, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f
	nop
	stda	%f0, [PCB_REG + PCB_UFP + (0 * 64)] %asi
	stda	%f16, [PCB_REG + PCB_UFP + (1 * 64)] %asi
	stda	%f32, [PCB_REG + PCB_UFP + (2 * 64)] %asi
	stda	%f48, [PCB_REG + PCB_UFP + (3 * 64)] %asi
	membar	#Sync

	andn	%o5, FPRS_FEF, %o5
	stx	%o5, [%o4 + TF_FPRS]	! FP state no longer live in the frame
	ldx	[PCB_REG + PCB_FLAGS], %o4
	or	%o4, PCB_FEF, %o4
	stx	%o4, [PCB_REG + PCB_FLAGS]

1:	wrpr	%o3, 0, %pil		! restore the previous interrupt level

	/*
	 * Software-pipelined copy: the next 64-byte line is loaded into
	 * one bank (%f0-%f14 or %f16-%f30) while the previous line is
	 * moved to %f32-%f46 and stored.
	 */
	ldda	[%o0] %asi, %f0		! prime the pipeline
	add	%o0, 64, %o0
	sub	%o2, 64, %o2

2:	ldda	[%o0] %asi, %f16
	fsrc1	%f0, %f32
	fsrc1	%f2, %f34
	fsrc1	%f4, %f36
	fsrc1	%f6, %f38
	fsrc1	%f8, %f40
	fsrc1	%f10, %f42
	fsrc1	%f12, %f44
	fsrc1	%f14, %f46
	stda	%f32, [%o1] %asi
	add	%o0, 64, %o0
	subcc	%o2, 64, %o2
	bz,pn	%xcc, 3f		! last line pending in %f16-%f30
	add	%o1, 64, %o1
	ldda	[%o0] %asi, %f0
	fsrc1	%f16, %f32
	fsrc1	%f18, %f34
	fsrc1	%f20, %f36
	fsrc1	%f22, %f38
	fsrc1	%f24, %f40
	fsrc1	%f26, %f42
	fsrc1	%f28, %f44
	fsrc1	%f30, %f46
	stda	%f32, [%o1] %asi
	add	%o0, 64, %o0
	sub	%o2, 64, %o2
	ba	%xcc, 2b
	add	%o1, 64, %o1

3:	membar	#Sync

	stda	%f16, [%o1] %asi	! drain the pipeline: store the final line
	membar	#Sync

	wr	%g0, 0, %fprs		! disable the FPU again

	retl
	nop
END(spitfire_block_copy)
663
/*
 * void spitfire_block_zero(void *dst, size_t len)
 *
 * Zero using 64-byte FP block stores (ASI_BLK_S).
 * NOTE(review): dst alignment and len being a non-zero multiple of
 * 256 appear to be preconditions -- the loop steps by 256 and only
 * terminates when len reaches exactly 0.
 */
ENTRY(spitfire_block_zero)
	rdpr	%pil, %o3
	wrpr	%g0, PIL_TICK, %pil	! raise PIL while taking over the FPU

	wr	%g0, ASI_BLK_S, %asi
	wr	%g0, FPRS_FEF, %fprs	! enable the FPU

	/*
	 * If the interrupted context had live FP state (FPRS_FEF set in
	 * the trap frame), save it to the PCB and flag it (PCB_FEF) so
	 * that it can be restored later.
	 */
	sub	PCB_REG, TF_SIZEOF, %o4
	ldx	[%o4 + TF_FPRS], %o5
	andcc	%o5, FPRS_FEF, %g0
	bz,a,pt	%xcc, 1f
	nop
	stda	%f0, [PCB_REG + PCB_UFP + (0 * 64)] %asi
	stda	%f16, [PCB_REG + PCB_UFP + (1 * 64)] %asi
	stda	%f32, [PCB_REG + PCB_UFP + (2 * 64)] %asi
	stda	%f48, [PCB_REG + PCB_UFP + (3 * 64)] %asi
	membar	#Sync

	andn	%o5, FPRS_FEF, %o5
	stx	%o5, [%o4 + TF_FPRS]	! FP state no longer live in the frame
	ldx	[PCB_REG + PCB_FLAGS], %o4
	or	%o4, PCB_FEF, %o4
	stx	%o4, [PCB_REG + PCB_FLAGS]

1:	wrpr	%o3, 0, %pil		! restore the previous interrupt level

	fzero	%f0			! build a 64-byte block of zeros
	fzero	%f2
	fzero	%f4
	fzero	%f6
	fzero	%f8
	fzero	%f10
	fzero	%f12
	fzero	%f14

	/* Local label 1 is reused; "1b" below binds to this loop head. */
1:	stda	%f0, [%o0] %asi
	stda	%f0, [%o0 + 64] %asi
	stda	%f0, [%o0 + 128] %asi
	stda	%f0, [%o0 + 192] %asi
	sub	%o1, 256, %o1
	brnz	%o1, 1b
	add	%o0, 256, %o0		! delay slot: advance dst
	membar	#Sync

	wr	%g0, 0, %fprs		! disable the FPU again

	retl
	nop
END(spitfire_block_zero)
716
717 .globl fpu_fault_end
718 fpu_fault_end:
719 nop
720
721 .globl fpu_fault_size
722 .set fpu_fault_size, fpu_fault_end - fpu_fault_begin
723
/*
 * longjmp(env, val): unwind to the frame saved by setjmp() and make
 * that setjmp() return val (or 1 if val == 0, per the longjmp
 * contract).  Panics if the jmp_buf does not describe a frame above
 * the current one.
 */
ENTRY(longjmp)
	set	1, %g3
	movrnz	%o1, %o1, %g3		! %g3 = (val != 0) ? val : 1
	mov	%o0, %g1		! %g1 = jmp_buf
	ldx	[%g1 + _JB_FP], %g2	! target frame pointer
	/* Pop register windows until %fp reaches the saved frame. */
1:	cmp	%fp, %g2
	bl,a,pt	%xcc, 1b
	restore				! annulled delay slot: only while below
	bne,pn	%xcc, 2f		! overshot the saved frame: botch
	ldx	[%g1 + _JB_SP], %o2
	cmp	%o2, %sp
	blt,pn	%xcc, 2f		! saved %sp below current: botch
	movge	%xcc, %o2, %sp		! adopt the saved stack pointer
	ldx	[%g1 + _JB_PC], %o7	! return to the setjmp() call site
	retl
	mov	%g3, %o0		! delay slot: return value
2:	PANIC("longjmp botch", %l1)
END(longjmp)
742
/*
 * setjmp(env): record the stack pointer, return address and frame
 * pointer for a later longjmp().  The direct return value is 0.
 */
ENTRY(setjmp)
	stx	%sp, [%o0 + _JB_SP]
	stx	%o7, [%o0 + _JB_PC]	! %o7 = caller's return address
	stx	%fp, [%o0 + _JB_FP]
	retl
	clr	%o0			! direct call returns 0
END(setjmp)
750
/*
 * void openfirmware(cell_t args[])
 *
 * Call into Open Firmware through ofw_vec with interrupts blocked
 * for the duration of the call.
 */
ENTRY(openfirmware)
	save	%sp, -CCFSZ, %sp
	SET(ofw_vec, %l7, %l6)
	ldx	[%l6], %l6		! %l6 = firmware entry point
	rdpr	%pil, %l7		! remember the interrupt level
	wrpr	%g0, PIL_TICK, %pil	! raise PIL around the call
	call	%l6
	mov	%i0, %o0		! delay slot: pass the argument array
	wrpr	%l7, 0, %pil		! restore the interrupt level
	ret
	restore	%o0, %g0, %o0		! propagate the firmware's result
END(openfirmware)
766
/*
 * void openfirmware_exit(cell_t args[])
 *
 * Hand control back to the firmware for good: restore the OFW trap
 * table, switch to a stack in a locked page, select context 0 and
 * trap level 0, then call the firmware.  Never returns.
 */
ENTRY(openfirmware_exit)
	save	%sp, -CCFSZ, %sp
	flushw				! flush register windows to the stack
	wrpr	%g0, PIL_TICK, %pil	! block interrupts
	SET(ofw_tba, %l7, %l5)
	ldx	[%l5], %l5
	wrpr	%l5, 0, %tba		! restore the ofw trap table
	SET(ofw_vec, %l7, %l6)
	ldx	[%l6], %l6		! %l6 = firmware entry point
	SET(kstack0 + KSTACK_PAGES * PAGE_SIZE - PCB_SIZEOF, %l7, %l0)
	sub	%l0, SPOFF, %fp		! setup a stack in a locked page
	sub	%l0, SPOFF + CCFSZ, %sp
	mov	AA_DMMU_PCXR, %l3	! set context 0
	stxa	%g0, [%l3] ASI_DMMU
	membar	#Sync
	wrpr	%g0, 0, %tl		! force trap level 0
	call	%l6
	mov	%i0, %o0		! delay slot: pass the argument array
	! never to return
END(openfirmware_exit)
790
791 #ifdef GPROF
792
/*
 * Profiling boundary labels.  NOTE(review): these appear to serve only
 * as text-region markers for the gprof machinery, not as callable
 * functions -- confirm against the sparc64 profiling support.
 */
ENTRY(user)
	nop

ENTRY(btrap)
	nop

ENTRY(etrap)
	nop

ENTRY(bintr)
	nop

ENTRY(eintr)
	nop
807
808
809 /*
810 * XXX including sys/gmon.h in genassym.c is not possible due to uintfptr_t
811 * badness.
812 */
813 #define GM_STATE 0x0
814 #define GMON_PROF_OFF 3
815 #define GMON_PROF_HIRES 4
816
817 .globl _mcount
818 .set _mcount, __cyg_profile_func_enter
819
ENTRY(__cyg_profile_func_enter)
	/*
	 * Compiler-inserted profiling entry hook (also aliased to
	 * _mcount): tail-call mcount unless profiling is switched off
	 * in _gmonparam.
	 */
	SET(_gmonparam, %o3, %o2)
	lduw	[%o2 + GM_STATE], %o3
	cmp	%o3, GMON_PROF_OFF
	be,a,pn	%icc, 1f		! profiling off: do nothing
	nop
	SET(mcount, %o3, %o2)
	jmpl	%o2, %g0		! tail-call mcount; %o7 left intact
	nop
1:	retl
	nop
END(__cyg_profile_func_enter)
832
833 #ifdef GUPROF
834
ENTRY(__cyg_profile_func_exit)
	/*
	 * Compiler-inserted profiling exit hook: tail-call mexitcount
	 * only when high-resolution profiling is enabled.
	 */
	SET(_gmonparam, %o3, %o2)
	lduw	[%o2 + GM_STATE], %o3
	cmp	%o3, GMON_PROF_HIRES
	be,a,pn	%icc, 1f		! not in hires mode: do nothing
	nop
	SET(mexitcount, %o3, %o2)
	jmpl	%o2, %g0		! tail-call mexitcount; %o7 left intact
	nop
1:	retl
	nop
END(__cyg_profile_func_exit)
847
848 #endif /* GUPROF */
849
850 #endif /* GPROF */
/* Cache object: ea1bbbb1a70a4b268660165096be901d */