/*	$OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $	*/
/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Digital Equipment Corporation and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (C) 1989 Digital Equipment Corporation.
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies.
 * Digital Equipment Corporation makes no representations about the
 * suitability of this software for any purpose.  It is provided "as is"
 * without express or implied warranty.
 *
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
 *	v 1.1 89/07/11 17:55:04 nelson Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
 *	v 9.2 90/01/29 18:00:39 shirriff Exp  SPRITE (DECWRL)
 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
 *	v 1.1 89/07/10 14:27:41 nelson Exp  SPRITE (DECWRL)
 *
 *	from: @(#)locore.s	8.5 (Berkeley) 1/4/94
 *	JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
 * $FreeBSD: releng/8.1/sys/mips/mips/support.S 183299 2008-09-23 14:45:10Z obrien $
 */

/*
 * Contains code that is the first executed at boot time plus
 * assembly language support routines.
 */
57
#include "opt_ddb.h"
#include <sys/errno.h>
#include <machine/asm.h>
#include <machine/cpu.h>
#include <machine/regnum.h>

#include "assym.s"			/* assembler-visible struct offsets (PC_*, U_PCB_*, TD_*) */

	.set	noreorder		# Noreorder is default style!
67
/*
 * Primitives
 */

/*
 * This table is indexed by u.u_pcb.pcb_onfault in trap().
 * The reason for using this table rather than storing an address in
 * u.u_pcb.pcb_onfault is simply to make the code faster.
 *
 * Each slot holds the recovery address trap() jumps to when a fault
 * occurs while the corresponding index is armed in pcb_onfault.
 * The index constants (#defines below) must match the table layout.
 */
	.globl	onfault_table
	.data
	.align	3
onfault_table:
	.word	0			# invalid index number
#define BADERR		1
	.word	baderr			# index 1: badaddr() probe faulted
#define COPYERR		2
	.word	copyerr			# index 2: copyin/copyout family faulted
#define FSWBERR		3
	.word	fswberr			# index 3: fu*/su* access faulted
#define FSWINTRBERR	4
	.word	fswintrberr		# index 4: fuswintr/suswintr faulted
#if defined(DDB) || defined(DEBUG)
#define DDBERR	5
	.word	ddberr			# index 5: kdbpeek/kdbpoke faulted
#else
	.word	0			# keep table layout stable without DDB
#endif
96
	.text

/*
 * See if access to addr with a len type instruction causes a machine check.
 * len is length of access (1=byte, 2=short, 4=long)
 *
 * badaddr(addr, len)
 *	char *addr;
 *	int len;
 *
 * Returns 0 if the probe access succeeded, 1 if it trapped.
 * Works by arming pcb_onfault with BADERR so that trap() transfers
 * control to baderr: below if the load faults.
 */
LEAF(badaddr)
	li	v0, BADERR
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	bne	a1, 1, 2f
	sw	v0, U_PCB_ONFAULT(v1)	# (delay slot) arm onfault for every len
	b	5f
	lbu	v0, (a0)		# (delay slot) len == 1: probe with byte load
2:
	bne	a1, 2, 4f
	nop
	b	5f
	lhu	v0, (a0)		# (delay slot) len == 2: probe with halfword load
4:
	lw	v0, (a0)		# any other len: probe with word load
5:
	sw	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# made it w/o errors
baderr:
	j	ra
	li	v0, 1			# trap sends us here
END(badaddr)
130
/*
 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
 * Copy a NIL-terminated string, at most maxlen characters long.  Return the
 * number of characters copied (including the NIL) in *lencopied.  If the
 * string is too long, return ENAMETOOLONG; else return 0.
 *
 * Register usage: t0 keeps the original maxlen so the copied length can
 * be computed as (t0 - a2) at the end.
 */
LEAF(copystr)
	move	t0, a2			# t0 = original maxlen
	beq	a2, zero, 4f		# maxlen == 0: report ENAMETOOLONG
1:
	lbu	v0, 0(a0)
	subu	a2, a2, 1
	beq	v0, zero, 2f		# hit the NIL: success, v0 == 0
	sb	v0, 0(a1)		# each byte until NIL
	addu	a0, a0, 1
	bne	a2, zero, 1b		# less than maxlen
	addu	a1, a1, 1
4:
	li	v0, ENAMETOOLONG	# run out of space
2:
	beq	a3, zero, 3f		# return num. of copied bytes
	subu	a2, t0, a2		# if the 4th arg was non-NULL
	sw	a2, 0(a3)
3:
	j	ra			# v0 is 0 or ENAMETOOLONG
	nop
END(copystr)
158
159
/*
 * fillw(pat, addr, count)
 *
 * Store the low 16 bits of 'pat' (a0) into 'count' (a2) consecutive
 * halfwords starting at 'addr' (a1).
 * NOTE(review): there is no count == 0 guard; a zero count would wrap
 * and loop for 2^32 iterations -- callers are assumed to pass count > 0.
 */
LEAF(fillw)
1:
	addiu	a2, a2, -1
	sh	a0, 0(a1)
	bne	a2,zero, 1b
	addiu	a1, a1, 2		# (delay slot) advance destination

	jr	ra
	nop
END(fillw)
173
/*
 * Optimized memory zero code.
 * mem_zero_page(addr);
 *
 * Zeroes one page (NBPG bytes) with 64-bit stores, 8 bytes per
 * iteration.  'addr' must be 8-byte aligned and NBPG a multiple of 8.
 */
LEAF(mem_zero_page)
	li	v0, NBPG		# v0 = bytes remaining
1:
	subu	v0, 8
	sd	zero, 0(a0)		# 64-bit store (requires MIPS III or later)
	bne	zero, v0, 1b
	addu	a0, 8			# (delay slot) advance pointer
	jr	ra
	nop
END(mem_zero_page)
188
/*
 * Block I/O routines mainly used by I/O drivers.
 *
 * Args as:	a0 = port
 *		a1 = memory address
 *		a2 = count
 */
LEAF(insb)
	beq	a2, zero, 2f		# nothing to do for count == 0
	addu	a2, a1			# (delay slot) a2 = end of destination
1:
	lbu	v0, 0(a0)		# read one byte from the port
	addiu	a1, 1
	bne	a1, a2, 1b
	sb	v0, -1(a1)		# (delay slot) store at pre-increment address
2:
	jr	ra
	nop
END(insb)
208
LEAF(insw)
	beq	a2, zero, 2f		# nothing to do for count == 0
	addu	a2, a2			# (delay slot) halfword count -> byte count
	addu	a2, a1			# a2 = end of destination
1:
	lhu	v0, 0(a0)		# read one halfword from the port
	addiu	a1, 2
	bne	a1, a2, 1b
	sh	v0, -2(a1)		# (delay slot) store at pre-increment address
2:
	jr	ra
	nop
END(insw)
222
LEAF(insl)
	beq	a2, zero, 2f		# nothing to do for count == 0
	sll	a2, 2			# (delay slot) word count -> byte count
	addu	a2, a1			# a2 = end of destination
1:
	lw	v0, 0(a0)		# read one word from the port
	addiu	a1, 4
	bne	a1, a2, 1b
	sw	v0, -4(a1)		# (delay slot) store at pre-increment address
2:
	jr	ra
	nop
END(insl)
236
LEAF(outsb)
	beq	a2, zero, 2f		# nothing to do for count == 0
	addu	a2, a1			# (delay slot) a2 = end of source
1:
	lbu	v0, 0(a1)
	addiu	a1, 1
	bne	a1, a2, 1b
	sb	v0, 0(a0)		# (delay slot) write byte to the port
2:
	jr	ra
	nop
END(outsb)
249
LEAF(outsw)
	beq	a2, zero, 2f		# nothing to do for count == 0
	addu	a2, a2			# (delay slot) halfword count -> byte count
	li	v0, 1
	and	v0, a1			# source halfword-aligned?
	bne	v0, zero, 3f		# arghh, unaligned.
	addu	a2, a1			# (delay slot) a2 = end of source
1:
	lhu	v0, 0(a1)
	addiu	a1, 2
	bne	a1, a2, 1b
	sh	v0, 0(a0)		# (delay slot) write halfword to the port
2:
	jr	ra
	nop
3:
	/*
	 * Unaligned source: assemble a word with LWHI/LWLO, then store
	 * its low half.
	 * NOTE(review): only the low 16 bits of the assembled word are
	 * written each iteration -- confirm this matches the intended
	 * byte order for unaligned buffers on this (big-endian?) target.
	 */
	LWHI	v0, 0(a1)
	LWLO	v0, 3(a1)
	addiu	a1, 2
	bne	a1, a2, 3b
	sh	v0, 0(a0)		# (delay slot) write halfword to the port

	jr	ra
	nop
END(outsw)
275
LEAF(outsl)
	beq	a2, zero, 2f		# nothing to do for count == 0
	sll	a2, 2			# (delay slot) word count -> byte count
	li	v0, 3
	and	v0, a1			# source word-aligned?
	bne	v0, zero, 3f		# arghh, unaligned.
	addu	a2, a1			# (delay slot) a2 = end of source
1:
	lw	v0, 0(a1)
	addiu	a1, 4
	bne	a1, a2, 1b
	sw	v0, 0(a0)		# (delay slot) write word to the port
2:
	jr	ra
	nop
3:
	LWHI	v0, 0(a1)		# assemble unaligned word from two halves
	LWLO	v0, 3(a1)
	addiu	a1, 4
	bne	a1, a2, 3b
	sw	v0, 0(a0)		# (delay slot) write word to the port

	jr	ra
	nop
END(outsl)
301
/*
 * Copy a null terminated string from the user address space into
 * the kernel address space.
 *
 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 *
 * Arms pcb_onfault with COPYERR and delegates to copystr(); a fault
 * during the copy unwinds through copyerr (returns EFAULT).  The
 * sign-bit check rejects kernel (KSEG) source addresses.
 */
NON_LEAF(copyinstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, _C_LABEL(copyerr)	# make sure address is in user space
	li	v0, COPYERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)
	sw	v0, U_PCB_ONFAULT(v1)	# (delay slot) arm fault handler
	lw	ra, STAND_RA_OFFSET(sp)
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	zero, U_PCB_ONFAULT(v1)	# disarm; v0 is copystr's return value
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(copyinstr)
329
/*
 * Copy a null terminated string from the kernel address space into
 * the user address space.
 *
 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 *
 * Mirror of copyinstr(); here the *destination* (a1) must be a user
 * address, hence the sign-bit check on a1.
 */
NON_LEAF(copyoutstr, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, _C_LABEL(copyerr)	# make sure address is in user space
	li	v0, COPYERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)
	sw	v0, U_PCB_ONFAULT(v1)	# (delay slot) arm fault handler
	lw	ra, STAND_RA_OFFSET(sp)
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	zero, U_PCB_ONFAULT(v1)	# disarm; v0 is copystr's return value
	j	ra
	addu	sp, sp, STAND_FRAME_SIZE
END(copyoutstr)
357
/*
 * Copy specified amount of data from user space into the kernel
 *	copyin(from, to, len)
 *	caddr_t *from;	(user source address)
 *	caddr_t *to;	(kernel destination address)
 *	unsigned len;
 *
 * Returns 0 on success; a fault unwinds through copyerr (EFAULT).
 */
NON_LEAF(copyin, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a0, zero, _C_LABEL(copyerr)	# make sure address is in user space
	li	v0, COPYERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)
	sw	v0, U_PCB_ONFAULT(v1)	# (delay slot) arm fault handler
	lw	ra, STAND_RA_OFFSET(sp)
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)	# bcopy modified v1, so reload
	sw	zero, U_PCB_ONFAULT(v1)
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero		# (delay slot) success
END(copyin)
383
/*
 * Copy specified amount of data from kernel to the user space
 *	copyout(from, to, len)
 *	caddr_t *from;	(kernel source address)
 *	caddr_t *to;	(user destination address)
 *	unsigned len;
 *
 * Mirror of copyin(); the user address is the destination (a1).
 */
NON_LEAF(copyout, STAND_FRAME_SIZE, ra)
	subu	sp, sp, STAND_FRAME_SIZE
	.mask	0x80000000, (STAND_RA_OFFSET - STAND_FRAME_SIZE)
	sw	ra, STAND_RA_OFFSET(sp)
	blt	a1, zero, _C_LABEL(copyerr)	# make sure address is in user space
	li	v0, COPYERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)
	sw	v0, U_PCB_ONFAULT(v1)	# (delay slot) arm fault handler
	lw	ra, STAND_RA_OFFSET(sp)
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)	# bcopy modified v1, so reload
	sw	zero, U_PCB_ONFAULT(v1)
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	move	v0, zero		# (delay slot) success
END(copyout)
409
/*
 * Fault recovery target for the copyin/copyout family: trap() lands
 * here (via onfault_table[COPYERR]) with sp still holding the caller's
 * STAND_FRAME.  Unwind the frame and return EFAULT.
 */
LEAF(copyerr)
	lw	ra, STAND_RA_OFFSET(sp)	# restore caller's return address
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	addu	sp, sp, STAND_FRAME_SIZE
	j	ra
	li	v0, EFAULT		# return error
END(copyerr)
419
/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 *
 * All of these arm pcb_onfault with FSWBERR; a fault returns -1 via
 * fswberr.  The sign-bit check rejects kernel addresses up front.
 */
LEAF(fuword)
ALEAF(fuword32)
ALEAF(fuiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	lw	v0, 0(a0)		# fetch word
	j	ra
	sw	zero, U_PCB_ONFAULT(v1)	# (delay slot) disarm
END(fuword)
438
LEAF(fusword)
ALEAF(fuisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	lhu	v0, 0(a0)		# fetch short (zero-extended)
	j	ra
	sw	zero, U_PCB_ONFAULT(v1)	# (delay slot) disarm
END(fusword)
450
LEAF(fubyte)
ALEAF(fuibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	lbu	v0, 0(a0)		# fetch byte (zero-extended)
	j	ra
	sw	zero, U_PCB_ONFAULT(v1)	# (delay slot) disarm
END(fubyte)
462
LEAF(suword)
XLEAF(suword32)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	sw	a1, 0(a0)		# store word
	sw	zero, U_PCB_ONFAULT(v1)	# disarm
	j	ra
	move	v0, zero		# (delay slot) success
END(suword)
475
/*
 * casuword(9)
 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
 *
 * Not implemented: executes 'break' (traps to the debugger/kernel trap
 * handler) and returns -1 as the failure value if execution resumes.
 */
ENTRY(casuword)
	break
	li	v0, -1
	jr	ra
	nop
END(casuword)
486
/*
 * casuword32(9)
 * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
 *						<a2>uint32_t newval)
 *
 * Not implemented: executes 'break' and returns -1 if execution resumes.
 */
ENTRY(casuword32)
	break
	li	v0, -1
	jr	ra
	nop
END(casuword32)
498
#if 0
/* unused in FreeBSD */
/*
 * Have to flush instruction cache afterwards.
 *
 * suiword: store a word into user *text* space, then tail-call
 * Mips_SyncICache so the I-cache sees the new instruction word.
 */
LEAF(suiword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	sw	a1, 0(a0)		# store word
	sw	zero, U_PCB_ONFAULT(v1)
	j	_C_LABEL(Mips_SyncICache)  # FlushICache sets v0 = 0. (Ugly)
	li	a1, 4			# (delay slot) size of word
END(suiword)
#endif
516
/*
 * Will have to flush the instruction cache if byte merging is done in hardware.
 */
LEAF(susword)
ALEAF(suisword)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	sh	a1, 0(a0)		# store short
	sw	zero, U_PCB_ONFAULT(v1)	# disarm
	j	ra
	move	v0, zero		# (delay slot) success
END(susword)
532
LEAF(subyte)
ALEAF(suibyte)
	blt	a0, zero, fswberr	# make sure address is in user space
	li	v0, FSWBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	sb	a1, 0(a0)		# store byte
	sw	zero, U_PCB_ONFAULT(v1)	# disarm
	j	ra
	move	v0, zero		# (delay slot) success
END(subyte)
545
/* Fault recovery target for the fu*/su* family: return -1. */
LEAF(fswberr)
	j	ra
	li	v0, -1			# (delay slot) error return
END(fswberr)
550
/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 *
 * They arm the separate FSWINTRBERR onfault index so trap() can tell
 * an interrupt-context access apart from a normal fu*/su* access.
 */
LEAF(fuswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	lhu	v0, 0(a0)		# fetch short
	j	ra
	sw	zero, U_PCB_ONFAULT(v1)	# (delay slot) disarm
END(fuswintr)
566
LEAF(suswintr)
	blt	a0, zero, fswintrberr	# make sure address is in user space
	li	v0, FSWINTRBERR		# (delay slot) onfault index
	GET_CPU_PCPU(v1)
	lw	v1, PC_CURPCB(v1)
	sw	v0, U_PCB_ONFAULT(v1)
	sh	a1, 0(a0)		# store short
	sw	zero, U_PCB_ONFAULT(v1)	# disarm
	j	ra
	move	v0, zero		# (delay slot) success
END(suswintr)
578
/* Fault recovery target for fuswintr/suswintr: return -1. */
LEAF(fswintrberr)
	j	ra
	li	v0, -1			# (delay slot) error return
END(fswintrberr)
583
/*
 * Insert 'p' after 'q'.
 *	_insque(p, q)
 *		caddr_t p, q;
 *
 * Queue elements are structs whose first word is 'next' and second
 * word (offset 4) is 'prev'.
 */
LEAF(_insque)
	lw	v0, 0(a1)		# v0 = q->next
	sw	a1, 4(a0)		# p->prev = q
	sw	v0, 0(a0)		# p->next = q->next
	sw	a0, 4(v0)		# q->next->prev = p
	j	ra
	sw	a0, 0(a1)		# (delay slot) q->next = p
END(_insque)
597
/*
 * Remove item 'p' from queue.
 *	_remque(p)
 *		caddr_t p;
 */
LEAF(_remque)
	lw	v0, 0(a0)		# v0 = p->next
	lw	v1, 4(a0)		# v1 = p->prev
	nop				# load-delay slot before using v1
	sw	v0, 0(v1)		# p->prev->next = p->next
	j	ra
	sw	v1, 4(v0)		# (delay slot) p->next->prev = p->prev
END(_remque)
611
/*--------------------------------------------------------------------------
 *
 * Mips_GetCOUNT --
 *
 *	Mips_GetCOUNT()
 *
 * Results:
 *	Returns the current COUNT reg.
 *
 * Side effects:
 *	None.
 *
 *--------------------------------------------------------------------------
 */
LEAF(Mips_GetCOUNT)
	mfc0	v0, COP_0_COUNT
	nop	#???			# CP0 hazard padding after mfc0
	nop	#???
	j	ra
	nop
END(Mips_GetCOUNT)
633
/*--------------------------------------------------------------------------
 *
 * Mips_SetCOMPARE --
 *
 *	Mips_SetCOMPARE()
 *
 * Results:
 *	Sets a new value to the COMPARE register.
 *
 * Side effects:
 *	The COMPARE equal interrupt is acknowledged.
 *
 *--------------------------------------------------------------------------
 */
LEAF(Mips_SetCOMPARE)
	mtc0	a0, COP_0_COMPARE
	j	ra
	nop
END(Mips_SetCOMPARE)
653
/* Return the current value of the CP0 COMPARE register. */
LEAF(Mips_GetCOMPARE)
	mfc0	v0, COP_0_COMPARE
	j	ra
	nop
END(Mips_GetCOMPARE)
659
/*
 * u_int32_t mips_cp0_status_read(void)
 *
 * Return the current value of the CP0 Status register.
 */
LEAF(mips_cp0_status_read)
	mfc0	v0, COP_0_STATUS_REG
	j	ra
	nop
END(mips_cp0_status_read)
670
/*
 * void mips_cp0_status_write(u_int32_t)
 *
 * Set the value of the CP0 Status register.
 *
 * Note: This is almost certainly not the way you want to write a
 * "permanent" value to to the CP0 Status register, since it gets
 * saved in trap frames and restores.
 */
LEAF(mips_cp0_status_write)
	mtc0	a0, COP_0_STATUS_REG
	nop				# CP0 hazard padding after mtc0
	nop
	j	ra
	nop
END(mips_cp0_status_write)
687
688
/*
 * memcpy(to, from, len)
 * {ov}bcopy(from, to, len)
 *
 * memcpy swaps its first two args, then falls into bcopy (src=a0,
 * dst=a1, len=a2).  Overlapping regions where dst lies inside
 * [src, src+len) are copied backwards byte-by-byte; everything else
 * goes through the word-at-a-time forward path with LWHI/LWLO
 * handling for unaligned sources.
 */
LEAF(memcpy)
	.set	noreorder
	move	v0, a0			# swap from and to
	move	a0, a1
	move	a1, v0
ALEAF(bcopy)
ALEAF(ovbcopy)
	.set	noreorder
	addu	t0, a0, a2		# t0 = end of s1 region
	sltu	t1, a1, t0
	sltu	t2, a0, a1
	and	t1, t1, t2		# t1 = true if from < to < (from+len)
	beq	t1, zero, forward	# non overlapping, do forward copy
	slt	t2, a2, 12		# check for small copy

	ble	a2, zero, 2f
	addu	t1, a1, a2		# t1 = end of to region
1:
	lb	v1, -1(t0)		# copy bytes backwards,
	subu	t0, t0, 1		#   doesnt happen often so do slow way
	subu	t1, t1, 1
	bne	t0, a0, 1b
	sb	v1, 0(t1)		# (delay slot) store copied byte
2:
	j	ra
	nop
forward:
	bne	t2, zero, smallcpy	# do a small bcopy
	xor	v1, a0, a1		# compare low two bits of addresses
	and	v1, v1, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	beq	v1, zero, aligned	# addresses can be word aligned
	and	a3, a3, 3		# (delay slot)

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	LWHI	v1, 0(a0)		# get next 4 bytes (unaligned)
	LWLO	v1, 3(a0)
	addu	a0, a0, a3
	SWHI	v1, 0(a1)		# store 1, 2, or 3 bytes to align a1
	addu	a1, a1, a3
1:
	and	v1, a2, 3		# compute number of words left
	subu	a3, a2, v1
	move	a2, v1			# a2 = leftover byte count
	addu	a3, a3, a0		# compute ending address
2:
	LWHI	v1, 0(a0)		# copy words a0 unaligned, a1 aligned
	LWLO	v1, 3(a0)
	addu	a0, a0, 4
	sw	v1, 0(a1)
	addu	a1, a1, 4
	bne	a0, a3, 2b
	nop				# We have to do this mmu-bug.
	b	smallcpy
	nop
aligned:
	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	LWHI	v1, 0(a0)		# copy 1, 2, or 3 bytes to align
	addu	a0, a0, a3
	SWHI	v1, 0(a1)
	addu	a1, a1, a3
1:
	and	v1, a2, 3		# compute number of whole words left
	subu	a3, a2, v1
	move	a2, v1			# a2 = leftover byte count
	addu	a3, a3, a0		# compute ending address
2:
	lw	v1, 0(a0)		# copy words
	addu	a0, a0, 4
	sw	v1, 0(a1)
	bne	a0, a3, 2b
	addu	a1, a1, 4		# (delay slot) advance destination
smallcpy:
	ble	a2, zero, 2f
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v1, 0(a0)		# copy bytes
	addu	a0, a0, 1
	sb	v1, 0(a1)
	bne	a0, a3, 1b
	addu	a1, a1, 1		# MMU BUG ? can not do -1(a1) at 0x80000000!!
2:
	j	ra
	nop
END(memcpy)
780
/*
 * memset(void *s1, int c, int len)
 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
 *
 * Returns s1 (in v0).  Replicates the fill byte into all four byte
 * lanes of t1, aligns the destination with SWHI, clears whole words,
 * then finishes the remainder byte-by-byte.
 */
LEAF(memset)
	.set noreorder
	blt	a2, 12, memsetsmallclr	# small amount to clear?
	move	v0, a0			# save s1 for result

	sll	t1, a1, 8		# compute  c << 8 in t1
	or	t1, t1, a1		# compute c << 8 | c in 11
	sll	t2, t1, 16		# shift that left 16
	or	t1, t2, t1		# or together

	subu	t0, zero, a0		# compute # bytes to word align address
	and	t0, t0, 3
	beq	t0, zero, 1f		# skip if word aligned
	subu	a2, a2, t0		# subtract from remaining count
	SWHI	t1, 0(a0)		# store 1, 2, or 3 bytes to align
	addu	a0, a0, t0
1:
	and	v1, a2, 3		# compute number of whole words left
	subu	t0, a2, v1
	subu	a2, a2, t0		# a2 = leftover byte count
	addu	t0, t0, a0		# compute ending address
2:
	addu	a0, a0, 4		# clear words
#ifdef MIPS3_5900
	nop				# R5900 short-loop erratum padding
	nop
	nop
	nop
#endif
	bne	a0, t0, 2b		#  unrolling loop does not help
	sw	t1, -4(a0)		#  since we are limited by memory speed

memsetsmallclr:
	ble	a2, zero, 2f
	addu	t0, a2, a0		# compute ending address
1:
	addu	a0, a0, 1		# clear bytes
#ifdef MIPS3_5900
	nop				# R5900 short-loop erratum padding
	nop
	nop
	nop
#endif
	bne	a0, t0, 1b
	sb	a1, -1(a0)		# (delay slot) store fill byte
2:
	j	ra
	nop
	.set reorder
END(memset)
835
/*
 * bzero(s1, n)
 *
 * Same word-align / word-clear / byte-tail structure as memset, but
 * the fill value is hardwired to zero and the count is in a1.
 */
LEAF(bzero)
ALEAF(blkclr)
	.set	noreorder
	blt	a1, 12, smallclr	# small amount to clear?
	subu	a3, zero, a0		# compute # bytes to word align address
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	subu	a1, a1, a3		# subtract from remaining count
	SWHI	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	addu	a0, a0, a3
1:
	and	v0, a1, 3		# compute number of words left
	subu	a3, a1, v0
	move	a1, v0			# a1 = leftover byte count
	addu	a3, a3, a0		# compute ending address
2:
	addu	a0, a0, 4		# clear words
	bne	a0, a3, 2b		#  unrolling loop does not help
	sw	zero, -4(a0)		#  since we are limited by memory speed
smallclr:
	ble	a1, zero, 2f
	addu	a3, a1, a0		# compute ending address
1:
	addu	a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)		# (delay slot) store zero byte
2:
	j	ra
	nop
END(bzero)
869
870
/*
 * bcmp(s1, s2, n)
 *
 * Returns 0 if the two regions match, 1 otherwise (NOT a signed
 * lexicographic difference like memcmp).  Compares word-at-a-time
 * when possible; if the two pointers cannot be brought to the same
 * alignment, a0 is read with LWHI/LWLO against an aligned a1.
 */
LEAF(bcmp)
	.set	noreorder
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	subu	a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3		# (delay slot)

	beq	a3, zero, 1f
	subu	a2, a2, a3		# subtract from remaining count
	move	v0, v1			# init v0,v1 so unmodified bytes match
	LWHI	v0, 0(a0)		# read 1, 2, or 3 bytes
	LWHI	v1, 0(a1)
	addu	a1, a1, a3
	bne	v0, v1, nomatch
	addu	a0, a0, a3		# (delay slot)
1:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4		# (delay slot)
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	subu	a2, a2, a3		# subtract from remaining count
	addu	a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1		# (delay slot)
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	subu	a2, a2, a3		#   which has to be >= (16-3) & ~3
	addu	a3, a3, a0		# compute ending address
3:
	LWHI	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	LWLO	v0, 3(a0)
	lw	v1, 0(a1)
	addu	a0, a0, 4
	bne	v0, v1, nomatch
	addu	a1, a1, 4		# (delay slot)
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match
	addu	a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	addu	a0, a0, 1
	bne	v0, v1, nomatch
	addu	a1, a1, 1		# (delay slot)
	bne	a0, a3, 1b
	nop
match:
	j	ra
	move	v0, zero		# (delay slot) regions equal
nomatch:
	j	ra
	li	v0, 1			# (delay slot) regions differ
END(bcmp)
948
949
/*
 * bit = ffs(value)
 *
 * Find-first-set: returns the 1-based index of the least significant
 * set bit of a0, or 0 if a0 == 0.
 */
LEAF(ffs)
	.set	noreorder
	beq	a0, zero, 2f		# ffs(0) == 0
	move	v0, zero		# (delay slot) bit counter
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1
	beq	v1, zero, 1b		# no, continue
	srl	a0, a0, 1		# (delay slot) shift to next bit
2:
	j	ra
	nop
END(ffs)
966
/* Return the current frame pointer (register s8). */
LEAF(get_current_fp)
	j	ra
	move	v0, s8			# (delay slot) v0 = s8
END(get_current_fp)
971
/*
 * Atomically fetch the word at *a0 and replace it with zero using an
 * LL/SC loop; returns the old value.
 */
LEAF(loadandclear)
	.set	noreorder
1:
	ll	v0, 0(a0)		# load-linked old value
	move	t0, zero
	sc	t0, 0(a0)		# try to store zero
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(loadandclear)
983
#if 0
/*
 * Disabled LL/SC implementations of the 32-bit atomic ops.  Kept for
 * reference only; the live kernel uses other definitions.
 */
/*
 * u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
 * Atomically compare the value stored at p with cmpval
 * and if the two values are equal, update value *p with
 * newval. Return zero if compare failed, non-zero otherwise
 *
 */

LEAF(atomic_cmpset_32)
	.set	noreorder
1:
	ll	t0, 0(a0)		# load-linked current value
	move	v0, zero		# assume failure
	bne	t0, a1, 2f		# mismatch: return 0
	move	t1, a2			# (delay slot) value to store
	sc	t1, 0(a0)		# attempt the store
	beq	t1, zero, 1b		# sc failed: retry whole sequence
	or	v0, v0, 1		# (delay slot) success indicator
2:
	j	ra
	nop
END(atomic_cmpset_32)

/**
 * u_int32_t
 * atomic_readandclear_32(u_int32_t *a)
 * {
 *	u_int32_t retval;
 *	retval = *a;
 *	*a = 0;
 * }
 */
LEAF(atomic_readandclear_32)
	.set	noreorder
1:
	ll	t0, 0(a0)
	move	t1, zero
	move	v0, t0			# return the value read
	sc	t1, 0(a0)
	beq	t1, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_readandclear_32)

/**
 * void
 * atomic_set_32(u_int32_t *a, u_int32_t b)
 * {
 *	*a |= b;
 * }
 */
LEAF(atomic_set_32)
	.set	noreorder
1:
	ll	t0, 0(a0)
	or	t0, t0, a1		# set requested bits
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_set_32)

/**
 * void
 * atomic_add_32(uint32_t *a, uint32_t b)
 * {
 *	*a += b;
 * }
 */
LEAF(atomic_add_32)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	addu	t0, t0, a1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_add_32)

/**
 * void
 * atomic_clear_32(u_int32_t *a, u_int32_t b)
 * {
 *	*a &= ~b;
 * }
 */
LEAF(atomic_clear_32)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
	nor	a1, zero, a1		# a1 = ~b
1:
	ll	t0, 0(a0)
	and	t0, t0, a1	# t1 has the new lower 16 bits
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_clear_32)

/**
 * void
 * atomic_subtract_32(uint16_t *a, uint16_t b)
 * {
 *	*a -= b;
 * }
 */
LEAF(atomic_subtract_32)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	subu	t0, t0, a1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_subtract_32)

#endif
1114
/**
 * void
 * atomic_set_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a |= b;
 * }
 *
 * Implemented as an LL/SC read-modify-write of the enclosing aligned
 * 32-bit word; the OR only touches the bits supplied in b.
 * NOTE(review): which halfword of the word b addresses is not
 * adjusted for here -- callers appear responsible for shifting b.
 */
LEAF(atomic_set_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
	andi	a1, a1, 0xffff		# keep only the low 16 bits of b
1:
	ll	t0, 0(a0)
	or	t0, t0, a1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_set_16)
1136
/**
 * void
 * atomic_clear_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a &= ~b;
 * }
 *
 * LL/SC read-modify-write of the enclosing aligned 32-bit word: the
 * low 16 bits are masked with ~b while the upper 16 bits are
 * preserved unchanged.
 */
LEAF(atomic_clear_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
	nor	a1, zero, a1		# a1 = ~b
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	and	t1, t1, a1	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_clear_16)
1163
1164
/**
 * void
 * atomic_subtract_16(uint16_t *a, uint16_t b)
 * {
 *	*a -= b;
 * }
 *
 * LL/SC read-modify-write of the enclosing aligned 32-bit word; the
 * subtraction is confined to the low 16 bits, upper bits preserved.
 */
LEAF(atomic_subtract_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xffff	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_subtract_16)
1191
/**
 * void
 * atomic_add_16(uint16_t *a, uint16_t b)
 * {
 *	*a += b;
 * }
 *
 * LL/SC read-modify-write of the enclosing aligned 32-bit word; the
 * addition is confined to the low 16 bits, upper bits preserved.
 */
LEAF(atomic_add_16)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xffff	# t1 has the original lower 16 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xffff	# t1 has the new lower 16 bits
	srl	t0, t0, 16	# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_add_16)
1218
/**
 * void
 * atomic_add_8(uint8_t *a, uint8_t b)
 * {
 *	*a += b;
 * }
 *
 * LL/SC read-modify-write of the enclosing aligned 32-bit word; the
 * addition is confined to the low 8 bits, upper 24 bits preserved.
 */
LEAF(atomic_add_8)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xff	# t1 has the original lower 8 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xff	# t1 has the new lower 8 bits
	srl	t0, t0, 8	# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_add_8)
1245
1246
/**
 * void
 * atomic_subtract_8(uint8_t *a, uint8_t b)
 * {
 *	*a -= b;		(comment in original said += by mistake)
 * }
 *
 * LL/SC read-modify-write of the enclosing aligned 32-bit word; the
 * subtraction is confined to the low 8 bits, upper 24 bits preserved.
 */
LEAF(atomic_subtract_8)
	.set	noreorder
	srl	a0, a0, 2	# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)
	move	t1, t0
	andi	t1, t1, 0xff	# t1 has the original lower 8 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xff	# t1 has the new lower 8 bits
	srl	t0, t0, 8	# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# sc failed: retry
	nop
	j	ra
	nop
END(atomic_subtract_8)
1273
/*
 * atomic 64-bit register read/write assembly language support routines.
 */

	.set	noreorder		# Noreorder is default style!
#ifndef _MIPS_ARCH_XLR
	.set	mips3			# lld/scd/ld/sd need MIPS III or later
#endif

/*
 * Atomically fetch the 64-bit value at *a0 and replace it with zero
 * using an LLD/SCD loop; returns the old value.
 */
LEAF(atomic_readandclear_64)
1:
	lld	v0, 0(a0)		# 64-bit load-linked
	li	t0, 0
	scd	t0, 0(a0)		# try to store zero
	beqz	t0, 1b			# scd failed: retry
	nop
	j	ra
	nop
END(atomic_readandclear_64)
1293
/*
 * atomic_store_64(dst, src): copy the 64-bit value at *a1 to *a0 with
 * interrupts disabled around the ld/sd pair so the two 32-bit halves
 * cannot be observed torn by an interrupt on this CPU.  The nops pad
 * CP0 hazards around the Status register writes.
 */
LEAF(atomic_store_64)
	mfc0	t1, COP_0_STATUS_REG	# save current Status
	and	t2, t1, ~SR_INT_ENAB
	mtc0	t2, COP_0_STATUS_REG	# disable interrupts
	nop
	nop
	nop
	nop
	ld	t0, (a1)
	nop
	nop
	sd	t0, (a0)
	nop
	nop
	mtc0	t1,COP_0_STATUS_REG	# restore previous Status
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_store_64)
1316
/*
 * atomic_load_64(src, dst): copy the 64-bit value at *a0 to *a1 with
 * interrupts disabled around the ld/sd pair (mirror of
 * atomic_store_64 with the operands reversed).
 */
LEAF(atomic_load_64)
	mfc0	t1, COP_0_STATUS_REG	# save current Status
	and	t2, t1, ~SR_INT_ENAB
	mtc0	t2, COP_0_STATUS_REG	# disable interrupts
	nop
	nop
	nop
	nop
	ld	t0, (a0)
	nop
	nop
	sd	t0, (a1)
	nop
	nop
	mtc0	t1,COP_0_STATUS_REG	# restore previous Status
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_load_64)
1339
#if defined(DDB) || defined(DEBUG)

/*
 * kdbpeek(addr): debugger word read with fault protection.  Arms
 * pcb_onfault with DDBERR so a bad address lands in ddberr: instead
 * of panicking; unaligned addresses are read with LWHI/LWLO.
 */
LEAF(kdbpeek)
	li	v1, DDBERR
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	lw	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	sw	v1, U_PCB_ONFAULT(t1)	# (delay slot) arm onfault either way

	lw	v0, (a0)		# aligned: plain word load
	jr	ra
	sw	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm

1:
	LWHI	v0, 0(a0)		# unaligned: assemble word from halves
	LWLO	v0, 3(a0)
	jr	ra
	sw	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm
END(kdbpeek)

/* Fault recovery target for kdbpeek/kdbpoke: just return. */
ddberr:
	jr	ra
	nop
1364
#if defined(DDB)
/*
 * kdbpoke(addr, value): debugger word write with fault protection,
 * mirror of kdbpeek; unaligned addresses use SWHI/SWLO.
 */
LEAF(kdbpoke)
	li	v1, DDBERR
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	lw	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	sw	v1, U_PCB_ONFAULT(t1)	# (delay slot) arm onfault either way

	sw	a1, (a0)		# aligned: plain word store
	jr	ra
	sw	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm

1:
	SWHI	a1, 0(a0)		# unaligned: store word in two halves
	SWLO	a1, 3(a0)
	jr	ra
	sw	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm
END(kdbpoke)

	.data
	.globl	esym
esym:	.word	0			# end-of-kernel-symbols pointer for DDB

#ifndef _MIPS_ARCH_XLR
	.set	mips2			# back to the baseline ISA
#endif
#endif /* DDB */
#endif /* DDB || DEBUG */
1394
/*
 * Register save/restore width for setjmp/longjmp: 32-bit sw/lw by
 * default, 64-bit sd/ld when building for a MIPS III (64-bit) ISA.
 */
#ifndef MIPS_ISAIII
#define	STORE	sw	/* 32 bit mode regsave instruction */
#define	LOAD	lw	/* 32 bit mode regload instruction */
#define	RSIZE	4	/* 32 bit mode register size */
#else
#define	STORE	sd	/* 64 bit mode regsave instruction */
#define	LOAD	ld	/* 64 bit mode regload instruction */
#define	RSIZE	8	/* 64 bit mode register size */
#endif

/* Pipeline/ITLB hazard padding used after Status register updates. */
#define	ITLBNOPFIX	nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
1406
	.text
/* Enter the debugger via a break instruction with the SOVER code. */
LEAF(breakpoint)
	break	BREAK_SOVER_VAL
	jr	ra
	nop
END(breakpoint)
1413
/*
 * setjmp(jmp_buf): save the callee-saved register state (s0-s8, sp,
 * ra) and the CP0 Status register into the PREG_* slots of the buffer
 * at a0.  Returns 0 on the direct call; a later longjmp() on the same
 * buffer resumes after this call returning 1.
 *
 * Each register must be stored into its own PREG_* slot so that
 * longjmp() (which loads s8 from PREG_S8 and sp from PREG_SP)
 * restores them correctly; the previous code had the s8/sp slots
 * swapped, which would exchange the stack and frame pointers across a
 * longjmp.
 */
LEAF(setjmp)
	mfc0	v0, COP_0_STATUS_REG	# Later the "real" spl value!
	STORE	s0, (RSIZE * PREG_S0)(a0)
	STORE	s1, (RSIZE * PREG_S1)(a0)
	STORE	s2, (RSIZE * PREG_S2)(a0)
	STORE	s3, (RSIZE * PREG_S3)(a0)
	STORE	s4, (RSIZE * PREG_S4)(a0)
	STORE	s5, (RSIZE * PREG_S5)(a0)
	STORE	s6, (RSIZE * PREG_S6)(a0)
	STORE	s7, (RSIZE * PREG_S7)(a0)
	STORE	s8, (RSIZE * PREG_S8)(a0)	# s8 in the S8 slot (matches longjmp)
	STORE	sp, (RSIZE * PREG_SP)(a0)	# sp in the SP slot (matches longjmp)
	STORE	ra, (RSIZE * PREG_RA)(a0)
	STORE	v0, (RSIZE * PREG_SR)(a0)
	jr	ra
	li	v0, 0			# (delay slot) setjmp return
END(setjmp)
1431
/*
 * longjmp(jmp_buf): restore the register state saved by setjmp() from
 * the PREG_* slots of the buffer at a0, restore the saved Status
 * register, and return 1 at the saved ra.
 */
LEAF(longjmp)
	LOAD	v0, (RSIZE * PREG_SR)(a0)	# saved Status register
	LOAD	ra, (RSIZE * PREG_RA)(a0)
	LOAD	s0, (RSIZE * PREG_S0)(a0)
	LOAD	s1, (RSIZE * PREG_S1)(a0)
	LOAD	s2, (RSIZE * PREG_S2)(a0)
	LOAD	s3, (RSIZE * PREG_S3)(a0)
	LOAD	s4, (RSIZE * PREG_S4)(a0)
	LOAD	s5, (RSIZE * PREG_S5)(a0)
	LOAD	s6, (RSIZE * PREG_S6)(a0)
	LOAD	s7, (RSIZE * PREG_S7)(a0)
	LOAD	s8, (RSIZE * PREG_S8)(a0)
	LOAD	sp, (RSIZE * PREG_SP)(a0)
	mtc0	v0, COP_0_STATUS_REG	# Later the "real" spl value!
	ITLBNOPFIX			# pad the CP0 hazard after mtc0
	jr	ra
	li	v0, 1			# (delay slot) longjmp return
END(longjmp)
1450
/*
 * Fault recovery target for casuptr: clear the thread's pcb_onfault
 * and return -1.  Note: no nop after 'j ra' -- whatever follows END()
 * occupies the delay slot; preserved as-is.
 */
LEAF(fusufault)
	GET_CPU_PCPU(t0)
	lw	t0, PC_CURTHREAD(t0)
	lw	t0, TD_PCB(t0)
	sw	zero, U_PCB_ONFAULT(t0)	# disarm fault handler
	li	v0, -1
	j	ra
END(fusufault)
1459
/* Define a new md function 'casuptr'.  This atomically compares and sets
   a pointer that is in user space.  It will be used as the basic primitive
   for a kernel supported user space lock implementation.
   <v0> = old value at *a0; stores a2 iff old value == a1 (LL/SC loop). */
LEAF(casuptr)

	li	t0, VM_MAXUSER_ADDRESS /* verify address validity */
	blt	a0, t0, fusufault /* trap faults */
	nop

	GET_CPU_PCPU(t1)
	lw	t1, PC_CURTHREAD(t1)
	lw	t1, TD_PCB(t1)

	/*
	 * NOTE(review): 'lw' loads the *word at* fusufault (its first
	 * instruction), not its address ('la' would).  Verify against
	 * what trap() expects in pcb_onfault here -- the rest of this
	 * file stores small onfault_table indices instead.
	 */
	lw	t2, fusufault
	sw	t2, U_PCB_ONFAULT(t1)
1:
	ll	v0, 0(a0) /* try to load the old value */
	beq	v0, a1, 2f /* compare */
	move	t0, a2 /* (delay slot) setup value to write */
	sc	t0, 0(a0) /* write if address still locked */
	beq	t0, zero, 1b /* if it failed, spin */
2:
	sw	zero, U_PCB_ONFAULT(t1) /* clean up */
	j	ra
END(casuptr)
1485
1486
#ifdef TARGET_OCTEON
/*
 * void octeon_enable_shadow(void)
 *	turns on access to CC and CCRes
 *
 * Writes 0xf to the Octeon CP0 "Info" register to enable the shadow
 * register set.
 */
LEAF(octeon_enable_shadow)
	li	t1, 0x0000000f
	mtc0	t1, COP_0_INFO
	jr	ra
	nop
END(octeon_enable_shadow)
1498
1499
/* Return the current value of the Octeon CP0 "Info" register. */
LEAF(octeon_get_shadow)
	mfc0	v0, COP_0_INFO
	jr	ra
	nop
END(octeon_get_shadow)
1505
/*
 * octeon_set_control(addr, uint32_t val)
 *
 * Saves the old CvmCtl (CP0 reg 9 sel 7) value to *a0, then writes
 * the new value from a1.  The dmfc0/dmtc0 forms are hand-encoded as
 * .word because the assembler in use did not accept the sel syntax.
 */
LEAF(octeon_set_control)
	.set	mips64r2
	or	t1, a1, zero		# t1 = new value (keep a1 free)
	/* dmfc0 a1, 9, 7*/
	.word	0x40254807		# a1 = old CvmCtl
	sd	a1, 0(a0)		# *addr = old value
	or	a1, t1, zero
	/* dmtc0 a1, 9, 7*/
	.word	0x40a54807		# CvmCtl = new value
	jr	ra
	nop
	.set	mips0
END(octeon_set_control)
1522
/*
 * octeon_get_control(addr)
 *
 * Stores the current CvmCtl (CP0 reg 9 sel 7) value to *a0.
 */
LEAF(octeon_get_control)
	.set	mips64r2
	/* dmfc0 a1, 9, 7 */
	.word	0x40254807		# a1 = CvmCtl (hand-encoded dmfc0)
	sd	a1, 0(a0)
	jr	ra
	nop
	.set	mips0
END(octeon_get_control)
#endif
/* Cache object: e28f7c95091b0090a5a4df50d4e6cc85 -- web-cache residue from the source listing; kept as a comment so the file assembles. */