1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
2 /*-
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
40 *
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
47 *
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
50 * $FreeBSD: releng/8.4/sys/mips/mips/support.S 215938 2010-11-27 12:26:40Z jchandra $
51 */
52
53 /*
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
56 *
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
59 * are met:
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
68 * the NetBSD Project.
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
71 *
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82 * SUCH DAMAGE.
83 */
84
85 /*
86 * Contains code that is the first executed at boot time plus
87 * assembly language support routines.
88 */
89
90 #include "opt_cputype.h"
91 #include "opt_ddb.h"
92 #include <sys/errno.h>
93 #include <machine/asm.h>
94 #include <machine/cpu.h>
95 #include <machine/regnum.h>
96 #include <machine/cpuregs.h>
97
98 #include "assym.s"
99
100 .set noreorder # Noreorder is default style!
	# NOTE: with noreorder the assembler does NOT fill branch delay
	# slots; throughout this file the instruction textually after a
	# branch/jump is the delay slot and executes unconditionally.
101
102 /*
103 * Primitives
104 */
105
106 .text
107
108 /*
109 * See if access to addr with a len type instruction causes a machine check.
110 * len is length of access (1=byte, 2=short, 4=int)
111 *
112 * badaddr(addr, len)
113 * char *addr;
114 * int len;
115 */
116 LEAF(badaddr)
	# Probe addr (a0) with an access of size len (a1): 1=byte, 2=short,
	# else word.  Returns 0 in v0 if the access succeeded, 1 if it
	# trapped.  Faults are caught via curpcb->pcb_onfault -> baderr.
117 PTR_LA v0, baderr
118 GET_CPU_PCPU(v1)
119 PTR_L v1, PC_CURPCB(v1)
120 bne a1, 1, 2f
121 PTR_S v0, U_PCB_ONFAULT(v1)
	# (the store above is the delay slot: the handler is armed on
	# every path through the dispatch below)
122 b 5f
123 lbu v0, (a0)
124 2:
125 bne a1, 2, 4f
126 nop
127 b 5f
128 lhu v0, (a0)
129 4:
130 lw v0, (a0)
131 5:
	# Access survived: disarm the fault handler and return 0.
132 PTR_S zero, U_PCB_ONFAULT(v1)
133 j ra
134 move v0, zero # made it w/o errors
135 baderr:
136 j ra
137 li v0, 1 # trap sends us here
138 END(badaddr)
139
140 /*
141 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
142 * Copy a NIL-terminated string, at most maxlen characters long. Return the
143 * number of characters copied (including the NIL) in *lencopied. If the
144 * string is too long, return ENAMETOOLONG; else return 0.
145 */
146 LEAF(copystr)
	# a0 = src, a1 = dst, a2 = maxlen, a3 = optional &lencopied.
	# Returns 0 on success (NIL fit), ENAMETOOLONG if maxlen exhausted.
147 move t0, a2
	# t0 keeps the original maxlen so t0 - a2 = bytes copied at the end.
148 beq a2, zero, 4f
149 1:
150 lbu v0, 0(a0)
151 PTR_SUBU a2, a2, 1
152 beq v0, zero, 2f
	# When the NIL terminator is hit, v0 already holds 0 — that doubles
	# as the success return value.
153 sb v0, 0(a1) # each byte until NIL
154 PTR_ADDU a0, a0, 1
155 bne a2, zero, 1b # less than maxlen
156 PTR_ADDU a1, a1, 1
157 4:
158 li v0, ENAMETOOLONG # run out of space
159 2:
160 beq a3, zero, 3f # return num. of copied bytes
161 PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL
162 PTR_S a2, 0(a3)
163 3:
164 j ra # v0 is 0 or ENAMETOOLONG
165 nop
166 END(copystr)
167
168
169 /*
170 * fillw(pat, addr, count)
171 */
172 LEAF(fillw)
	# Store halfword pattern a0 into a2 consecutive halfwords at a1.
	# NOTE(review): the count is decremented before it is tested, so
	# this assumes a2 > 0 on entry — a2 == 0 would wrap; confirm callers.
173 1:
174 PTR_ADDU a2, a2, -1
175 sh a0, 0(a1)
176 bne a2,zero, 1b
177 PTR_ADDU a1, a1, 2
	# (delay slot above advances the destination pointer)
178
179 jr ra
180 nop
181 END(fillw)
182
183 /*
184 * Optimized memory zero code.
185 * mem_zero_page(addr);
186 */
187 LEAF(mem_zero_page)
	# Zero one PAGE_SIZE page at a0, 8 bytes per iteration using sd.
	# Assumes a0 is 8-byte aligned and PAGE_SIZE is a multiple of 8
	# (required for sd and for the loop counter to reach exactly 0).
188 li v0, PAGE_SIZE
189 1:
190 PTR_SUBU v0, 8
191 sd zero, 0(a0)
192 bne zero, v0, 1b
193 PTR_ADDU a0, 8
	# (delay slot above advances the pointer)
194 jr ra
195 nop
196 END(mem_zero_page)
197
198 /*
199 * Block I/O routines mainly used by I/O drivers.
200 *
201 * Args as: a0 = port
202 * a1 = memory address
203 * a2 = count
204 */
205 LEAF(insb)
	# Read a2 bytes from port a0 into buffer a1; the port address is
	# never incremented.  a2 is turned into the buffer end address.
206 beq a2, zero, 2f
207 PTR_ADDU a2, a1
208 1:
209 lbu v0, 0(a0)
210 PTR_ADDU a1, 1
211 bne a1, a2, 1b
212 sb v0, -1(a1)
	# (delay slot above stores to the byte just stepped past)
213 2:
214 jr ra
215 nop
216 END(insb)
217
218 LEAF(insw)
	# Read a2 halfwords from port a0 into buffer a1.
219 beq a2, zero, 2f
220 PTR_ADDU a2, a2
	# (delay slot: a2 *= 2, converting the count to bytes; harmless
	# when the branch above is taken)
221 PTR_ADDU a2, a1
222 1:
223 lhu v0, 0(a0)
224 PTR_ADDU a1, 2
225 bne a1, a2, 1b
226 sh v0, -2(a1)
227 2:
228 jr ra
229 nop
230 END(insw)
231
232 LEAF(insl)
	# Read a2 words from port a0 into buffer a1.
233 beq a2, zero, 2f
234 sll a2, 2
	# (delay slot: count -> bytes)
235 PTR_ADDU a2, a1
236 1:
237 lw v0, 0(a0)
238 PTR_ADDU a1, 4
239 bne a1, a2, 1b
240 sw v0, -4(a1)
241 2:
242 jr ra
243 nop
244 END(insl)
245
246 LEAF(outsb)
	# Write a2 bytes from buffer a1 to port a0.
247 beq a2, zero, 2f
248 PTR_ADDU a2, a1
249 1:
250 lbu v0, 0(a1)
251 PTR_ADDU a1, 1
252 bne a1, a2, 1b
253 sb v0, 0(a0)
254 2:
255 jr ra
256 nop
257 END(outsb)
258
259 LEAF(outsw)
	# Write a2 halfwords from buffer a1 to port a0.  A halfword-
	# unaligned buffer takes the LWHI/LWLO path at 3: below.
260 beq a2, zero, 2f
261 addu a2, a2
	# (delay slot: count -> bytes)
262 li v0, 1
263 and v0, a1
264 bne v0, zero, 3f # arghh, unaligned.
265 addu a2, a1
266 1:
267 lhu v0, 0(a1)
268 addiu a1, 2
269 bne a1, a2, 1b
270 sh v0, 0(a0)
271 2:
272 jr ra
273 nop
274 3:
	# Unaligned source: assemble each halfword with an unaligned
	# word load (LWHI/LWLO pair), then store the low 16 bits.
275 LWHI v0, 0(a1)
276 LWLO v0, 3(a1)
277 addiu a1, 2
278 bne a1, a2, 3b
279 sh v0, 0(a0)
280
281 jr ra
282 nop
283 END(outsw)
284
285 LEAF(outsl)
	# Write a2 words from buffer a1 to port a0; unaligned buffers
	# use the LWHI/LWLO path at 3: below.
286 beq a2, zero, 2f
287 sll a2, 2
	# (delay slot: count -> bytes)
288 li v0, 3
289 and v0, a1
290 bne v0, zero, 3f # arghh, unaligned.
291 addu a2, a1
292 1:
293 lw v0, 0(a1)
294 addiu a1, 4
295 bne a1, a2, 1b
296 sw v0, 0(a0)
297 2:
298 jr ra
299 nop
300 3:
301 LWHI v0, 0(a1)
302 LWLO v0, 3(a1)
303 addiu a1, 4
304 bne a1, a2, 3b
305 sw v0, 0(a0)
306
307 jr ra
308 nop
309 END(outsl)
310
311 /*
312 * Copy a null terminated string from the user address space into
313 * the kernel address space.
314 *
315 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
316 * caddr_t fromaddr;
317 * caddr_t toaddr;
318 * u_int maxlength;
319 * u_int *lencopied;
320 */
321 NON_LEAF(copyinstr, CALLFRAME_SIZ, ra)
	# copystr() from user source a0 to kernel dest a1; faults during
	# the copy are caught via pcb_onfault -> copyerr, which unwinds
	# this frame and returns EFAULT.
322 PTR_SUBU sp, sp, CALLFRAME_SIZ
323 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
324 PTR_LA v0, copyerr
	# A negative (sign-bit set) address is a kernel address, not user
	# space: bail straight to copyerr.
325 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
326 REG_S ra, CALLFRAME_RA(sp)
	# (delay slot above saves ra on both paths)
327 GET_CPU_PCPU(v1)
328 PTR_L v1, PC_CURPCB(v1)
329 jal _C_LABEL(copystr)
330 PTR_S v0, U_PCB_ONFAULT(v1)
	# (delay slot above arms the fault handler just before the copy)
331 REG_L ra, CALLFRAME_RA(sp)
332 GET_CPU_PCPU(v1)
333 PTR_L v1, PC_CURPCB(v1)
334 PTR_S zero, U_PCB_ONFAULT(v1)
335 j ra
336 PTR_ADDU sp, sp, CALLFRAME_SIZ
337 END(copyinstr)
338
339 /*
340 * Copy a null terminated string from the kernel address space into
341 * the user address space.
342 *
343 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
344 * caddr_t fromaddr;
345 * caddr_t toaddr;
346 * u_int maxlength;
347 * u_int *lencopied;
348 */
349 NON_LEAF(copyoutstr, CALLFRAME_SIZ, ra)
	# copystr() from kernel source a0 to user dest a1; the user-space
	# check is on a1 (the destination) here.  Faults go to copyerr
	# (EFAULT) via pcb_onfault.
350 PTR_SUBU sp, sp, CALLFRAME_SIZ
351 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
352 PTR_LA v0, copyerr
353 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
354 REG_S ra, CALLFRAME_RA(sp)
	# (delay slot above saves ra on both paths)
355 GET_CPU_PCPU(v1)
356 PTR_L v1, PC_CURPCB(v1)
357 jal _C_LABEL(copystr)
358 PTR_S v0, U_PCB_ONFAULT(v1)
	# (delay slot above arms the fault handler just before the copy)
359 REG_L ra, CALLFRAME_RA(sp)
360 GET_CPU_PCPU(v1)
361 PTR_L v1, PC_CURPCB(v1)
362 PTR_S zero, U_PCB_ONFAULT(v1)
363 j ra
364 PTR_ADDU sp, sp, CALLFRAME_SIZ
365 END(copyoutstr)
366
367 /*
368 * Copy specified amount of data from user space into the kernel
369 * copyin(from, to, len)
370 * caddr_t *from; (user source address)
371 * caddr_t *to; (kernel destination address)
372 * unsigned len;
373 */
374 NON_LEAF(copyin, CALLFRAME_SIZ, ra)
	# bcopy() len (a2) bytes from user address a0 to kernel address a1.
	# Returns 0 on success; a fault lands in copyerr -> EFAULT.
375 PTR_SUBU sp, sp, CALLFRAME_SIZ
376 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
377 PTR_LA v0, copyerr
378 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
379 REG_S ra, CALLFRAME_RA(sp)
	# (delay slot above saves ra on both paths)
380 GET_CPU_PCPU(v1)
381 PTR_L v1, PC_CURPCB(v1)
382 jal _C_LABEL(bcopy)
383 PTR_S v0, U_PCB_ONFAULT(v1)
	# (delay slot above arms the fault handler just before the copy)
384 REG_L ra, CALLFRAME_RA(sp)
385 GET_CPU_PCPU(v1)
386 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
387 PTR_S zero, U_PCB_ONFAULT(v1)
388 PTR_ADDU sp, sp, CALLFRAME_SIZ
389 j ra
390 move v0, zero
391 END(copyin)
392
393 /*
394 * Copy specified amount of data from kernel to the user space
395 * copyout(from, to, len)
396 * caddr_t *from; (kernel source address)
397 * caddr_t *to; (user destination address)
398 * unsigned len;
399 */
400 NON_LEAF(copyout, CALLFRAME_SIZ, ra)
	# bcopy() len (a2) bytes from kernel address a0 to user address a1.
	# The user-space check is on a1 (the destination).  Returns 0 on
	# success; a fault lands in copyerr -> EFAULT.
401 PTR_SUBU sp, sp, CALLFRAME_SIZ
402 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
403 PTR_LA v0, copyerr
404 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
405 REG_S ra, CALLFRAME_RA(sp)
	# (delay slot above saves ra on both paths)
406 GET_CPU_PCPU(v1)
407 PTR_L v1, PC_CURPCB(v1)
408 jal _C_LABEL(bcopy)
409 PTR_S v0, U_PCB_ONFAULT(v1)
	# (delay slot above arms the fault handler just before the copy)
410 REG_L ra, CALLFRAME_RA(sp)
411 GET_CPU_PCPU(v1)
412 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
413 PTR_S zero, U_PCB_ONFAULT(v1)
414 PTR_ADDU sp, sp, CALLFRAME_SIZ
415 j ra
416 move v0, zero
417 END(copyout)
418
419 LEAF(copyerr)
	# Fault target for copyin/copyout/copyinstr/copyoutstr: unwind the
	# CALLFRAME those routines pushed and return EFAULT.  Note this is
	# entered with sp still pointing at that frame.
420 REG_L ra, CALLFRAME_RA(sp)
421 PTR_ADDU sp, sp, CALLFRAME_SIZ
422 j ra
423 li v0, EFAULT # return error
424 END(copyerr)
425
426 /*
427 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
428 * user text space.
429 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
430 * user data space.
431 */
432 #ifdef __mips_n64
433 LEAF(fuword64)
	# Fetch a 64-bit word from user address a0.  Returns the value, or
	# -1 via fswberr on fault.  fuword/fuiword alias here on n64.
434 ALEAF(fuword)
435 ALEAF(fuiword)
436 PTR_LA v0, fswberr
437 blt a0, zero, fswberr # make sure address is in user space
438 nop
439 GET_CPU_PCPU(v1)
440 PTR_L v1, PC_CURPCB(v1)
441 PTR_S v0, U_PCB_ONFAULT(v1)
442 ld v0, 0(a0) # fetch word
443 j ra
444 PTR_S zero, U_PCB_ONFAULT(v1)
	# (delay slot above disarms the fault handler after the load)
445 END(fuword64)
446 #endif
447
448 LEAF(fuword32)
	# Fetch a 32-bit word from user address a0.  On 32-bit ABIs this
	# is also fuword/fuiword.  Returns the value, or -1 on fault.
449 #ifndef __mips_n64
450 ALEAF(fuword)
451 ALEAF(fuiword)
452 #endif
453 PTR_LA v0, fswberr
454 blt a0, zero, fswberr # make sure address is in user space
455 nop
456 GET_CPU_PCPU(v1)
457 PTR_L v1, PC_CURPCB(v1)
458 PTR_S v0, U_PCB_ONFAULT(v1)
459 lw v0, 0(a0) # fetch word
460 j ra
461 PTR_S zero, U_PCB_ONFAULT(v1)
462 END(fuword32)
463
464 LEAF(fusword)
	# Fetch an unsigned short from user address a0; -1 on fault.
465 ALEAF(fuisword)
466 PTR_LA v0, fswberr
467 blt a0, zero, fswberr # make sure address is in user space
468 nop
469 GET_CPU_PCPU(v1)
470 PTR_L v1, PC_CURPCB(v1)
471 PTR_S v0, U_PCB_ONFAULT(v1)
472 lhu v0, 0(a0) # fetch short
473 j ra
474 PTR_S zero, U_PCB_ONFAULT(v1)
475 END(fusword)
476
477 LEAF(fubyte)
	# Fetch an unsigned byte from user address a0; -1 on fault.
478 ALEAF(fuibyte)
479 PTR_LA v0, fswberr
480 blt a0, zero, fswberr # make sure address is in user space
481 nop
482 GET_CPU_PCPU(v1)
483 PTR_L v1, PC_CURPCB(v1)
484 PTR_S v0, U_PCB_ONFAULT(v1)
485 lbu v0, 0(a0) # fetch byte
486 j ra
487 PTR_S zero, U_PCB_ONFAULT(v1)
488 END(fubyte)
489
490 LEAF(suword32)
	# Store 32-bit a1 at user address a0.  Returns 0, or -1 on fault.
491 #ifndef __mips_n64
492 XLEAF(suword)
493 #endif
494 PTR_LA v0, fswberr
495 blt a0, zero, fswberr # make sure address is in user space
496 nop
497 GET_CPU_PCPU(v1)
498 PTR_L v1, PC_CURPCB(v1)
499 PTR_S v0, U_PCB_ONFAULT(v1)
500 sw a1, 0(a0) # store word
501 PTR_S zero, U_PCB_ONFAULT(v1)
502 j ra
503 move v0, zero
504 END(suword32)
505
506 #ifdef __mips_n64
507 LEAF(suword64)
	# Store 64-bit a1 at user address a0.  Returns 0, or -1 on fault.
508 XLEAF(suword)
509 PTR_LA v0, fswberr
510 blt a0, zero, fswberr # make sure address is in user space
511 nop
512 GET_CPU_PCPU(v1)
513 PTR_L v1, PC_CURPCB(v1)
514 PTR_S v0, U_PCB_ONFAULT(v1)
515 sd a1, 0(a0) # store word
516 PTR_S zero, U_PCB_ONFAULT(v1)
517 j ra
518 move v0, zero
519 END(suword64)
520 #endif
521
522 /*
523 * casuword(9)
524 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
525 */
526 /*
527 * casuword32(9)
528 * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
529 * <a2>uint32_t newval)
530 */
531 LEAF(casuword32)
	# Compare-and-swap at user address a0: if *a0 == a1 (oldval),
	# atomically store a2 (newval) using ll/sc, retrying on sc failure.
	# Returns the loaded old value on success, -1 on compare mismatch
	# (and -1 via fswberr on fault).
532 #ifndef __mips_n64
533 XLEAF(casuword)
534 #endif
535 PTR_LA v0, fswberr
536 blt a0, zero, fswberr # make sure address is in user space
537 nop
538 GET_CPU_PCPU(v1)
539 PTR_L v1, PC_CURPCB(v1)
540 PTR_S v0, U_PCB_ONFAULT(v1)
541 1:
542 move t0, a2
543 ll v0, 0(a0)
	# v0 = current value (load-linked)
544 bne a1, v0, 2f
545 nop
546 sc t0, 0(a0) # store word
	# sc leaves t0 == 0 if the conditional store failed; retry then.
547 beqz t0, 1b
548 nop
549 j 3f
550 nop
551 2:
552 li v0, -1
553 3:
554 PTR_S zero, U_PCB_ONFAULT(v1)
555 jr ra
556 nop
557 END(casuword32)
558
559 #ifdef __mips_n64
560 LEAF(casuword64)
	# 64-bit compare-and-swap at user address a0 using lld/scd;
	# same contract as casuword32: old value on success, -1 on
	# mismatch or fault.
561 XLEAF(casuword)
562 PTR_LA v0, fswberr
563 blt a0, zero, fswberr # make sure address is in user space
564 nop
565 GET_CPU_PCPU(v1)
566 PTR_L v1, PC_CURPCB(v1)
567 PTR_S v0, U_PCB_ONFAULT(v1)
568 1:
569 move t0, a2
570 lld v0, 0(a0)
571 bne a1, v0, 2f
572 nop
573 scd t0, 0(a0) # store double word
	# scd leaves t0 == 0 on failure; retry the ll/sc sequence.
574 beqz t0, 1b
575 nop
576 j 3f
577 nop
578 2:
579 li v0, -1
580 3:
581 PTR_S zero, U_PCB_ONFAULT(v1)
582 jr ra
583 nop
584 END(casuword64)
585 #endif
586
587 #if 0
588 /* unused in FreeBSD */
589 /*
590 * Have to flush instruction cache afterwards.
591 */
592 LEAF(suiword)
	# (compiled out) Store word a1 at user text address a0, then tail-
	# call the I-cache sync routine, which supplies the 0 return value.
593 PTR_LA v0, fswberr
594 blt a0, zero, fswberr # make sure address is in user space
595 nop
596 GET_CPU_PCPU(v1)
597 PTR_L v1, PC_CURPCB(v1)
598 PTR_S v0, U_PCB_ONFAULT(v1)
599 sw a1, 0(a0) # store word
600 PTR_S zero, U_PCB_ONFAULT(v1)
601 j _C_LABEL(Mips_SyncICache) # FlushICache sets v0 = 0. (Ugly)
602 li a1, 4 # size of word
603 END(suiword)
604 #endif
605
606 /*
607 * Will have to flush the instruction cache if byte merging is done in hardware.
608 */
609 LEAF(susword)
	# Store short a1 at user address a0.  Returns 0, or -1 on fault.
610 ALEAF(suisword)
611 PTR_LA v0, fswberr
612 blt a0, zero, fswberr # make sure address is in user space
613 nop
614 GET_CPU_PCPU(v1)
615 PTR_L v1, PC_CURPCB(v1)
616 PTR_S v0, U_PCB_ONFAULT(v1)
617 sh a1, 0(a0) # store short
618 PTR_S zero, U_PCB_ONFAULT(v1)
619 j ra
620 move v0, zero
621 END(susword)
622
623 LEAF(subyte)
	# Store byte a1 at user address a0.  Returns 0, or -1 on fault.
624 ALEAF(suibyte)
625 PTR_LA v0, fswberr
626 blt a0, zero, fswberr # make sure address is in user space
627 nop
628 GET_CPU_PCPU(v1)
629 PTR_L v1, PC_CURPCB(v1)
630 PTR_S v0, U_PCB_ONFAULT(v1)
631 sb a1, 0(a0) # store byte
632 PTR_S zero, U_PCB_ONFAULT(v1)
633 j ra
634 move v0, zero
635 END(subyte)
636
637 LEAF(fswberr)
	# Common fault/bad-address target for the fu*/su*/casuword family:
	# just return -1.  (pcb_onfault is cleared by the trap handler.)
638 j ra
639 li v0, -1
640 END(fswberr)
641
642 /*
643 * fuswintr and suswintr are just like fusword and susword except that if
644 * the page is not in memory or would cause a trap, then we return an error.
645 * The important thing is to prevent sleep() and switch().
646 */
647 LEAF(fuswintr)
	# Interrupt-safe fusword: fetch short from user address a0, but a
	# fault (e.g. page not resident) returns -1 via fswintrberr rather
	# than ever sleeping or switching.
648 PTR_LA v0, fswintrberr
649 blt a0, zero, fswintrberr # make sure address is in user space
650 nop
651 GET_CPU_PCPU(v1)
652 PTR_L v1, PC_CURPCB(v1)
653 PTR_S v0, U_PCB_ONFAULT(v1)
654 lhu v0, 0(a0) # fetch short
655 j ra
656 PTR_S zero, U_PCB_ONFAULT(v1)
657 END(fuswintr)
658
659 LEAF(suswintr)
	# Interrupt-safe susword: store short a1 at user address a0;
	# returns 0, or -1 via fswintrberr instead of faulting in a page.
660 PTR_LA v0, fswintrberr
661 blt a0, zero, fswintrberr # make sure address is in user space
662 nop
663 GET_CPU_PCPU(v1)
664 PTR_L v1, PC_CURPCB(v1)
665 PTR_S v0, U_PCB_ONFAULT(v1)
666 sh a1, 0(a0) # store short
667 PTR_S zero, U_PCB_ONFAULT(v1)
668 j ra
669 move v0, zero
670 END(suswintr)
671
672 LEAF(fswintrberr)
	# Fault target for fuswintr/suswintr: return -1 without sleeping.
673 j ra
674 li v0, -1
675 END(fswintrberr)
676
677 /*
678 * memcpy(to, from, len)
679 * {ov}bcopy(from, to, len)
680 */
681 LEAF(memcpy)
	# memcpy(dst=a0, src=a1, len=a2): swap args into the bcopy
	# convention (src=a0, dst=a1) and fall through.  v0 keeps the
	# original dst so memcpy returns it per the C contract.
682 .set noreorder
683 move v0, a0 # swap from and to
684 move a0, a1
685 move a1, v0
686 ALEAF(bcopy)
687 ALEAF(ovbcopy)
	# bcopy/ovbcopy entry: a0 = from, a1 = to, a2 = len.  Handles
	# overlap: if from < to < from+len, copy backwards byte-by-byte;
	# otherwise copy forward, word-at-a-time when profitable.
688 .set noreorder
689 PTR_ADDU t0, a0, a2 # t0 = end of s1 region
690 sltu t1, a1, t0
691 sltu t2, a0, a1
692 and t1, t1, t2 # t1 = true if from < to < (from+len)
693 beq t1, zero, forward # non overlapping, do forward copy
694 slt t2, a2, 12 # check for small copy
695
	# Overlapping: copy backwards from the tails (t0 = src end,
	# t1 = dst end) so the destination never clobbers unread source.
696 ble a2, zero, 2f
697 PTR_ADDU t1, a1, a2 # t1 = end of to region
698 1:
699 lb v1, -1(t0) # copy bytes backwards,
700 PTR_SUBU t0, t0, 1 # doesnt happen often so do slow way
701 PTR_SUBU t1, t1, 1
702 bne t0, a0, 1b
703 sb v1, 0(t1)
704 2:
705 j ra
706 nop
707 forward:
	# t2 (set in the delay slot above) is 1 when len < 12.
708 bne t2, zero, smallcpy # do a small bcopy
709 xor v1, a0, a1 # compare low two bits of addresses
710 and v1, v1, 3
711 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
712 beq v1, zero, aligned # addresses can be word aligned
713 and a3, a3, 3
714
	# Source and dest have different alignment: align the destination,
	# then copy words with unaligned loads (LWHI/LWLO) from the source.
715 beq a3, zero, 1f
716 PTR_SUBU a2, a2, a3 # subtract from remaining count
717 LWHI v1, 0(a0) # get next 4 bytes (unaligned)
718 LWLO v1, 3(a0)
719 PTR_ADDU a0, a0, a3
720 SWHI v1, 0(a1) # store 1, 2, or 3 bytes to align a1
721 PTR_ADDU a1, a1, a3
722 1:
723 and v1, a2, 3 # compute number of words left
724 PTR_SUBU a3, a2, v1
725 move a2, v1
726 PTR_ADDU a3, a3, a0 # compute ending address
727 2:
728 LWHI v1, 0(a0) # copy words a0 unaligned, a1 aligned
729 LWLO v1, 3(a0)
730 PTR_ADDU a0, a0, 4
731 sw v1, 0(a1)
732 PTR_ADDU a1, a1, 4
733 bne a0, a3, 2b
734 nop # We have to do this mmu-bug.
735 b smallcpy
736 nop
737 aligned:
	# Same misalignment on both sides: align both with one partial
	# store, then copy whole words with plain lw/sw.
738 beq a3, zero, 1f
739 PTR_SUBU a2, a2, a3 # subtract from remaining count
740 LWHI v1, 0(a0) # copy 1, 2, or 3 bytes to align
741 PTR_ADDU a0, a0, a3
742 SWHI v1, 0(a1)
743 PTR_ADDU a1, a1, a3
744 1:
745 and v1, a2, 3 # compute number of whole words left
746 PTR_SUBU a3, a2, v1
747 move a2, v1
748 PTR_ADDU a3, a3, a0 # compute ending address
749 2:
750 lw v1, 0(a0) # copy words
751 PTR_ADDU a0, a0, 4
752 sw v1, 0(a1)
753 bne a0, a3, 2b
754 PTR_ADDU a1, a1, 4
755 smallcpy:
	# Byte-at-a-time tail (and whole copy when len < 12).
756 ble a2, zero, 2f
757 PTR_ADDU a3, a2, a0 # compute ending address
758 1:
759 lbu v1, 0(a0) # copy bytes
760 PTR_ADDU a0, a0, 1
761 sb v1, 0(a1)
762 bne a0, a3, 1b
763 PTR_ADDU a1, a1, 1 # MMU BUG ? can not do -1(a1) at 0x80000000!!
764 2:
765 j ra
766 nop
767 END(memcpy)
768
769 /*
770 * memset(void *s1, int c, int len)
771 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
772 */
773 LEAF(memset)
	# memset(s1=a0, c=a1, len=a2): returns s1 in v0.  For len >= 12,
	# replicate c into all four bytes of t1 and store whole words;
	# otherwise fall through to the byte loop.
774 .set noreorder
775 blt a2, 12, memsetsmallclr # small amount to clear?
776 move v0, a0 # save s1 for result
777
778 sll t1, a1, 8 # compute c << 8 in t1
779 or t1, t1, a1 # compute c << 8 | c in t1
780 sll t2, t1, 16 # shift that left 16
781 or t1, t2, t1 # or together
782
783 PTR_SUBU t0, zero, a0 # compute # bytes to word align address
784 and t0, t0, 3
785 beq t0, zero, 1f # skip if word aligned
786 PTR_SUBU a2, a2, t0 # subtract from remaining count
787 SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
788 PTR_ADDU a0, a0, t0
789 1:
790 and v1, a2, 3 # compute number of whole words left
791 PTR_SUBU t0, a2, v1
792 PTR_SUBU a2, a2, t0
793 PTR_ADDU t0, t0, a0 # compute ending address
794 2:
795 PTR_ADDU a0, a0, 4 # clear words
796 #ifdef MIPS3_5900
797 nop
798 nop
799 nop
800 nop
801 #endif
802 bne a0, t0, 2b # unrolling loop does not help
803 sw t1, -4(a0) # since we are limited by memory speed
804
805 memsetsmallclr:
	# Byte tail: a2 < 4 remaining (or the whole request when len < 12).
806 ble a2, zero, 2f
807 PTR_ADDU t0, a2, a0 # compute ending address
808 1:
809 PTR_ADDU a0, a0, 1 # clear bytes
810 #ifdef MIPS3_5900
811 nop
812 nop
813 nop
814 nop
815 #endif
816 bne a0, t0, 1b
817 sb a1, -1(a0)
818 2:
819 j ra
820 nop
821 .set reorder
822 END(memset)
823
824 /*
825 * bzero(s1, n)
826 */
827 LEAF(bzero)
	# bzero(s1=a0, n=a1): zero n bytes.  Word-aligned sw loop for
	# n >= 12, byte loop otherwise / for the tail.
828 ALEAF(blkclr)
829 .set noreorder
830 blt a1, 12, smallclr # small amount to clear?
831 PTR_SUBU a3, zero, a0 # compute # bytes to word align address
832 and a3, a3, 3
833 beq a3, zero, 1f # skip if word aligned
834 PTR_SUBU a1, a1, a3 # subtract from remaining count
835 SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
836 PTR_ADDU a0, a0, a3
837 1:
838 and v0, a1, 3 # compute number of words left
839 PTR_SUBU a3, a1, v0
840 move a1, v0
841 PTR_ADDU a3, a3, a0 # compute ending address
842 2:
843 PTR_ADDU a0, a0, 4 # clear words
844 bne a0, a3, 2b # unrolling loop does not help
845 sw zero, -4(a0) # since we are limited by memory speed
846 smallclr:
847 ble a1, zero, 2f
848 PTR_ADDU a3, a1, a0 # compute ending address
849 1:
850 PTR_ADDU a0, a0, 1 # clear bytes
851 bne a0, a3, 1b
852 sb zero, -1(a0)
853 2:
854 j ra
855 nop
856 END(bzero)
857
858
859 /*
860 * bcmp(s1, s2, n)
861 */
862 LEAF(bcmp)
	# bcmp(s1=a0, s2=a1, n=a2): returns 0 in v0 if the regions match,
	# 1 otherwise (not a signed difference).  Uses word compares when
	# n >= 16; mismatched low bits fall to the unalignedcmp path.
863 .set noreorder
864 blt a2, 16, smallcmp # is it worth any trouble?
865 xor v0, a0, a1 # compare low two bits of addresses
866 and v0, v0, 3
867 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
868 bne v0, zero, unalignedcmp # not possible to align addresses
869 and a3, a3, 3
870
871 beq a3, zero, 1f
872 PTR_SUBU a2, a2, a3 # subtract from remaining count
873 move v0, v1 # init v0,v1 so unmodified bytes match
874 LWHI v0, 0(a0) # read 1, 2, or 3 bytes
875 LWHI v1, 0(a1)
876 PTR_ADDU a1, a1, a3
877 bne v0, v1, nomatch
878 PTR_ADDU a0, a0, a3
879 1:
880 and a3, a2, ~3 # compute number of whole words left
881 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
882 PTR_ADDU a3, a3, a0 # compute ending address
883 2:
884 lw v0, 0(a0) # compare words
885 lw v1, 0(a1)
886 PTR_ADDU a0, a0, 4
887 bne v0, v1, nomatch
888 PTR_ADDU a1, a1, 4
889 bne a0, a3, 2b
890 nop
891 b smallcmp # finish remainder
892 nop
893 unalignedcmp:
	# Addresses differ in alignment: byte-compare until a1 is word
	# aligned, then compare words with unaligned loads from a0.
894 beq a3, zero, 2f
895 PTR_SUBU a2, a2, a3 # subtract from remaining count
896 PTR_ADDU a3, a3, a0 # compute ending address
897 1:
898 lbu v0, 0(a0) # compare bytes until a1 word aligned
899 lbu v1, 0(a1)
900 PTR_ADDU a0, a0, 1
901 bne v0, v1, nomatch
902 PTR_ADDU a1, a1, 1
903 bne a0, a3, 1b
904 nop
905 2:
906 and a3, a2, ~3 # compute number of whole words left
907 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
908 PTR_ADDU a3, a3, a0 # compute ending address
909 3:
910 LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
911 LWLO v0, 3(a0)
912 lw v1, 0(a1)
913 PTR_ADDU a0, a0, 4
914 bne v0, v1, nomatch
915 PTR_ADDU a1, a1, 4
916 bne a0, a3, 3b
917 nop
918 smallcmp:
	# Byte-at-a-time compare for the tail / short inputs.
919 ble a2, zero, match
920 PTR_ADDU a3, a2, a0 # compute ending address
921 1:
922 lbu v0, 0(a0)
923 lbu v1, 0(a1)
924 PTR_ADDU a0, a0, 1
925 bne v0, v1, nomatch
926 PTR_ADDU a1, a1, 1
927 bne a0, a3, 1b
928 nop
929 match:
930 j ra
931 move v0, zero
932 nomatch:
933 j ra
934 li v0, 1
935 END(bcmp)
936
937
938 /*
939 * bit = ffs(value)
940 */
941 LEAF(ffs)
	# Find first set: returns the 1-based index of the lowest set bit
	# of a0 in v0, or 0 when a0 == 0.
942 .set noreorder
943 beq a0, zero, 2f
944 move v0, zero
945 1:
946 and v1, a0, 1 # bit set?
947 addu v0, v0, 1
948 beq v1, zero, 1b # no, continue
949 srl a0, a0, 1
950 2:
951 j ra
952 nop
953 END(ffs)
954
955 LEAF(get_current_fp)
	# Return the current frame pointer (register s8) in v0.
956 j ra
957 move v0, s8
958 END(get_current_fp)
959
960 LEAF(loadandclear)
	# Atomically read the word at a0 and set it to zero (ll/sc loop).
	# Returns the previous value in v0.
961 .set noreorder
962 1:
963 ll v0, 0(a0)
964 move t0, zero
965 sc t0, 0(a0)
966 beq t0, zero, 1b
	# sc leaves t0 == 0 on failure; retry until it sticks.
967 nop
968 j ra
969 nop
970 END(loadandclear)
971
972 #if 0
	# This whole group is compiled out (see #if 0); kept for reference.
973 /*
974 * u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
975 * Atomically compare the value stored at p with cmpval
976 * and if the two values are equal, update value *p with
977 * newval. Return zero if compare failed, non-zero otherwise
978 *
979 */
980
981 LEAF(atomic_cmpset_32)
982 .set noreorder
983 1:
984 ll t0, 0(a0)
985 move v0, zero
986 bne t0, a1, 2f
987 move t1, a2
988 sc t1, 0(a0)
989 beq t1, zero, 1b
990 or v0, v0, 1
	# (delay slot above sets v0 = 1; v0 is re-zeroed at the loop top
	# on an sc-failure retry, so only a successful store returns 1)
991 2:
992 j ra
993 nop
994 END(atomic_cmpset_32)
995
996 /**
997 * u_int32_t
998 * atomic_readandclear_32(u_int32_t *a)
999 * {
1000 * u_int32_t retval;
1001 * retval = *a;
1002 * *a = 0;
1003 * }
1004 */
1005 LEAF(atomic_readandclear_32)
1006 .set noreorder
1007 1:
1008 ll t0, 0(a0)
1009 move t1, zero
1010 move v0, t0
1011 sc t1, 0(a0)
1012 beq t1, zero, 1b
1013 nop
1014 j ra
1015 nop
1016 END(atomic_readandclear_32)
1017
1018 /**
1019 * void
1020 * atomic_set_32(u_int32_t *a, u_int32_t b)
1021 * {
1022 * *a |= b;
1023 * }
1024 */
1025 LEAF(atomic_set_32)
1026 .set noreorder
1027 1:
1028 ll t0, 0(a0)
1029 or t0, t0, a1
1030 sc t0, 0(a0)
1031 beq t0, zero, 1b
1032 nop
1033 j ra
1034 nop
1035 END(atomic_set_32)
1036
1037 /**
1038 * void
1039 * atomic_add_32(uint32_t *a, uint32_t b)
1040 * {
1041 * *a += b;
1042 * }
1043 */
1044 LEAF(atomic_add_32)
1045 .set noreorder
1046 srl a0, a0, 2 # round down address to be 32-bit aligned
1047 sll a0, a0, 2
1048 1:
1049 ll t0, 0(a0)
1050 addu t0, t0, a1
1051 sc t0, 0(a0)
1052 beq t0, zero, 1b
1053 nop
1054 j ra
1055 nop
1056 END(atomic_add_32)
1057
1058 /**
1059 * void
1060 * atomic_clear_32(u_int32_t *a, u_int32_t b)
1061 * {
1062 * *a &= ~b;
1063 * }
1064 */
1065 LEAF(atomic_clear_32)
1066 .set noreorder
1067 srl a0, a0, 2 # round down address to be 32-bit aligned
1068 sll a0, a0, 2
1069 nor a1, zero, a1
	# a1 = ~b, so the and below clears exactly the requested bits
1070 1:
1071 ll t0, 0(a0)
1072 and t0, t0, a1 # clear the bits set in b
1073 sc t0, 0(a0)
1074 beq t0, zero, 1b
1075 nop
1076 j ra
1077 nop
1078 END(atomic_clear_32)
1079
1080 /**
1081 * void
1082 * atomic_subtract_32(uint32_t *a, uint32_t b)
1083 * {
1084 * *a -= b;
1085 * }
1086 */
1087 LEAF(atomic_subtract_32)
1088 .set noreorder
1089 srl a0, a0, 2 # round down address to be 32-bit aligned
1090 sll a0, a0, 2
1091 1:
1092 ll t0, 0(a0)
1093 subu t0, t0, a1
1094 sc t0, 0(a0)
1095 beq t0, zero, 1b
1096 nop
1097 j ra
1098 nop
1099 END(atomic_subtract_32)
1100
1101 #endif
1102
1103 /**
1104 * void
1105 * atomic_set_16(u_int16_t *a, u_int16_t b)
1106 * {
1107 * *a |= b;
1108 * }
1109 */
1110 LEAF(atomic_set_16)
	# OR the low-16-bit mask a1 into the halfword at a0, implemented
	# as an ll/sc loop on the enclosing aligned 32-bit word (assumes
	# the halfword occupies the word's low 16 bits — see the andi).
1111 .set noreorder
1112 srl a0, a0, 2 # round down address to be 32-bit aligned
1113 sll a0, a0, 2
1114 andi a1, a1, 0xffff
1115 1:
1116 ll t0, 0(a0)
1117 or t0, t0, a1
1118 sc t0, 0(a0)
1119 beq t0, zero, 1b
1120 nop
1121 j ra
1122 nop
1123 END(atomic_set_16)
1124
1125 /**
1126 * void
1127 * atomic_clear_16(u_int16_t *a, u_int16_t b)
1128 * {
1129 * *a &= ~b;
1130 * }
1131 */
1132 LEAF(atomic_clear_16)
	# Clear the bits of mask a1 in the halfword at a0: ll/sc on the
	# enclosing aligned word, modifying only the low 16 bits and
	# preserving the upper 16 bits verbatim.
1133 .set noreorder
1134 srl a0, a0, 2 # round down address to be 32-bit aligned
1135 sll a0, a0, 2
1136 nor a1, zero, a1
	# a1 = ~b (complemented mask)
1137 1:
1138 ll t0, 0(a0)
1139 move t1, t0
1140 andi t1, t1, 0xffff # t1 has the original lower 16 bits
1141 and t1, t1, a1 # t1 has the new lower 16 bits
1142 srl t0, t0, 16 # preserve original top 16 bits
1143 sll t0, t0, 16
1144 or t0, t0, t1
1145 sc t0, 0(a0)
1146 beq t0, zero, 1b
1147 nop
1148 j ra
1149 nop
1150 END(atomic_clear_16)
1151
1152
1153 /**
1154 * void
1155 * atomic_subtract_16(uint16_t *a, uint16_t b)
1156 * {
1157 * *a -= b;
1158 * }
1159 */
1160 LEAF(atomic_subtract_16)
	# Subtract a1 from the halfword at a0: ll/sc on the enclosing
	# aligned word; the subtraction wraps within 16 bits and the
	# upper 16 bits of the word are preserved.
1161 .set noreorder
1162 srl a0, a0, 2 # round down address to be 32-bit aligned
1163 sll a0, a0, 2
1164 1:
1165 ll t0, 0(a0)
1166 move t1, t0
1167 andi t1, t1, 0xffff # t1 has the original lower 16 bits
1168 subu t1, t1, a1
1169 andi t1, t1, 0xffff # t1 has the new lower 16 bits
1170 srl t0, t0, 16 # preserve original top 16 bits
1171 sll t0, t0, 16
1172 or t0, t0, t1
1173 sc t0, 0(a0)
1174 beq t0, zero, 1b
1175 nop
1176 j ra
1177 nop
1178 END(atomic_subtract_16)
1179
1180 /**
1181 * void
1182 * atomic_add_16(uint16_t *a, uint16_t b)
1183 * {
1184 * *a += b;
1185 * }
1186 */
1187 LEAF(atomic_add_16)
	# Add a1 to the halfword at a0: ll/sc on the enclosing aligned
	# word; the sum wraps within 16 bits and the upper 16 bits of
	# the word are preserved.
1188 .set noreorder
1189 srl a0, a0, 2 # round down address to be 32-bit aligned
1190 sll a0, a0, 2
1191 1:
1192 ll t0, 0(a0)
1193 move t1, t0
1194 andi t1, t1, 0xffff # t1 has the original lower 16 bits
1195 addu t1, t1, a1
1196 andi t1, t1, 0xffff # t1 has the new lower 16 bits
1197 srl t0, t0, 16 # preserve original top 16 bits
1198 sll t0, t0, 16
1199 or t0, t0, t1
1200 sc t0, 0(a0)
1201 beq t0, zero, 1b
1202 nop
1203 j ra
1204 nop
1205 END(atomic_add_16)
1206
1207 /**
1208 * void
1209 * atomic_add_8(uint8_t *a, uint8_t b)
1210 * {
1211 * *a += b;
1212 * }
1213 */
1214 LEAF(atomic_add_8)
	# Add a1 to the byte at a0: ll/sc on the enclosing aligned word;
	# the sum wraps within 8 bits, upper 24 bits preserved.
1215 .set noreorder
1216 srl a0, a0, 2 # round down address to be 32-bit aligned
1217 sll a0, a0, 2
1218 1:
1219 ll t0, 0(a0)
1220 move t1, t0
1221 andi t1, t1, 0xff # t1 has the original lower 8 bits
1222 addu t1, t1, a1
1223 andi t1, t1, 0xff # t1 has the new lower 8 bits
1224 srl t0, t0, 8 # preserve original top 24 bits
1225 sll t0, t0, 8
1226 or t0, t0, t1
1227 sc t0, 0(a0)
1228 beq t0, zero, 1b
1229 nop
1230 j ra
1231 nop
1232 END(atomic_add_8)
1233
1234
1235 /**
1236 * void
1237 * atomic_subtract_8(uint8_t *a, uint8_t b)
1238 * {
1239 * *a -= b;
1240 * }
1241 */
1242 LEAF(atomic_subtract_8)
	# Subtract a1 from the byte at a0 (despite the stale header comment
	# above, this SUBTRACTS): ll/sc on the enclosing aligned word;
	# wraps within 8 bits, upper 24 bits preserved.
1243 .set noreorder
1244 srl a0, a0, 2 # round down address to be 32-bit aligned
1245 sll a0, a0, 2
1246 1:
1247 ll t0, 0(a0)
1248 move t1, t0
1249 andi t1, t1, 0xff # t1 has the original lower 8 bits
1250 subu t1, t1, a1
1251 andi t1, t1, 0xff # t1 has the new lower 8 bits
1252 srl t0, t0, 8 # preserve original top 24 bits
1253 sll t0, t0, 8
1254 or t0, t0, t1
1255 sc t0, 0(a0)
1256 beq t0, zero, 1b
1257 nop
1258 j ra
1259 nop
1260 END(atomic_subtract_8)
1261
1262 /*
1263 * atomic 64-bit register read/write assembly language support routines.
1264 */
1265
1266 .set noreorder # Noreorder is default style!
1267
1268 #if !defined(__mips_n64) && !defined(__mips_n32)
1269 /*
1270 * I don't know if these routines have the right number of
1271 * NOPs in it for all processors. XXX
1272 *
1273 * Maybe it would be better to just leave this undefined in that case.
1274 */
1275 LEAF(atomic_store_64)
	# 32-bit ABI helper: copy the 64-bit value at *a1 into *a0 with a
	# single ld/sd pair while interrupts are masked, so the two halves
	# can never be observed torn by an interrupt on this CPU.
1276 mfc0 t1, MIPS_COP_0_STATUS
1277 and t2, t1, ~MIPS_SR_INT_IE
1278 mtc0 t2, MIPS_COP_0_STATUS
	# (nops below pad the mtc0 hazard window)
1279 nop
1280 nop
1281 nop
1282 nop
1283 ld t0, (a1)
1284 nop
1285 nop
1286 sd t0, (a0)
1287 nop
1288 nop
1289 mtc0 t1,MIPS_COP_0_STATUS
	# restore the caller's interrupt state
1290 nop
1291 nop
1292 nop
1293 nop
1294 j ra
1295 nop
1296 END(atomic_store_64)
1297
1298 LEAF(atomic_load_64)
	# 32-bit ABI helper: read the 64-bit value at *a0 into *a1 with a
	# single ld/sd pair under masked interrupts (mirror image of
	# atomic_store_64 above).
1299 mfc0 t1, MIPS_COP_0_STATUS
1300 and t2, t1, ~MIPS_SR_INT_IE
1301 mtc0 t2, MIPS_COP_0_STATUS
1302 nop
1303 nop
1304 nop
1305 nop
1306 ld t0, (a0)
1307 nop
1308 nop
1309 sd t0, (a1)
1310 nop
1311 nop
1312 mtc0 t1,MIPS_COP_0_STATUS
1313 nop
1314 nop
1315 nop
1316 nop
1317 j ra
1318 nop
1319 END(atomic_load_64)
1320 #endif
1321
1322 #if defined(DDB) || defined(DEBUG)
1323
/*
 * kdbpeek(addr) — debugger word fetch.
 *
 * Read a 32-bit word from a possibly invalid or unaligned address.
 * Faults are redirected to ddberr via the pcb onfault hook (v0 is
 * whatever ddberr leaves there on a fault).  Unaligned addresses are
 * handled with the LWHI/LWLO pair.
 */
LEAF(kdbpeek)
	PTR_LA	v1, ddberr		# fault landing pad
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)	# t1 = current pcb
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)	# (delay slot) arm fault handler

	lw	v0, (a0)		# aligned: plain word load
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm fault handler

1:
	LWHI	v0, 0(a0)		# unaligned: assemble word from two
	LWLO	v0, 3(a0)		#  partial loads
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm fault handler
END(kdbpeek)
1342
/*
 * Fault landing pad for kdbpeek/kdbpoke: the trap code jumps here via
 * the armed pcb onfault hook, and we simply return to the probe's
 * caller.
 */
ddberr:
	jr	ra
	nop				# branch delay slot
1346
1347 #if defined(DDB)
/*
 * kdbpoke(addr, val) — debugger word store.
 *
 * Write the 32-bit value a1 to a possibly invalid or unaligned
 * address a0.  Faults are redirected to ddberr via the pcb onfault
 * hook; unaligned addresses are handled with the SWHI/SWLO pair.
 */
LEAF(kdbpoke)
	PTR_LA	v1, ddberr		# fault landing pad
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)	# t1 = current pcb
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)	# (delay slot) arm fault handler

	sw	a1, (a0)		# aligned: plain word store
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm fault handler

1:
	SWHI	a1, 0(a0)		# unaligned: store word in two
	SWLO	a1, 3(a0)		#  partial stores
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm fault handler
END(kdbpoke)
1366
.data
	.globl	esym
	# End-of-kernel-symbols marker; zero here, presumably patched by
	# the boot loader / debugger setup — confirm against the consumers.
esym:	.word	0
1370
1371 #endif /* DDB */
1372 #endif /* DDB || DEBUG */
1373
1374 .text
/*
 * void breakpoint(void)
 * Enter the debugger by raising a break exception with the
 * MIPS_BREAK_SOVER_VAL code; returns if the debugger continues.
 */
LEAF(breakpoint)
	break	MIPS_BREAK_SOVER_VAL
	jr	ra
	nop				# branch delay slot
END(breakpoint)
1380
/*
 * int setjmp(label_t *a0)
 *
 * Save the callee-saved registers (s0-s8), sp, ra and the COP0 status
 * register into the buffer at a0 and return 0.  A subsequent longjmp
 * on the same buffer resumes at this call site returning 1.
 */
LEAF(setjmp)
	mfc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	REG_S	s0, (SZREG * PREG_S0)(a0)
	REG_S	s1, (SZREG * PREG_S1)(a0)
	REG_S	s2, (SZREG * PREG_S2)(a0)
	REG_S	s3, (SZREG * PREG_S3)(a0)
	REG_S	s4, (SZREG * PREG_S4)(a0)
	REG_S	s5, (SZREG * PREG_S5)(a0)
	REG_S	s6, (SZREG * PREG_S6)(a0)
	REG_S	s7, (SZREG * PREG_S7)(a0)
	REG_S	s8, (SZREG * PREG_S8)(a0)
	REG_S	sp, (SZREG * PREG_SP)(a0)
	REG_S	ra, (SZREG * PREG_RA)(a0)
	REG_S	v0, (SZREG * PREG_SR)(a0)	# saved status register
	jr	ra
	li	v0, 0			# setjmp return (delay slot)
END(setjmp)
1398
/*
 * void longjmp(label_t *a0)
 *
 * Restore the register state saved by setjmp (s0-s8, sp, ra, COP0
 * status) from the buffer at a0 and return 1 to setjmp's caller.
 */
LEAF(longjmp)
	REG_L	v0, (SZREG * PREG_SR)(a0)	# saved status register
	REG_L	ra, (SZREG * PREG_RA)(a0)
	REG_L	s0, (SZREG * PREG_S0)(a0)
	REG_L	s1, (SZREG * PREG_S1)(a0)
	REG_L	s2, (SZREG * PREG_S2)(a0)
	REG_L	s3, (SZREG * PREG_S3)(a0)
	REG_L	s4, (SZREG * PREG_S4)(a0)
	REG_L	s5, (SZREG * PREG_S5)(a0)
	REG_L	s6, (SZREG * PREG_S6)(a0)
	REG_L	s7, (SZREG * PREG_S7)(a0)
	REG_L	s8, (SZREG * PREG_S8)(a0)
	REG_L	sp, (SZREG * PREG_SP)(a0)
	mtc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	ITLBNOPFIX			# cp0 hazard avoidance after mtc0
	jr	ra
	li	v0, 1			# longjmp return (delay slot)
END(longjmp)
1417
/*
 * Common fault handler for the user-space access primitives below
 * (casuptr): the trap code vectors here through the armed pcb onfault
 * hook.  Disarm the hook and return -1 to the primitive's caller.
 *
 * Fixes vs. previous revision: the pcb pointer was loaded into t0 but
 * never used, leaving U_PCB_ONFAULT armed after the fault; and under
 * `.set noreorder' the `j ra' delay slot was unfilled, executing the
 * following routine's first instruction.
 */
LEAF(fusufault)
	GET_CPU_PCPU(t0)
	lw	t0, PC_CURTHREAD(t0)	# t0 = curthread
	lw	t0, TD_PCB(t0)		# t0 = curthread's pcb
	PTR_S	zero, U_PCB_ONFAULT(t0)	# disarm the fault handler
	li	v0, -1			# error return value
	j	ra
	nop				# branch delay slot
END(fusufault)
1425
/*
 * intptr_t casuptr(intptr_t *p, intptr_t old, intptr_t new)
 *
 * Atomically compare-and-set a pointer that is in user space: if
 * *p == old, store new into *p.  Returns the value previously in *p,
 * or -1 (via fusufault) if the access faults or p is not a user
 * address.  Used as the basic primitive for a kernel supported user
 * space lock implementation.
 *
 * Fixes vs. previous revision:
 *  - the compare used `beq', which skipped the store on a MATCH and
 *    stored unconditionally on a MISMATCH — inverted CAS semantics;
 *  - the sc-retry branch had the onfault-clearing PTR_S in its delay
 *    slot, disarming fault protection on every retry;
 *  - `j ra' had an unfilled delay slot under `.set noreorder';
 *  - the bounds check used a signed `blt' against VM_MAXUSER_ADDRESS,
 *    which never rejects kernel addresses on o32 — use unsigned bgeu.
 */
LEAF(casuptr)
	PTR_LI	t0, VM_MAXUSER_ADDRESS	/* verify address validity */
	bgeu	a0, t0, fusufault	/* not a user address: fault */
	nop				/* branch delay slot */

	GET_CPU_PCPU(t1)
	lw	t1, PC_CURTHREAD(t1)	/* t1 = curthread */
	lw	t1, TD_PCB(t1)		/* t1 = curthread's pcb */

	PTR_LA	t2, fusufault
	PTR_S	t2, U_PCB_ONFAULT(t1)	/* arm fault handler */
1:
	ll	v0, 0(a0)		/* try to load the old value */
	bne	v0, a1, 2f		/* mismatch: return current value */
	move	t0, a2			/* (delay slot) value to write */
	sc	t0, 0(a0)		/* write if address still locked */
	beq	t0, zero, 1b		/* if it failed, spin */
	nop				/* branch delay slot */
2:
	PTR_S	zero, U_PCB_ONFAULT(t1)	/* clean up */
	j	ra
	nop				/* branch delay slot */
END(casuptr)
1450
1451
1452 #ifdef CPU_CNMIPS
1453 /*
1454 * void octeon_enable_shadow(void)
1455 * turns on access to CC and CCRes
1456 */
/*
 * void octeon_enable_shadow(void)
 * turns on access to CC and CCRes
 */
LEAF(octeon_enable_shadow)
	li	t1, 0x0000000f		# enable bits — meaning per Octeon COP0 INFO register; confirm in HW manual
	mtc0	t1, MIPS_COP_0_INFO
	jr	ra
	nop				# branch delay slot
END(octeon_enable_shadow)
1463
1464
/*
 * octeon_get_shadow(void)
 * Returns the current COP0 INFO register value in v0.
 */
LEAF(octeon_get_shadow)
	mfc0	v0, MIPS_COP_0_INFO
	jr	ra
	nop				# branch delay slot
END(octeon_get_shadow)
1470
1471 /*
1472 * octeon_set_control(addr, uint32_t val)
1473 */
/*
 * octeon_set_control(addr, uint32_t val)
 *
 * Store the previous contents of COP0 register 9, select 7 at *addr,
 * then write the caller's value into it.  The dmfc0/dmtc0 forms are
 * hand-encoded as .word — presumably because the base assembler
 * rejects this Octeon-specific select; confirm against binutils.
 * Clobbers a1 and t1.
 */
LEAF(octeon_set_control)
	.set push
	or	t1, a1, zero		# stash new value across the dmfc0
	/* dmfc0 a1, 9, 7*/
	.word 0x40254807		# a1 = current COP0 $9 sel 7
	sd	a1, 0(a0)		# hand old value back through *addr
	or	a1, t1, zero		# recover new value
	/* dmtc0 a1, 9, 7*/
	.word 0x40a54807		# COP0 $9 sel 7 = new value
	jr	ra
	nop				# branch delay slot
	.set pop
END(octeon_set_control)
1487
1488 /*
1489 * octeon_get_control(addr)
1490 */
/*
 * octeon_get_control(addr)
 *
 * Read COP0 register 9, select 7 (hand-encoded dmfc0, see
 * octeon_set_control) and store the 64-bit value at *addr.
 * Clobbers a1.
 */
LEAF(octeon_get_control)
	.set push
	.set mips64r2
	/* dmfc0 a1, 9, 7 */
	.word 0x40254807		# a1 = COP0 $9 sel 7
	sd	a1, 0(a0)
	jr	ra
	nop				# branch delay slot
	.set pop
END(octeon_get_control)
1501 #endif
1502
/*
 * uint64_t mips3_ld(const uint64_t *a0)
 *
 * 64-bit load usable from an o32 kernel: interrupts are disabled
 * around the `ld' (presumably because the o32 trap path does not
 * preserve the upper 32 register bits — confirm) and the result is
 * split into the v0/v1 pair per the o32 64-bit return convention,
 * endianness-dependent.  On n32/n64 it is a plain 64-bit load.
 */
LEAF(mips3_ld)
	.set push
	.set noreorder
	.set mips64
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS	# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop				# cp0 hazard padding
	nop
	nop

	ld	v0, 0(a0)		# the actual 64-bit load
#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	v1, v0, 32
	dsra	v1, v1, 32		# low word in v1
	dsra	v0, v0, 32		# high word in v0
#else
	dsra	v1, v0, 32		# high word in v1
	dsll	v0, v0, 32
	dsra	v0, v0, 32		# low word in v0
#endif

	mtc0	t0, MIPS_COP_0_STATUS	# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	ld	v0, 0(a0)		# 64-bit ABI: plain load
#endif /* !__mips_o32 */

	jr	ra
	nop				# branch delay slot
	.set pop
END(mips3_ld)
1538
/*
 * void mips3_sd(uint64_t *a0, uint64_t value)
 *
 * 64-bit store usable from an o32 kernel: o32 passes the 64-bit value
 * in the a2/a3 pair (a1 is alignment padding), so the two halves are
 * merged, endianness-dependent, and stored with interrupts disabled
 * (presumably because the o32 trap path does not preserve the upper
 * 32 register bits — confirm).  On n32/n64 it is a plain store of a1.
 */
LEAF(mips3_sd)
	.set push
	.set mips64
	.set noreorder
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS	# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop				# cp0 hazard padding
	nop
	nop

	# NOTE: a1 is padding!

#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	a2, a2, 32		# high word in a2
	dsll	a3, a3, 32		# low word in a3
	dsrl	a3, a3, 32		#  (zero-extended)
#else
	dsll	a2, a2, 32		# low word in a2
	dsrl	a2, a2, 32		#  (zero-extended)
	dsll	a3, a3, 32		# high word in a3
#endif
	or	a1, a2, a3		# merge halves into one 64-bit value
	sd	a1, 0(a0)		# the actual 64-bit store

	mtc0	t0, MIPS_COP_0_STATUS	# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	sd	a1, 0(a0)		# 64-bit ABI: plain store
#endif /* !__mips_o32 */

	jr	ra
	nop				# branch delay slot
	.set pop
END(mips3_sd)
/* Cache object: 6c08c450cb7ea035d9f999cf119f4900 */