1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
2 /*-
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
40 *
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
47 *
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
50 * $FreeBSD$
51 */
52
53 /*
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
56 *
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
59 * are met:
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
68 * the NetBSD Project.
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
71 *
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82 * SUCH DAMAGE.
83 */
84
85 /*
86 * Contains assembly language support routines.
87 */
88
89 #include "opt_ddb.h"
90 #include <sys/errno.h>
91 #include <machine/asm.h>
92 #include <machine/cpu.h>
93 #include <machine/regnum.h>
94 #include <machine/cpuregs.h>
95 #include <machine/pcb.h>
96
97 #include "assym.inc"
98
99 .set noreorder # Noreorder is default style!
100
101 /*
102 * Primitives
103 */
104
105 .text
106
107 /*
108 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
109 * Copy a NIL-terminated string, at most maxlen characters long. Return the
110 * number of characters copied (including the NIL) in *lencopied. If the
111 * string is too long, return ENAMETOOLONG; else return 0.
112 */
/*
 * noreorder code: the instruction following each branch is its delay
 * slot and executes whether or not the branch is taken.
 */
113 LEAF(copystr)
114 move t0, a2			# t0 = maxlen, kept to compute length copied
115 beq a2, zero, 4f		# maxlen == 0 -> ENAMETOOLONG
116 1:
117 lbu v0, 0(a0)		# fetch source byte (delay slot of beq above)
118 PTR_SUBU a2, a2, 1		# one byte of budget consumed
119 beq v0, zero, 2f		# NIL reached; v0 == 0 becomes the return value
120 sb v0, 0(a1) # each byte until NIL	(delay slot: the NIL itself is stored)
121 PTR_ADDU a0, a0, 1
122 bne a2, zero, 1b # less than maxlen
123 PTR_ADDU a1, a1, 1		# (delay slot)
124 4:
125 li v0, ENAMETOOLONG # run out of space
126 2:
127 beq a3, zero, 3f # return num. of copied bytes
128 PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL	(delay slot)
129 PTR_S a2, 0(a3)
130 3:
131 j ra # v0 is 0 or ENAMETOOLONG
132 nop
133 END(copystr)
134
135
136 /*
137 * Copy a null terminated string from the user address space into
138 * the kernel address space.
139 *
140 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
141 * caddr_t fromaddr;
142 * caddr_t toaddr;
143 * u_int maxlength;
144 * u_int *lencopied;
145 */
/*
 * Arms pcb_onfault with copyerr around the copystr call; a fault while
 * touching the user address unwinds through copyerr, which returns
 * EFAULT.  The blt rejects kernel (sign-bit-set) addresses; its delay
 * slot saves ra so copyerr can still reload it from the frame.
 */
146 NESTED(copyinstr, CALLFRAME_SIZ, ra)
147 PTR_SUBU sp, sp, CALLFRAME_SIZ
148 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
149 PTR_LA v0, copyerr
150 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
151 REG_S ra, CALLFRAME_RA(sp)	# (delay slot) always executed
152 GET_CPU_PCPU(v1)
153 PTR_L v1, PC_CURPCB(v1)
154 jal _C_LABEL(copystr)
155 PTR_S v0, U_PCB_ONFAULT(v1)	# (delay slot) arm onfault = copyerr
156 REG_L ra, CALLFRAME_RA(sp)
157 GET_CPU_PCPU(v1)
158 PTR_L v1, PC_CURPCB(v1)
159 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
160 j ra			# v0 = copystr's 0 or ENAMETOOLONG
161 PTR_ADDU sp, sp, CALLFRAME_SIZ	# (delay slot) pop frame
162 END(copyinstr)
163
164 /*
165 * Copy specified amount of data from user space into the kernel
166 * copyin(from, to, len)
167 * caddr_t *from; (user source address)
168 * caddr_t *to; (kernel destination address)
169 * unsigned len;
170 */
/*
 * Arms pcb_onfault with copyerr around the bcopy; a fault on the user
 * source address unwinds through copyerr (EFAULT).  Returns 0 on
 * success.
 */
171 NESTED(copyin, CALLFRAME_SIZ, ra)
172 PTR_SUBU sp, sp, CALLFRAME_SIZ
173 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
174 PTR_LA v0, copyerr
175 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
176 REG_S ra, CALLFRAME_RA(sp)	# (delay slot) always executed
177 GET_CPU_PCPU(v1)
178 PTR_L v1, PC_CURPCB(v1)
179 jal _C_LABEL(bcopy)
180 PTR_S v0, U_PCB_ONFAULT(v1)	# (delay slot) arm onfault = copyerr
181 REG_L ra, CALLFRAME_RA(sp)
182 GET_CPU_PCPU(v1)
183 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
184 PTR_S zero, U_PCB_ONFAULT(v1)
185 PTR_ADDU sp, sp, CALLFRAME_SIZ
186 j ra
187 move v0, zero		# (delay slot) success
188 END(copyin)
189
190 /*
191 * Copy specified amount of data from kernel to the user space
192 * copyout(from, to, len)
193 * caddr_t *from; (kernel source address)
194 * caddr_t *to; (user destination address)
195 * unsigned len;
196 */
/*
 * Mirror of copyin, but the user address is the destination (a1), so
 * that is what the blt range-checks.  Returns 0 on success, EFAULT via
 * copyerr on fault.
 */
197 NESTED(copyout, CALLFRAME_SIZ, ra)
198 PTR_SUBU sp, sp, CALLFRAME_SIZ
199 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
200 PTR_LA v0, copyerr
201 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
202 REG_S ra, CALLFRAME_RA(sp)	# (delay slot) always executed
203 GET_CPU_PCPU(v1)
204 PTR_L v1, PC_CURPCB(v1)
205 jal _C_LABEL(bcopy)
206 PTR_S v0, U_PCB_ONFAULT(v1)	# (delay slot) arm onfault = copyerr
207 REG_L ra, CALLFRAME_RA(sp)
208 GET_CPU_PCPU(v1)
209 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
210 PTR_S zero, U_PCB_ONFAULT(v1)
211 PTR_ADDU sp, sp, CALLFRAME_SIZ
212 j ra
213 move v0, zero		# (delay slot) success
214 END(copyout)
215
/*
 * Fault landing pad for copyin/copyout/copyinstr: pcb_onfault points
 * here while they run.  Pops the caller's CALLFRAME and returns EFAULT.
 */
216 LEAF(copyerr)
217 REG_L ra, CALLFRAME_RA(sp)
218 PTR_ADDU sp, sp, CALLFRAME_SIZ
219 j ra
220 li v0, EFAULT # return error	(delay slot)
221 END(copyerr)
222
223 /*
224 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
225 * user-space.
226 */
227 #ifdef __mips_n64
/*
 * fueword64: fetch a 64-bit word from user address a0 and store it at
 * kernel address a1.  Returns 0 on success; a fault lands in fswberr
 * (armed via pcb_onfault), which returns -1.
 */
228 LEAF(fueword64)
229 XLEAF(fueword)
230 PTR_LA v0, fswberr
231 blt a0, zero, fswberr # make sure address is in user space
232 nop
233 GET_CPU_PCPU(v1)
234 PTR_L v1, PC_CURPCB(v1)
235 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
236 ld v0, 0(a0) # fetch word
237 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
238 sd v0, 0(a1) # store word
239 j ra
240 li v0, 0			# (delay slot) success
241 END(fueword64)
242 #endif
243
/*
 * fueword32: fetch a 32-bit word from user address a0 and store it at
 * kernel address a1.  Returns 0 on success, -1 via fswberr on fault.
 */
244 LEAF(fueword32)
245 #ifndef __mips_n64
246 XLEAF(fueword)
247 #endif
248 PTR_LA v0, fswberr
249 blt a0, zero, fswberr # make sure address is in user space
250 nop
251 GET_CPU_PCPU(v1)
252 PTR_L v1, PC_CURPCB(v1)
253 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
254 lw v0, 0(a0) # fetch word
255 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
256 sw v0, 0(a1) # store word
257 j ra
258 li v0, 0			# (delay slot) success
259 END(fueword32)
260
/*
 * fuesword: fetch an unsigned 16-bit value from user address a0 and
 * store it at kernel address a1.  Returns 0 on success, -1 on fault.
 */
261 LEAF(fuesword)
262 PTR_LA v0, fswberr
263 blt a0, zero, fswberr # make sure address is in user space
264 nop
265 GET_CPU_PCPU(v1)
266 PTR_L v1, PC_CURPCB(v1)
267 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
268 lhu v0, 0(a0) # fetch short
269 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
270 sh v0, 0(a1) # store short
271 j ra
272 li v0, 0			# (delay slot) success
273 END(fuesword)
274
/*
 * fubyte: fetch one byte from user address a0.  Unlike the fueword
 * routines, the byte itself is the return value (zero-extended in v0);
 * -1 via fswberr on fault.
 */
275 LEAF(fubyte)
276 PTR_LA v0, fswberr
277 blt a0, zero, fswberr # make sure address is in user space
278 nop
279 GET_CPU_PCPU(v1)
280 PTR_L v1, PC_CURPCB(v1)
281 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
282 lbu v0, 0(a0) # fetch byte
283 j ra
284 PTR_S zero, U_PCB_ONFAULT(v1)	# (delay slot) disarm onfault
285 END(fubyte)
286
/*
 * suword32: store the 32-bit value a1 at user address a0.
 * Returns 0 on success, -1 via fswberr on fault.
 */
287 LEAF(suword32)
288 #ifndef __mips_n64
289 XLEAF(suword)
290 #endif
291 PTR_LA v0, fswberr
292 blt a0, zero, fswberr # make sure address is in user space
293 nop
294 GET_CPU_PCPU(v1)
295 PTR_L v1, PC_CURPCB(v1)
296 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
297 sw a1, 0(a0) # store word
298 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
299 j ra
300 move v0, zero		# (delay slot) success
301 END(suword32)
302
303 #ifdef __mips_n64
/*
 * suword64: store the 64-bit value a1 at user address a0.
 * Returns 0 on success, -1 via fswberr on fault.
 */
304 LEAF(suword64)
305 XLEAF(suword)
306 PTR_LA v0, fswberr
307 blt a0, zero, fswberr # make sure address is in user space
308 nop
309 GET_CPU_PCPU(v1)
310 PTR_L v1, PC_CURPCB(v1)
311 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
312 sd a1, 0(a0) # store word
313 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
314 j ra
315 move v0, zero		# (delay slot) success
316 END(suword64)
317 #endif
318
319 /*
320 * casueword(9)
321 * <v0>u_long casueword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long *oldval_p,
322 * <a3>u_long newval)
323 */
324 /*
325 * casueword32(9)
326 * <v0>int casueword32(<a0>uint32_t *p, <a1>uint32_t oldval,
327 * <a2>uint32_t *oldval_p, <a3>uint32_t newval)
328 */
/*
 * Single-shot compare-and-set on user word *a0: if *a0 == a1 (oldval),
 * attempt sc of a3 (newval).  The LL/SC pair is NOT retried here — on
 * sc failure v0 stays 1 and the caller is expected to retry; v0 == 0
 * only when the sc succeeded.  The old value is always written to *a2.
 */
329 LEAF(casueword32)
330 #ifndef __mips_n64
331 XLEAF(casueword)
332 #endif
333 PTR_LA v0, fswberr
334 blt a0, zero, fswberr # make sure address is in user space
335 nop
336 GET_CPU_PCPU(v1)
337 PTR_L v1, PC_CURPCB(v1)
338 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
339
340 li v0, 1			# presume failure
341 move t0, a3			# t0 = newval (sc overwrites its source reg)
342 ll t1, 0(a0)		# t1 = current *a0, load-linked
343 bne a1, t1, 1f		# mismatch: return 1, old value still stored
344 nop
345 sc t0, 0(a0) # store word	(t0 := 1 if sc succeeded, 0 if LL/SC lost)
346 xori v0, t0, 1		# v0 = 0 on success, 1 on sc failure
347 1:
348 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
349 jr ra
350 sw t1, 0(a2) # unconditionally store old word	(delay slot)
351 END(casueword32)
352
353 #ifdef __mips_n64
/*
 * 64-bit variant of casueword32 using lld/scd; same single-shot
 * contract: v0 == 0 only when the scd succeeded, 1 otherwise (caller
 * retries).  Old value always written to *a2.
 */
354 LEAF(casueword64)
355 XLEAF(casueword)
356 PTR_LA v0, fswberr
357 blt a0, zero, fswberr # make sure address is in user space
358 nop
359 GET_CPU_PCPU(v1)
360 PTR_L v1, PC_CURPCB(v1)
361 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
362
363 li v0, 1			# presume failure
364 move t0, a3			# t0 = newval (scd overwrites its source reg)
365 lld t1, 0(a0)		# t1 = current *a0, load-linked
366 bne a1, t1, 1f		# mismatch: return 1, old value still stored
367 nop
368 scd t0, 0(a0) # store double word	(t0 := 1 on success, 0 on failure)
369 xori v0, t0, 1		# v0 = 0 on success, 1 on scd failure
370 1:
371 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
372 jr ra
373 sd t1, 0(a2) # unconditionally store old word	(delay slot)
374 END(casueword64)
375 #endif
376
377 /*
378 * Will have to flush the instruction cache if byte merging is done in hardware.
379 */
/*
 * susword: store the 16-bit value a1 at user address a0.
 * Returns 0 on success, -1 via fswberr on fault.
 */
380 LEAF(susword)
381 PTR_LA v0, fswberr
382 blt a0, zero, fswberr # make sure address is in user space
383 nop
384 GET_CPU_PCPU(v1)
385 PTR_L v1, PC_CURPCB(v1)
386 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
387 sh a1, 0(a0) # store short
388 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
389 j ra
390 move v0, zero		# (delay slot) success
391 END(susword)
392
/*
 * subyte: store the byte a1 at user address a0.
 * Returns 0 on success, -1 via fswberr on fault.
 */
393 LEAF(subyte)
394 PTR_LA v0, fswberr
395 blt a0, zero, fswberr # make sure address is in user space
396 nop
397 GET_CPU_PCPU(v1)
398 PTR_L v1, PC_CURPCB(v1)
399 PTR_S v0, U_PCB_ONFAULT(v1)	# arm onfault = fswberr
400 sb a1, 0(a0) # store byte
401 PTR_S zero, U_PCB_ONFAULT(v1)	# disarm onfault
402 j ra
403 move v0, zero		# (delay slot) success
404 END(subyte)
405
/*
 * Fault landing pad for the fu*/su*/casueword* routines: returns -1.
 * Also branched to directly when the address fails the sign check.
 */
406 LEAF(fswberr)
407 j ra
408 li v0, -1			# (delay slot) error return
409 END(fswberr)
410
411 /*
412 * memset(void *s1, int c, int len)
413 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
414 */
/*
 * Fills with the fill byte replicated into all four bytes of t1, word
 * at a time, after aligning with SWHI; trailing bytes fall through to
 * memsetsmallclr.  Returns the original s1 pointer in v0.
 */
415 LEAF(memset)
416 .set noreorder
417 blt a2, 12, memsetsmallclr # small amount to clear?
418 move v0, a0 # save s1 for result	(delay slot; always executed)
419
420 sll t1, a1, 8 # compute c << 8 in t1
421 or t1, t1, a1 # compute c << 8 | c in t1
422 sll t2, t1, 16 # shift that left 16
423 or t1, t2, t1 # or together		# t1 = c in all 4 bytes
424
425 PTR_SUBU t0, zero, a0 # compute # bytes to word align address
426 and t0, t0, 3
427 beq t0, zero, 1f # skip if word aligned
428 PTR_SUBU a2, a2, t0 # subtract from remaining count	(delay slot)
429 SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
430 PTR_ADDU a0, a0, t0
431 1:
432 and v1, a2, 3 # v1 = byte count left over after the word loop
433 PTR_SUBU t0, a2, v1 # t0 = byte count covered by whole words
434 PTR_SUBU a2, a2, t0 # a2 = leftover bytes for memsetsmallclr
435 PTR_ADDU t0, t0, a0 # compute ending address
436 2:
437 PTR_ADDU a0, a0, 4 # clear words
438 bne a0, t0, 2b # unrolling loop does not help
439 sw t1, -4(a0) # since we are limited by memory speed	(delay slot)
440
441 memsetsmallclr:
442 ble a2, zero, 2f
443 PTR_ADDU t0, a2, a0 # compute ending address	(delay slot)
444 1:
445 PTR_ADDU a0, a0, 1 # clear bytes
446 bne a0, t0, 1b
447 sb a1, -1(a0)		# (delay slot) store low byte of c
448 2:
449 j ra
450 nop
451 .set reorder
452 END(memset)
453
454 /*
455 * bzero(s1, n)
456 */
/*
 * Zeroes a1 bytes starting at a0; same shape as memset but with a
 * hard-wired zero fill.  blkclr is an alias entry point.
 */
457 LEAF(bzero)
458 XLEAF(blkclr)
459 .set noreorder
460 blt a1, 12, smallclr # small amount to clear?
461 PTR_SUBU a3, zero, a0 # compute # bytes to word align address
462 and a3, a3, 3
463 beq a3, zero, 1f # skip if word aligned
464 PTR_SUBU a1, a1, a3 # subtract from remaining count	(delay slot)
465 SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
466 PTR_ADDU a0, a0, a3
467 1:
468 and v0, a1, 3 # v0 = byte count left over after the word loop
469 PTR_SUBU a3, a1, v0 # a3 = byte count covered by whole words
470 move a1, v0 # a1 = leftover bytes for smallclr
471 PTR_ADDU a3, a3, a0 # compute ending address
472 2:
473 PTR_ADDU a0, a0, 4 # clear words
474 bne a0, a3, 2b # unrolling loop does not help
475 sw zero, -4(a0) # since we are limited by memory speed	(delay slot)
476 smallclr:
477 ble a1, zero, 2f
478 PTR_ADDU a3, a1, a0 # compute ending address	(delay slot)
479 1:
480 PTR_ADDU a0, a0, 1 # clear bytes
481 bne a0, a3, 1b
482 sb zero, -1(a0)		# (delay slot)
483 2:
484 j ra
485 nop
486 END(bzero)
487
488
489 /*
490 * bcmp(s1, s2, n)
491 */
/*
 * bcmp(s1, s2, n): returns 0 if the two buffers are equal, 1 otherwise
 * (NOT the signed byte difference that memcmp returns).  Three paths:
 * both-alignable word compare, a0-unaligned LWHI/LWLO word compare,
 * and a byte-at-a-time tail shared via smallcmp.
 */
492 LEAF(bcmp)
493 .set noreorder
494 blt a2, 16, smallcmp # is it worth any trouble?
495 xor v0, a0, a1 # compare low two bits of addresses	(delay slot)
496 and v0, v0, 3
497 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
498 bne v0, zero, unalignedcmp # not possible to align addresses
499 and a3, a3, 3		# (delay slot)
500
501 beq a3, zero, 1f
502 PTR_SUBU a2, a2, a3 # subtract from remaining count	(delay slot)
503 move v0, v1 # init v0,v1 so unmodified bytes match
504 LWHI v0, 0(a0) # read 1, 2, or 3 bytes
505 LWHI v1, 0(a1)
506 PTR_ADDU a1, a1, a3
507 bne v0, v1, nomatch
508 PTR_ADDU a0, a0, a3		# (delay slot)
509 1:
510 and a3, a2, ~3 # compute number of whole words left
511 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
512 PTR_ADDU a3, a3, a0 # compute ending address
513 2:
514 lw v0, 0(a0) # compare words
515 lw v1, 0(a1)
516 PTR_ADDU a0, a0, 4
517 bne v0, v1, nomatch
518 PTR_ADDU a1, a1, 4		# (delay slot)
519 bne a0, a3, 2b
520 nop
521 b smallcmp # finish remainder
522 nop
523 unalignedcmp:
524 beq a3, zero, 2f
525 PTR_SUBU a2, a2, a3 # subtract from remaining count	(delay slot)
526 PTR_ADDU a3, a3, a0 # compute ending address
527 1:
528 lbu v0, 0(a0) # compare bytes until a1 word aligned
529 lbu v1, 0(a1)
530 PTR_ADDU a0, a0, 1
531 bne v0, v1, nomatch
532 PTR_ADDU a1, a1, 1		# (delay slot)
533 bne a0, a3, 1b
534 nop
535 2:
536 and a3, a2, ~3 # compute number of whole words left
537 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
538 PTR_ADDU a3, a3, a0 # compute ending address
539 3:
540 LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
541 LWLO v0, 3(a0)
542 lw v1, 0(a1)
543 PTR_ADDU a0, a0, 4
544 bne v0, v1, nomatch
545 PTR_ADDU a1, a1, 4		# (delay slot)
546 bne a0, a3, 3b
547 nop
548 smallcmp:
549 ble a2, zero, match
550 PTR_ADDU a3, a2, a0 # compute ending address	(delay slot)
551 1:
552 lbu v0, 0(a0)
553 lbu v1, 0(a1)
554 PTR_ADDU a0, a0, 1
555 bne v0, v1, nomatch
556 PTR_ADDU a1, a1, 1		# (delay slot)
557 bne a0, a3, 1b
558 nop
559 match:
560 j ra
561 move v0, zero		# (delay slot) equal
562 nomatch:
563 j ra
564 li v0, 1			# (delay slot) differ
565 END(bcmp)
566
567
568 /*
569 * bit = ffs(value)
570 */
/*
 * ffs: returns the 1-based index of the least-significant set bit of
 * a0, or 0 when a0 == 0.
 */
571 LEAF(ffs)
572 .set noreorder
573 beq a0, zero, 2f
574 move v0, zero		# (delay slot) result for value == 0
575 1:
576 and v1, a0, 1 # bit set?
577 addu v0, v0, 1		# count this bit position (1-based)
578 beq v1, zero, 1b # no, continue
579 srl a0, a0, 1		# (delay slot) shift toward next bit
580 2:
581 j ra
582 nop
583 END(ffs)
584
585 /**
586 * void
587 * atomic_set_16(u_int16_t *a, u_int16_t b)
588 * {
589 * *a |= b;
590 * }
591 */
/*
 * LL/SC OR of the (masked) 16-bit operand into the 32-bit word that
 * contains *a.
 * NOTE(review): the update assumes the target halfword occupies the
 * low 16 bits of its word-aligned container — confirm this holds for
 * the kernel's endianness and callers' alignment.
 */
592 LEAF(atomic_set_16)
593 .set noreorder
594 srl a0, a0, 2 # round down address to be 32-bit aligned
595 sll a0, a0, 2
596 andi a1, a1, 0xffff	# only the low 16 bits may be set
597 1:
598 ll t0, 0(a0)		# load-linked containing word
599 or t0, t0, a1
600 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
601 beq t0, zero, 1b	# retry until the sc succeeds
602 nop
603 j ra
604 nop
605 END(atomic_set_16)
606
607 /**
608 * void
609 * atomic_clear_16(u_int16_t *a, u_int16_t b)
610 * {
611 * *a &= ~b;
612 * }
613 */
/*
 * LL/SC clear of bits in the low halfword of the word containing *a:
 * new low 16 bits = old low 16 bits & ~b; top 16 bits preserved.
 * NOTE(review): same low-halfword placement assumption as
 * atomic_set_16 — confirm against endianness/usage.
 */
614 LEAF(atomic_clear_16)
615 .set noreorder
616 srl a0, a0, 2 # round down address to be 32-bit aligned
617 sll a0, a0, 2
618 nor a1, zero, a1	# a1 = ~b
619 1:
620 ll t0, 0(a0)
621 move t1, t0
622 andi t1, t1, 0xffff # t1 has the original lower 16 bits
623 and t1, t1, a1 # t1 has the new lower 16 bits
624 srl t0, t0, 16 # preserve original top 16 bits
625 sll t0, t0, 16
626 or t0, t0, t1
627 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
628 beq t0, zero, 1b	# retry until the sc succeeds
629 nop
630 j ra
631 nop
632 END(atomic_clear_16)
633
634
635 /**
636 * void
637 * atomic_subtract_16(uint16_t *a, uint16_t b)
638 * {
639 * *a -= b;
640 * }
641 */
/*
 * LL/SC 16-bit subtract within the containing 32-bit word; the result
 * is re-masked to 16 bits so borrow never leaks into the top half.
 * NOTE(review): assumes the halfword sits in the low 16 bits of its
 * word-aligned container — confirm.
 */
642 LEAF(atomic_subtract_16)
643 .set noreorder
644 srl a0, a0, 2 # round down address to be 32-bit aligned
645 sll a0, a0, 2
646 1:
647 ll t0, 0(a0)
648 move t1, t0
649 andi t1, t1, 0xffff # t1 has the original lower 16 bits
650 subu t1, t1, a1
651 andi t1, t1, 0xffff # t1 has the new lower 16 bits
652 srl t0, t0, 16 # preserve original top 16 bits
653 sll t0, t0, 16
654 or t0, t0, t1
655 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
656 beq t0, zero, 1b	# retry until the sc succeeds
657 nop
658 j ra
659 nop
660 END(atomic_subtract_16)
661
662 /**
663 * void
664 * atomic_add_16(uint16_t *a, uint16_t b)
665 * {
666 * *a += b;
667 * }
668 */
/*
 * LL/SC 16-bit add within the containing 32-bit word; the sum is
 * re-masked to 16 bits so carry never leaks into the top half.
 * NOTE(review): assumes the halfword sits in the low 16 bits of its
 * word-aligned container — confirm.
 */
669 LEAF(atomic_add_16)
670 .set noreorder
671 srl a0, a0, 2 # round down address to be 32-bit aligned
672 sll a0, a0, 2
673 1:
674 ll t0, 0(a0)
675 move t1, t0
676 andi t1, t1, 0xffff # t1 has the original lower 16 bits
677 addu t1, t1, a1
678 andi t1, t1, 0xffff # t1 has the new lower 16 bits
679 srl t0, t0, 16 # preserve original top 16 bits
680 sll t0, t0, 16
681 or t0, t0, t1
682 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
683 beq t0, zero, 1b	# retry until the sc succeeds
684 nop
685 j ra
686 nop
687 END(atomic_add_16)
688
689 /**
690 * void
691 * atomic_add_8(uint8_t *a, uint8_t b)
692 * {
693 * *a += b;
694 * }
695 */
/*
 * LL/SC 8-bit add within the containing 32-bit word; the sum is
 * re-masked to 8 bits so carry never leaks upward.
 * NOTE(review): assumes the byte sits in the low 8 bits of its
 * word-aligned container — confirm against endianness/usage.
 */
696 LEAF(atomic_add_8)
697 .set noreorder
698 srl a0, a0, 2 # round down address to be 32-bit aligned
699 sll a0, a0, 2
700 1:
701 ll t0, 0(a0)
702 move t1, t0
703 andi t1, t1, 0xff # t1 has the original lower 8 bits
704 addu t1, t1, a1
705 andi t1, t1, 0xff # t1 has the new lower 8 bits
706 srl t0, t0, 8 # preserve original top 24 bits
707 sll t0, t0, 8
708 or t0, t0, t1
709 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
710 beq t0, zero, 1b	# retry until the sc succeeds
711 nop
712 j ra
713 nop
714 END(atomic_add_8)
715
716
717 /**
718 * void
719 * atomic_subtract_8(uint8_t *a, uint8_t b)
720 * {
721 * *a -= b;
722 * }
723 */
/*
 * LL/SC 8-bit subtract (despite the stale "+=" header above) within
 * the containing 32-bit word; result re-masked to 8 bits.
 * NOTE(review): assumes the byte sits in the low 8 bits of its
 * word-aligned container — confirm against endianness/usage.
 */
724 LEAF(atomic_subtract_8)
725 .set noreorder
726 srl a0, a0, 2 # round down address to be 32-bit aligned
727 sll a0, a0, 2
728 1:
729 ll t0, 0(a0)
730 move t1, t0
731 andi t1, t1, 0xff # t1 has the original lower 8 bits
732 subu t1, t1, a1
733 andi t1, t1, 0xff # t1 has the new lower 8 bits
734 srl t0, t0, 8 # preserve original top 24 bits
735 sll t0, t0, 8
736 or t0, t0, t1
737 sc t0, 0(a0)		# t0 := 1 on success, 0 if LL/SC lost
738 beq t0, zero, 1b	# retry until the sc succeeds
739 nop
740 j ra
741 nop
742 END(atomic_subtract_8)
743
744 .set noreorder # Noreorder is default style!
745
746 #if defined(DDB) || defined(DEBUG)
747
/*
 * kdbpeek: debugger read of a 32-bit word at a0 with fault protection
 * via pcb_onfault = ddberr.  Unaligned addresses are read with
 * LWHI/LWLO.  Result in v0.
 */
748 LEAF(kdbpeek)
749 PTR_LA v1, ddberr
750 and v0, a0, 3 # unaligned ?
751 GET_CPU_PCPU(t1)
752 PTR_L t1, PC_CURPCB(t1)
753 bne v0, zero, 1f
754 PTR_S v1, U_PCB_ONFAULT(t1)	# (delay slot) arm onfault = ddberr
755
756 lw v0, (a0)		# aligned load
757 jr ra
758 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
759
760 1:
761 LWHI v0, 0(a0)		# unaligned load, two halves
762 LWLO v0, 3(a0)
763 jr ra
764 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
765 END(kdbpeek)
766
/*
 * kdbpeekd: debugger read of a doubleword at a0 with fault protection
 * via pcb_onfault = ddberr.  Unaligned addresses are read with the
 * REG_LHI/REG_LLO pair.  Result in v0.
 */
767 LEAF(kdbpeekd)
768 PTR_LA v1, ddberr
769 and v0, a0, 3 # unaligned ?
770 GET_CPU_PCPU(t1)
771 PTR_L t1, PC_CURPCB(t1)
772 bne v0, zero, 1f
773 PTR_S v1, U_PCB_ONFAULT(t1)	# (delay slot) arm onfault = ddberr
774
775 ld v0, (a0)		# aligned load
776 jr ra
777 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
778
779 1:
780 REG_LHI v0, 0(a0)	# unaligned load, two halves
781 REG_LLO v0, 7(a0)
782 jr ra
783 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
784 END(kdbpeekd)
785
/*
 * Fault landing pad for the kdb peek/poke routines.
 * NOTE(review): simply returns — pcb_onfault is not cleared here and
 * v0 is left as-is; verify the trap handler / callers account for that.
 */
786 ddberr:
787 jr ra
788 nop
789
790 #if defined(DDB)
/*
 * kdbpoke: debugger write of 32-bit value a1 to address a0 with fault
 * protection via pcb_onfault = ddberr; SWHI/SWLO for unaligned a0.
 */
791 LEAF(kdbpoke)
792 PTR_LA v1, ddberr
793 and v0, a0, 3 # unaligned ?
794 GET_CPU_PCPU(t1)
795 PTR_L t1, PC_CURPCB(t1)
796 bne v0, zero, 1f
797 PTR_S v1, U_PCB_ONFAULT(t1)	# (delay slot) arm onfault = ddberr
798
799 sw a1, (a0)		# aligned store
800 jr ra
801 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
802
803 1:
804 SWHI a1, 0(a0)		# unaligned store, two halves
805 SWLO a1, 3(a0)
806 jr ra
807 PTR_S zero, U_PCB_ONFAULT(t1)	# (delay slot) disarm onfault
808 END(kdbpoke)
809
810 .data
811 .globl esym
812 esym: .word 0	# end-of-symbol-table marker; presumably set by startup/DDB code — confirm
813
814 #endif /* DDB */
815 #endif /* DDB || DEBUG */
816
817 .text
/*
 * breakpoint: trap into the debugger with a "step over" break code,
 * then return to the caller.
 */
818 LEAF(breakpoint)
819 break MIPS_BREAK_SOVER_VAL
820 jr ra
821 nop
822 END(breakpoint)
823
/*
 * Kernel setjmp: saves the callee-saved registers, sp, ra and the
 * COP0 status register into the buffer at a0.  Returns 0 (longjmp
 * later returns 1 to the same call site).
 */
824 LEAF(setjmp)
825 mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
826 REG_S s0, (SZREG * PCB_REG_S0)(a0)
827 REG_S s1, (SZREG * PCB_REG_S1)(a0)
828 REG_S s2, (SZREG * PCB_REG_S2)(a0)
829 REG_S s3, (SZREG * PCB_REG_S3)(a0)
830 REG_S s4, (SZREG * PCB_REG_S4)(a0)
831 REG_S s5, (SZREG * PCB_REG_S5)(a0)
832 REG_S s6, (SZREG * PCB_REG_S6)(a0)
833 REG_S s7, (SZREG * PCB_REG_S7)(a0)
834 REG_S s8, (SZREG * PCB_REG_S8)(a0)
835 REG_S sp, (SZREG * PCB_REG_SP)(a0)
836 REG_S ra, (SZREG * PCB_REG_RA)(a0)
837 REG_S v0, (SZREG * PCB_REG_SR)(a0)
838 jr ra
839 li v0, 0 # setjmp return	(delay slot)
840 END(setjmp)
841
/*
 * Kernel longjmp: restores the register state saved by setjmp from the
 * buffer at a0 (including the COP0 status register) and returns 1 to
 * the original setjmp call site.
 */
842 LEAF(longjmp)
843 REG_L v0, (SZREG * PCB_REG_SR)(a0)
844 REG_L ra, (SZREG * PCB_REG_RA)(a0)
845 REG_L s0, (SZREG * PCB_REG_S0)(a0)
846 REG_L s1, (SZREG * PCB_REG_S1)(a0)
847 REG_L s2, (SZREG * PCB_REG_S2)(a0)
848 REG_L s3, (SZREG * PCB_REG_S3)(a0)
849 REG_L s4, (SZREG * PCB_REG_S4)(a0)
850 REG_L s5, (SZREG * PCB_REG_S5)(a0)
851 REG_L s6, (SZREG * PCB_REG_S6)(a0)
852 REG_L s7, (SZREG * PCB_REG_S7)(a0)
853 REG_L s8, (SZREG * PCB_REG_S8)(a0)
854 REG_L sp, (SZREG * PCB_REG_SP)(a0)
855 mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
856 ITLBNOPFIX
857 jr ra
858 li v0, 1 # longjmp return	(delay slot)
859 END(longjmp)
860
/*
 * mips3_ld: 64-bit load from a0.  Under o32 the access is bracketed by
 * disabling MIPS_SR_INT_IE and the doubleword is split into the v0/v1
 * register pair per _BYTE_ORDER (presumably because an interrupt would
 * not preserve the upper register halves under o32 — confirm).  On
 * 64-bit ABIs it is a plain ld.
 */
861 LEAF(mips3_ld)
862 .set push
863 .set noreorder
864 .set mips64
865 #if defined(__mips_o32)
866 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
867 and t1, t0, ~(MIPS_SR_INT_IE)
868 mtc0 t1, MIPS_COP_0_STATUS
869 COP0_SYNC
870 nop
871 nop
872 nop
873
874 ld v0, 0(a0)
875 #if _BYTE_ORDER == _BIG_ENDIAN
876 dsll v1, v0, 32
877 dsra v1, v1, 32 # low word in v1
878 dsra v0, v0, 32 # high word in v0
879 #else
880 dsra v1, v0, 32 # high word in v1
881 dsll v0, v0, 32
882 dsra v0, v0, 32 # low word in v0
883 #endif
884
885 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
886 COP0_SYNC
887 nop
888 #else /* !__mips_o32 */
889 ld v0, 0(a0)
890 #endif /* !__mips_o32 */
891
892 jr ra
893 nop
894 .set pop
895 END(mips3_ld)
896
/*
 * mips3_sd: 64-bit store to a0.  Under o32 the doubleword arrives as
 * the a2/a3 register pair (a1 is ABI padding), is assembled per
 * _BYTE_ORDER, and the access is bracketed by disabling
 * MIPS_SR_INT_IE (see mips3_ld).  On 64-bit ABIs it is a plain sd of
 * a1.
 */
897 LEAF(mips3_sd)
898 .set push
899 .set mips64
900 .set noreorder
901 #if defined(__mips_o32)
902 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
903 and t1, t0, ~(MIPS_SR_INT_IE)
904 mtc0 t1, MIPS_COP_0_STATUS
905 COP0_SYNC
906 nop
907 nop
908 nop
909
910 # NOTE: a1 is padding!
911
912 #if _BYTE_ORDER == _BIG_ENDIAN
913 dsll a2, a2, 32 # high word in a2
914 dsll a3, a3, 32 # low word in a3
915 dsrl a3, a3, 32
916 #else
917 dsll a2, a2, 32 # low word in a2
918 dsrl a2, a2, 32
919 dsll a3, a3, 32 # high word in a3
920 #endif
921 or a1, a2, a3		# a1 = assembled 64-bit value
922 sd a1, 0(a0)
923
924 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
925 COP0_SYNC
926 nop
927 #else /* !__mips_o32 */
928 sd a1, 0(a0)
929 #endif /* !__mips_o32 */
930
931 jr ra
932 nop
933 .set pop
934 END(mips3_sd)
Cache object: 84904970d5f34cf353bd35d599fa3cb3
|