1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
2 /*-
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
40 *
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
47 *
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
50 * $FreeBSD$
51 */
52
53 /*
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
56 *
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
59 * are met:
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
68 * the NetBSD Project.
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
71 *
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82 * SUCH DAMAGE.
83 */
84
85 /*
86 * Contains assembly language support routines.
87 */
88
89 #include "opt_ddb.h"
90 #include <sys/errno.h>
91 #include <machine/asm.h>
92 #include <machine/cpu.h>
93 #include <machine/endian.h>
94 #include <machine/regnum.h>
95 #include <machine/cpuregs.h>
96 #include <machine/pcb.h>
97
98 #include "assym.inc"
99
100 .set noreorder # Noreorder is default style!
101
102 /*
103 * Primitives
104 */
105
106 .text
107
108 /*
109 * Copy a null terminated string from the user address space into
110 * the kernel address space.
111 *
112 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
113 * caddr_t fromaddr;
114 * caddr_t toaddr;
115 * u_int maxlength;
116 * u_int *lencopied;
117 */
/*
 * copyinstr(9): copy a NUL-terminated string from user address a0 to
 * kernel address a1, copying at most a2 bytes including the NUL.  If
 * a3 is non-NULL, the number of bytes copied (including the NUL) is
 * stored through it.  Returns 0 on success, ENAMETOOLONG when a2 is
 * exhausted before the NUL, or EFAULT if a user access faults (the
 * pcb onfault hook redirects faults to __copyinstr_err).
 * NB: assembled with .set noreorder -- the instruction after every
 * branch is its delay slot and always executes.
 */
118 LEAF(copyinstr)
119 PTR_LA v0, __copyinstr_err
120 blt a0, zero, __copyinstr_err # make sure address is in user space
121 GET_CPU_PCPU(v1)
122 PTR_L v1, PC_CURPCB(v1)
/* arm the per-thread fault handler before touching user memory */
123 PTR_S v0, U_PCB_ONFAULT(v1)
124
/* t0 remembers the original maxlen so the copied count can be derived */
125 move t0, a2
126 beq a2, zero, 4f
127 1:
128 lbu v0, 0(a0)
129 PTR_SUBU a2, a2, 1
130 beq v0, zero, 2f
/* delay slot: executes on the taken branch too, so the NUL is stored */
131 sb v0, 0(a1) # each byte until NIL
132 PTR_ADDU a0, a0, 1
133 bne a2, zero, 1b # less than maxlen
134 PTR_ADDU a1, a1, 1
135 4:
136 li v0, ENAMETOOLONG # run out of space
137 2:
/* on the success path v0 is 0 here (the NUL byte just loaded) */
138 beq a3, zero, 3f # return num. of copied bytes
139 PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL
140 PTR_S a2, 0(a3)
141 3:
142
/* disarm the fault handler before returning */
143 PTR_S zero, U_PCB_ONFAULT(v1)
144 j ra
145 nop
146
147 __copyinstr_err:
148 j ra
149 li v0, EFAULT
150 END(copyinstr)
151
152 /*
153 * Copy specified amount of data from user space into the kernel
154 * copyin(from, to, len)
155 * caddr_t *from; (user source address)
156 * caddr_t *to; (kernel destination address)
157 * unsigned len;
158 */
/*
 * copyin(9): copy a2 bytes from user address a0 to kernel address a1
 * via bcopy().  Returns 0 on success or EFAULT on a user-space fault;
 * a fault inside bcopy unwinds through copyerr, which pops this frame.
 */
159 NESTED(copyin, CALLFRAME_SIZ, ra)
160 PTR_SUBU sp, sp, CALLFRAME_SIZ
161 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
162 PTR_LA v0, copyerr
163 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
164 REG_S ra, CALLFRAME_RA(sp)
165 GET_CPU_PCPU(v1)
166 PTR_L v1, PC_CURPCB(v1)
/* delay slot: onfault is armed just as bcopy is entered */
167 jal _C_LABEL(bcopy)
168 PTR_S v0, U_PCB_ONFAULT(v1)
169 REG_L ra, CALLFRAME_RA(sp)
170 GET_CPU_PCPU(v1)
171 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
172 PTR_S zero, U_PCB_ONFAULT(v1)
173 PTR_ADDU sp, sp, CALLFRAME_SIZ
174 j ra
175 move v0, zero
176 END(copyin)
177
178 /*
179 * Copy specified amount of data from kernel to the user space
180 * copyout(from, to, len)
181 * caddr_t *from; (kernel source address)
182 * caddr_t *to; (user destination address)
183 * unsigned len;
184 */
/*
 * copyout(9): copy a2 bytes from kernel address a0 to user address a1
 * via bcopy().  Mirror image of copyin; here the *destination* (a1)
 * must lie in user space.  Returns 0 on success or EFAULT on fault.
 */
185 NESTED(copyout, CALLFRAME_SIZ, ra)
186 PTR_SUBU sp, sp, CALLFRAME_SIZ
187 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
188 PTR_LA v0, copyerr
189 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
190 REG_S ra, CALLFRAME_RA(sp)
191 GET_CPU_PCPU(v1)
192 PTR_L v1, PC_CURPCB(v1)
/* delay slot: onfault is armed just as bcopy is entered */
193 jal _C_LABEL(bcopy)
194 PTR_S v0, U_PCB_ONFAULT(v1)
195 REG_L ra, CALLFRAME_RA(sp)
196 GET_CPU_PCPU(v1)
197 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
198 PTR_S zero, U_PCB_ONFAULT(v1)
199 PTR_ADDU sp, sp, CALLFRAME_SIZ
200 j ra
201 move v0, zero
202 END(copyout)
203
/*
 * Common fault target for copyin/copyout: pop their call frame and
 * return EFAULT to the original caller.
 */
204 LEAF(copyerr)
205 REG_L ra, CALLFRAME_RA(sp)
206 PTR_ADDU sp, sp, CALLFRAME_SIZ
207 j ra
208 li v0, EFAULT # return error
209 END(copyerr)
210
211 /*
212 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
213 * user-space.
214 */
215 #ifdef __mips_n64
/*
 * fueword64(9): fetch a 64-bit word from user address a0 and store it
 * at kernel address a1.  Returns 0 on success, -1 (via fswberr) on a
 * user-space fault.  Doubles as fueword() on n64.
 */
216 LEAF(fueword64)
217 XLEAF(fueword)
218 PTR_LA v0, fswberr
219 blt a0, zero, fswberr # make sure address is in user space
220 nop
221 GET_CPU_PCPU(v1)
222 PTR_L v1, PC_CURPCB(v1)
223 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user load
224 ld v0, 0(a0) # fetch word
225 PTR_S zero, U_PCB_ONFAULT(v1)
226 sd v0, 0(a1) # store word
227 j ra
228 li v0, 0
229 END(fueword64)
230 #endif
231
/*
 * fueword32(9): fetch a 32-bit word from user address a0 and store it
 * at kernel address a1.  Returns 0 on success, -1 (via fswberr) on a
 * user-space fault.  Doubles as fueword() on 32-bit ABIs.
 */
232 LEAF(fueword32)
233 #ifndef __mips_n64
234 XLEAF(fueword)
235 #endif
236 PTR_LA v0, fswberr
237 blt a0, zero, fswberr # make sure address is in user space
238 nop
239 GET_CPU_PCPU(v1)
240 PTR_L v1, PC_CURPCB(v1)
241 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user load
242 lw v0, 0(a0) # fetch word
243 PTR_S zero, U_PCB_ONFAULT(v1)
244 sw v0, 0(a1) # store word
245 j ra
246 li v0, 0
247 END(fueword32)
248
/*
 * fuesword: fetch an unsigned 16-bit value from user address a0 and
 * store it at kernel address a1.  Returns 0 on success, -1 on fault.
 */
249 LEAF(fuesword)
250 PTR_LA v0, fswberr
251 blt a0, zero, fswberr # make sure address is in user space
252 nop
253 GET_CPU_PCPU(v1)
254 PTR_L v1, PC_CURPCB(v1)
255 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user load
256 lhu v0, 0(a0) # fetch short
257 PTR_S zero, U_PCB_ONFAULT(v1)
258 sh v0, 0(a1) # store short
259 j ra
260 li v0, 0
261 END(fuesword)
262
/*
 * fubyte(9): fetch a byte from user address a0.  Unlike the fueword*
 * routines, the value itself is returned in v0 (zero-extended);
 * -1 is returned on fault.
 */
263 LEAF(fubyte)
264 PTR_LA v0, fswberr
265 blt a0, zero, fswberr # make sure address is in user space
266 nop
267 GET_CPU_PCPU(v1)
268 PTR_L v1, PC_CURPCB(v1)
269 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user load
270 lbu v0, 0(a0) # fetch byte
271 j ra
272 PTR_S zero, U_PCB_ONFAULT(v1) # delay slot: disarm handler
273 END(fubyte)
274
/*
 * suword32(9): store the 32-bit value a1 at user address a0.
 * Returns 0 on success, -1 (via fswberr) on a user-space fault.
 * Doubles as suword() on 32-bit ABIs.
 */
275 LEAF(suword32)
276 #ifndef __mips_n64
277 XLEAF(suword)
278 #endif
279 PTR_LA v0, fswberr
280 blt a0, zero, fswberr # make sure address is in user space
281 nop
282 GET_CPU_PCPU(v1)
283 PTR_L v1, PC_CURPCB(v1)
284 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user store
285 sw a1, 0(a0) # store word
286 PTR_S zero, U_PCB_ONFAULT(v1)
287 j ra
288 move v0, zero
289 END(suword32)
290
291 #ifdef __mips_n64
/*
 * suword64(9): store the 64-bit value a1 at user address a0.
 * Returns 0 on success, -1 (via fswberr) on a user-space fault.
 * Doubles as suword() on n64.
 */
292 LEAF(suword64)
293 XLEAF(suword)
294 PTR_LA v0, fswberr
295 blt a0, zero, fswberr # make sure address is in user space
296 nop
297 GET_CPU_PCPU(v1)
298 PTR_L v1, PC_CURPCB(v1)
299 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user store
300 sd a1, 0(a0) # store word
301 PTR_S zero, U_PCB_ONFAULT(v1)
302 j ra
303 move v0, zero
304 END(suword64)
305 #endif
306
307 /*
308 * casueword(9)
309 * <v0>u_long casueword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long *oldval_p,
310 * <a3>u_long newval)
311 */
312 /*
313 * casueword32(9)
314 * <v0>int casueword32(<a0>uint32_t *p, <a1>uint32_t oldval,
315 * <a2>uint32_t *oldvalp, <a3>uint32_t newval)
316 */
/*
 * casueword32(9): compare-and-set on a user-space 32-bit word.  If
 * *(uint32_t *)a0 == a1 (oldval), attempt to store a3 (newval) there
 * with ll/sc; the value actually read is always stored through a2
 * (oldvalp).  Returns 0 when the new value was stored, 1 when the
 * compare failed or the ll/sc reservation was lost (caller retries),
 * or -1 (via fswberr) on a user-space fault.
 */
317 LEAF(casueword32)
318 #ifndef __mips_n64
319 XLEAF(casueword)
320 #endif
321 PTR_LA v0, fswberr
322 blt a0, zero, fswberr # make sure address is in user space
323 nop
324 GET_CPU_PCPU(v1)
325 PTR_L v1, PC_CURPCB(v1)
326 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for ll/sc/sw below
327
/* assume failure (v0 = 1) unless the sc succeeds */
328 li v0, 1
329 move t0, a3
330 ll t1, 0(a0)
331 bne a1, t1, 1f
332 nop
/* sc writes 1 to t0 on success, 0 on a lost reservation */
333 sc t0, 0(a0) # store word
334 xori v0, t0, 1
335 1:
336 PTR_S zero, U_PCB_ONFAULT(v1)
337 jr ra
338 sw t1, 0(a2) # unconditionally store old word
339 END(casueword32)
340
341 #ifdef __mips_n64
/*
 * casueword64(9): 64-bit variant of casueword32 using lld/scd.
 * If *(uint64_t *)a0 == a1, try to store a3 there; the value read is
 * always stored through a2.  Returns 0 on successful store, 1 on
 * compare failure or lost reservation, -1 (via fswberr) on fault.
 * Doubles as casueword() on n64.
 */
342 LEAF(casueword64)
343 XLEAF(casueword)
344 PTR_LA v0, fswberr
345 blt a0, zero, fswberr # make sure address is in user space
346 nop
347 GET_CPU_PCPU(v1)
348 PTR_L v1, PC_CURPCB(v1)
349 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for lld/scd/sd below
350
/* assume failure (v0 = 1) unless the scd succeeds */
351 li v0, 1
352 move t0, a3
353 lld t1, 0(a0)
354 bne a1, t1, 1f
355 nop
/* scd writes 1 to t0 on success, 0 on a lost reservation */
356 scd t0, 0(a0) # store double word
357 xori v0, t0, 1
358 1:
359 PTR_S zero, U_PCB_ONFAULT(v1)
360 jr ra
361 sd t1, 0(a2) # unconditionally store old word
362 END(casueword64)
363 #endif
364
365 /*
366 * Will have to flush the instruction cache if byte merging is done in hardware.
367 */
/*
 * susword: store the 16-bit value a1 at user address a0.
 * Returns 0 on success, -1 (via fswberr) on a user-space fault.
 */
368 LEAF(susword)
369 PTR_LA v0, fswberr
370 blt a0, zero, fswberr # make sure address is in user space
371 nop
372 GET_CPU_PCPU(v1)
373 PTR_L v1, PC_CURPCB(v1)
374 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user store
375 sh a1, 0(a0) # store short
376 PTR_S zero, U_PCB_ONFAULT(v1)
377 j ra
378 move v0, zero
379 END(susword)
380
/*
 * subyte(9): store the byte a1 at user address a0.
 * Returns 0 on success, -1 (via fswberr) on a user-space fault.
 */
381 LEAF(subyte)
382 PTR_LA v0, fswberr
383 blt a0, zero, fswberr # make sure address is in user space
384 nop
385 GET_CPU_PCPU(v1)
386 PTR_L v1, PC_CURPCB(v1)
387 PTR_S v0, U_PCB_ONFAULT(v1) # arm fault handler for the user store
388 sb a1, 0(a0) # store byte
389 PTR_S zero, U_PCB_ONFAULT(v1)
390 j ra
391 move v0, zero
392 END(subyte)
393
/*
 * Common fault/error target for the fetch/store/cas user-space
 * primitives above: return -1 to the caller.
 */
394 LEAF(fswberr)
395 j ra
396 li v0, -1
397 END(fswberr)
398
399 /*
400 * memset(void *s1, int c, int len)
401 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
402 */
/*
 * void *memset(void *s1 /* a0 */, int c /* a1 */, size_t len /* a2 */)
 * Fill a2 bytes at a0 with byte value a1; return the original a0 (v0).
 * Small fills (< 12 bytes) go byte-at-a-time; larger ones replicate c
 * into a full word, align a0 with SWHI, then store whole words.
 */
403 LEAF(memset)
404 .set noreorder
405 blt a2, 12, memsetsmallclr # small amount to clear?
406 move v0, a0 # save s1 for result
407
408 sll t1, a1, 8 # compute c << 8 in t1
409 or t1, t1, a1 # compute c << 8 | c in t1
410 sll t2, t1, 16 # shift that left 16
411 or t1, t2, t1 # or together
412
413 PTR_SUBU t0, zero, a0 # compute # bytes to word align address
414 and t0, t0, 3
415 beq t0, zero, 1f # skip if word aligned
416 PTR_SUBU a2, a2, t0 # subtract from remaining count
417 SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
418 PTR_ADDU a0, a0, t0
419 1:
/* the >= 12 byte guard above ensures at least one whole word remains,
 * so the store loop below always executes at least once */
420 and v1, a2, 3 # compute number of whole words left
421 PTR_SUBU t0, a2, v1
422 PTR_SUBU a2, a2, t0
423 PTR_ADDU t0, t0, a0 # compute ending address
424 2:
425 PTR_ADDU a0, a0, 4 # clear words
426 bne a0, t0, 2b # unrolling loop does not help
427 sw t1, -4(a0) # since we are limited by memory speed
428
429 memsetsmallclr:
430 ble a2, zero, 2f
431 PTR_ADDU t0, a2, a0 # compute ending address
432 1:
433 PTR_ADDU a0, a0, 1 # clear bytes
434 bne a0, t0, 1b
435 sb a1, -1(a0)
436 2:
437 j ra
438 nop
439 .set reorder
440 END(memset)
441
442 /*
443 * bzero(s1, n)
444 */
/*
 * bzero(void *s1 /* a0 */, size_t n /* a1 */): zero n bytes at s1.
 * Same structure as memset with c == 0: byte loop for < 12 bytes,
 * otherwise SWHI-align then store whole zero words.  blkclr is an
 * alias for the same entry point.
 */
445 LEAF(bzero)
446 XLEAF(blkclr)
447 .set noreorder
448 blt a1, 12, smallclr # small amount to clear?
449 PTR_SUBU a3, zero, a0 # compute # bytes to word align address
450 and a3, a3, 3
451 beq a3, zero, 1f # skip if word aligned
452 PTR_SUBU a1, a1, a3 # subtract from remaining count
453 SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
454 PTR_ADDU a0, a0, a3
455 1:
456 and v0, a1, 3 # compute number of words left
457 PTR_SUBU a3, a1, v0
458 move a1, v0
459 PTR_ADDU a3, a3, a0 # compute ending address
460 2:
461 PTR_ADDU a0, a0, 4 # clear words
462 bne a0, a3, 2b # unrolling loop does not help
463 sw zero, -4(a0) # since we are limited by memory speed
464 smallclr:
465 ble a1, zero, 2f
466 PTR_ADDU a3, a1, a0 # compute ending address
467 1:
468 PTR_ADDU a0, a0, 1 # clear bytes
469 bne a0, a3, 1b
470 sb zero, -1(a0)
471 2:
472 j ra
473 nop
474 END(bzero)
475
476
477 /*
478 * bcmp(s1, s2, n)
479 */
/*
 * bcmp(const void *s1 /* a0 */, const void *s2 /* a1 */, size_t n /* a2 */)
 * Returns 0 (v0) if the two regions are byte-identical, 1 otherwise
 * (no ordering, unlike memcmp).  For n >= 16 it compares word-at-a-
 * time: if the two pointers have the same alignment mod 4 they are
 * first byte-aligned together; otherwise a1 is word-aligned and a0 is
 * read with LWHI/LWLO unaligned-load pairs.  The tail and small
 * inputs fall through to a byte loop.
 */
480 LEAF(bcmp)
481 .set noreorder
482 blt a2, 16, smallcmp # is it worth any trouble?
483 xor v0, a0, a1 # compare low two bits of addresses
484 and v0, v0, 3
485 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
486 bne v0, zero, unalignedcmp # not possible to align addresses
487 and a3, a3, 3
488
489 beq a3, zero, 1f
490 PTR_SUBU a2, a2, a3 # subtract from remaining count
/* partial LWHI loads leave low bytes untouched; copying v1 into v0
 * beforehand makes those untouched bytes compare equal */
491 move v0, v1 # init v0,v1 so unmodified bytes match
492 LWHI v0, 0(a0) # read 1, 2, or 3 bytes
493 LWHI v1, 0(a1)
494 PTR_ADDU a1, a1, a3
495 bne v0, v1, nomatch
496 PTR_ADDU a0, a0, a3
497 1:
498 and a3, a2, ~3 # compute number of whole words left
499 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
500 PTR_ADDU a3, a3, a0 # compute ending address
501 2:
502 lw v0, 0(a0) # compare words
503 lw v1, 0(a1)
504 PTR_ADDU a0, a0, 4
505 bne v0, v1, nomatch
506 PTR_ADDU a1, a1, 4
507 bne a0, a3, 2b
508 nop
509 b smallcmp # finish remainder
510 nop
511 unalignedcmp:
512 beq a3, zero, 2f
513 PTR_SUBU a2, a2, a3 # subtract from remaining count
514 PTR_ADDU a3, a3, a0 # compute ending address
515 1:
516 lbu v0, 0(a0) # compare bytes until a1 word aligned
517 lbu v1, 0(a1)
518 PTR_ADDU a0, a0, 1
519 bne v0, v1, nomatch
520 PTR_ADDU a1, a1, 1
521 bne a0, a3, 1b
522 nop
523 2:
524 and a3, a2, ~3 # compute number of whole words left
525 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
526 PTR_ADDU a3, a3, a0 # compute ending address
527 3:
528 LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
529 LWLO v0, 3(a0)
530 lw v1, 0(a1)
531 PTR_ADDU a0, a0, 4
532 bne v0, v1, nomatch
533 PTR_ADDU a1, a1, 4
534 bne a0, a3, 3b
535 nop
536 smallcmp:
537 ble a2, zero, match
538 PTR_ADDU a3, a2, a0 # compute ending address
539 1:
540 lbu v0, 0(a0)
541 lbu v1, 0(a1)
542 PTR_ADDU a0, a0, 1
543 bne v0, v1, nomatch
544 PTR_ADDU a1, a1, 1
545 bne a0, a3, 1b
546 nop
547 match:
548 j ra
549 move v0, zero
550 nomatch:
551 j ra
552 li v0, 1
553 END(bcmp)
554
555
556 /*
557 * bit = ffs(value)
558 */
/*
 * ffs(a0): return the 1-based index of the least significant set bit
 * of a0 (bit 0 -> 1), or 0 when a0 == 0.  Simple shift-and-test loop.
 */
559 LEAF(ffs)
560 .set noreorder
561 beq a0, zero, 2f
562 move v0, zero # delay slot: result 0 for input 0
563 1:
564 and v1, a0, 1 # bit set?
565 addu v0, v0, 1
566 beq v1, zero, 1b # no, continue
567 srl a0, a0, 1
568 2:
569 j ra
570 nop
571 END(ffs)
572
573 /**
574 * void
575 * atomic_set_16(u_int16_t *a, u_int16_t b)
576 * {
577 * *a |= b;
578 * }
579 */
/*
 * atomic_set_16(a0, a1): atomically *(uint16_t *)a0 |= a1.
 * MIPS ll/sc only operates on aligned 32-bit words, so the halfword
 * operand is shifted into the correct half of the containing word
 * (offset flipped on big-endian) and the whole word is updated.
 */
580 LEAF(atomic_set_16)
581 .set noreorder
582 /* NB: Only bit 1 is masked so the ll catches unaligned inputs */
583 andi t0, a0, 2 # get unaligned offset
584 xor a0, a0, t0 # align pointer
585 #if _BYTE_ORDER == BIG_ENDIAN
586 xori t0, t0, 2
587 #endif
588 sll t0, t0, 3 # convert byte offset to bit offset
589 sll a1, a1, t0 # put bits in the right half
590 1:
591 ll t0, 0(a0)
592 or t0, t0, a1 # OR cannot disturb the other halfword
593 sc t0, 0(a0)
594 beq t0, zero, 1b # retry if the reservation was lost
595 nop
596 j ra
597 nop
598 END(atomic_set_16)
599
600 /**
601 * void
602 * atomic_clear_16(u_int16_t *a, u_int16_t b)
603 * {
604 * *a &= ~b;
605 * }
606 */
/*
 * atomic_clear_16(a0, a1): atomically *(uint16_t *)a0 &= ~a1.
 * Same word-based ll/sc scheme as atomic_set_16; the shifted mask is
 * inverted so the AND clears only bits in the target halfword (the
 * other halfword ANDs with all-ones and is preserved).
 */
607 LEAF(atomic_clear_16)
608 .set noreorder
609 /* NB: Only bit 1 is masked so the ll catches unaligned inputs */
610 andi t0, a0, 2 # get unaligned offset
611 xor a0, a0, t0 # align pointer
612 #if _BYTE_ORDER == BIG_ENDIAN
613 xori t0, t0, 2
614 #endif
615 sll t0, t0, 3 # convert byte offset to bit offset
616 sll a1, a1, t0 # put bits in the right half
617 not a1, a1 # invert: other halfword stays intact
618 1:
619 ll t0, 0(a0)
620 and t0, t0, a1
621 sc t0, 0(a0)
622 beq t0, zero, 1b # retry if the reservation was lost
623 nop
624 j ra
625 nop
626 END(atomic_clear_16)
627
628
629 /**
630 * void
631 * atomic_subtract_16(uint16_t *a, uint16_t b)
632 * {
633 * *a -= b;
634 * }
635 */
/*
 * atomic_subtract_16(a0, a1): atomically *(uint16_t *)a0 -= a1.
 * ll/sc on the containing 32-bit word.  Since a subu borrow could
 * corrupt the neighbouring halfword, the result is merged back under
 * mask t2 using the xor identity noted below, keeping the other half
 * of the word unchanged.
 */
636 LEAF(atomic_subtract_16)
637 .set noreorder
638 /* NB: Only bit 1 is masked so the ll catches unaligned inputs */
639 andi t0, a0, 2 # get unaligned offset
640 xor a0, a0, t0 # align pointer
641 #if _BYTE_ORDER == BIG_ENDIAN
642 xori t0, t0, 2 # flip order for big-endian
643 #endif
644 sll t0, t0, 3 # convert byte offset to bit offset
645 sll a1, a1, t0 # put bits in the right half
646 li t2, 0xffff
647 sll t2, t2, t0 # compute mask
648 1:
649 ll t0, 0(a0)
650 subu t1, t0, a1
651 /* Exploit ((t0 & ~t2) | (t1 & t2)) = t0 ^ ((t0 ^ t1) & t2) */
652 xor t1, t0, t1
653 and t1, t1, t2
654 xor t0, t0, t1
655 sc t0, 0(a0)
656 beq t0, zero, 1b # retry if the reservation was lost
657 nop
658 j ra
659 nop
660 END(atomic_subtract_16)
661
662 /**
663 * void
664 * atomic_add_16(uint16_t *a, uint16_t b)
665 * {
666 * *a += b;
667 * }
668 */
/*
 * atomic_add_16(a0, a1): atomically *(uint16_t *)a0 += a1.
 * Mirror of atomic_subtract_16: ll/sc on the containing word, with
 * the addu result merged under mask t2 so a carry out of the target
 * halfword cannot spill into its neighbour.
 */
669 LEAF(atomic_add_16)
670 .set noreorder
671 /* NB: Only bit 1 is masked so the ll catches unaligned inputs */
672 andi t0, a0, 2 # get unaligned offset
673 xor a0, a0, t0 # align pointer
674 #if _BYTE_ORDER == BIG_ENDIAN
675 xori t0, t0, 2 # flip order for big-endian
676 #endif
677 sll t0, t0, 3 # convert byte offset to bit offset
678 sll a1, a1, t0 # put bits in the right half
679 li t2, 0xffff
680 sll t2, t2, t0 # compute mask
681 1:
682 ll t0, 0(a0)
683 addu t1, t0, a1
684 /* Exploit ((t0 & ~t2) | (t1 & t2)) = t0 ^ ((t0 ^ t1) & t2) */
685 xor t1, t0, t1
686 and t1, t1, t2
687 xor t0, t0, t1
688 sc t0, 0(a0)
689 beq t0, zero, 1b # retry if the reservation was lost
690 nop
691 j ra
692 nop
693 END(atomic_add_16)
694
695 /**
696 * void
697 * atomic_add_8(uint8_t *a, uint8_t b)
698 * {
699 * *a += b;
700 * }
701 */
/*
 * atomic_add_8(a0, a1): atomically *(uint8_t *)a0 += a1.
 * Byte variant of atomic_add_16: all two low address bits select the
 * byte within the aligned word, and the merge mask is 0xff.
 */
702 LEAF(atomic_add_8)
703 .set noreorder
704 andi t0, a0, 3 # get unaligned offset
705 xor a0, a0, t0 # align pointer
706 #if _BYTE_ORDER == BIG_ENDIAN
707 xori t0, t0, 3 # flip order for big-endian
708 #endif
709 sll t0, t0, 3 # convert byte offset to bit offset
710 sll a1, a1, t0 # put bits in the right quarter
711 li t2, 0xff
712 sll t2, t2, t0 # compute mask
713 1:
714 ll t0, 0(a0)
715 addu t1, t0, a1
716 /* Exploit ((t0 & ~t2) | (t1 & t2)) = t0 ^ ((t0 ^ t1) & t2) */
717 xor t1, t0, t1
718 and t1, t1, t2
719 xor t0, t0, t1
720 sc t0, 0(a0)
721 beq t0, zero, 1b # retry if the reservation was lost
722 nop
723 j ra
724 nop
725 END(atomic_add_8)
726
727
728 /**
729 * void
730 * atomic_subtract_8(uint8_t *a, uint8_t b)
731 * {
732 * *a -= b;
733 * }
734 */
/*
 * atomic_subtract_8(a0, a1): atomically *(uint8_t *)a0 -= a1.
 * Byte variant of atomic_subtract_16: the two low address bits select
 * the byte within the aligned word, and the merge mask is 0xff so a
 * borrow cannot disturb the neighbouring bytes.
 */
735 LEAF(atomic_subtract_8)
736 .set noreorder
737 andi t0, a0, 3 # get unaligned offset
738 xor a0, a0, t0 # align pointer
739 #if _BYTE_ORDER == BIG_ENDIAN
740 xori t0, t0, 3 # flip order for big-endian
741 #endif
742 sll t0, t0, 3 # convert byte offset to bit offset
743 sll a1, a1, t0 # put bits in the right quarter
744 li t2, 0xff
745 sll t2, t2, t0 # compute mask
746 1:
747 ll t0, 0(a0)
748 subu t1, t0, a1
749 /* Exploit ((t0 & ~t2) | (t1 & t2)) = t0 ^ ((t0 ^ t1) & t2) */
750 xor t1, t0, t1
751 and t1, t1, t2
752 xor t0, t0, t1
753 sc t0, 0(a0)
754 beq t0, zero, 1b # retry if the reservation was lost
755 nop
756 j ra
757 nop
758 END(atomic_subtract_8)
759
760 .set noreorder # Noreorder is default style!
761
762 #if defined(DDB) || defined(DEBUG)
763
/*
 * kdbpeek(a0): debugger helper -- read a 32-bit word at address a0,
 * tolerating unaligned pointers (LWHI/LWLO pair) and faults (via the
 * pcb onfault hook pointing at ddberr).  Result in v0.
 */
764 LEAF(kdbpeek)
765 PTR_LA v1, ddberr
766 and v0, a0, 3 # unaligned ?
767 GET_CPU_PCPU(t1)
768 PTR_L t1, PC_CURPCB(t1)
769 bne v0, zero, 1f
770 PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
771
772 lw v0, (a0)
773 jr ra
774 PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
775
776 1:
/* unaligned: assemble the word from two partial loads */
777 LWHI v0, 0(a0)
778 LWLO v0, 3(a0)
779 jr ra
780 PTR_S zero, U_PCB_ONFAULT(t1)
781 END(kdbpeek)
782
/*
 * kdbpeekd(a0): debugger helper -- read a 64-bit doubleword at a0,
 * tolerating unaligned pointers (REG_LHI/REG_LLO pair) and faults
 * (via ddberr).  Result in v0.
 */
783 LEAF(kdbpeekd)
784 PTR_LA v1, ddberr
785 and v0, a0, 3 # unaligned ?
786 GET_CPU_PCPU(t1)
787 PTR_L t1, PC_CURPCB(t1)
788 bne v0, zero, 1f
789 PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
790
791 ld v0, (a0)
792 jr ra
793 PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
794
795 1:
/* unaligned: assemble the doubleword from two partial loads */
796 REG_LHI v0, 0(a0)
797 REG_LLO v0, 7(a0)
798 jr ra
799 PTR_S zero, U_PCB_ONFAULT(t1)
800 END(kdbpeekd)
801
/*
 * Fault target for the kdb peek/poke helpers: just return to the
 * caller (v0 is left holding whatever was there at fault time).
 */
802 ddberr:
803 jr ra
804 nop
805
806 #if defined(DDB)
/*
 * kdbpoke(a0, a1): debugger helper -- write the 32-bit value a1 to
 * address a0, tolerating unaligned pointers (SWHI/SWLO pair) and
 * faults (via ddberr).
 */
807 LEAF(kdbpoke)
808 PTR_LA v1, ddberr
809 and v0, a0, 3 # unaligned ?
810 GET_CPU_PCPU(t1)
811 PTR_L t1, PC_CURPCB(t1)
812 bne v0, zero, 1f
813 PTR_S v1, U_PCB_ONFAULT(t1) # delay slot: arm fault handler
814
815 sw a1, (a0)
816 jr ra
817 PTR_S zero, U_PCB_ONFAULT(t1) # delay slot: disarm handler
818
819 1:
/* unaligned: write the word with two partial stores */
820 SWHI a1, 0(a0)
821 SWLO a1, 3(a0)
822 jr ra
823 PTR_S zero, U_PCB_ONFAULT(t1)
824 END(kdbpoke)
825
826 .data
/* esym: end-of-kernel-symbol-table marker word, exported for DDB */
827 .globl esym
828 esym: .word 0
829
830 #endif /* DDB */
831 #endif /* DDB || DEBUG */
832
833 .text
/*
 * breakpoint(): trap into the debugger with a break instruction
 * carrying the "step over" code, then return.
 */
834 LEAF(breakpoint)
835 break MIPS_BREAK_SOVER_VAL
836 jr ra
837 nop
838 END(breakpoint)
839
/*
 * setjmp(a0): save the callee-saved registers (s0-s8), sp, ra and the
 * coprocessor-0 status register into the jmp_buf at a0, laid out by
 * the PCB_REG_* indices.  Returns 0; a later longjmp on the same
 * buffer resumes here returning 1.
 */
840 LEAF(setjmp)
841 mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
842 REG_S s0, (SZREG * PCB_REG_S0)(a0)
843 REG_S s1, (SZREG * PCB_REG_S1)(a0)
844 REG_S s2, (SZREG * PCB_REG_S2)(a0)
845 REG_S s3, (SZREG * PCB_REG_S3)(a0)
846 REG_S s4, (SZREG * PCB_REG_S4)(a0)
847 REG_S s5, (SZREG * PCB_REG_S5)(a0)
848 REG_S s6, (SZREG * PCB_REG_S6)(a0)
849 REG_S s7, (SZREG * PCB_REG_S7)(a0)
850 REG_S s8, (SZREG * PCB_REG_S8)(a0)
851 REG_S sp, (SZREG * PCB_REG_SP)(a0)
852 REG_S ra, (SZREG * PCB_REG_RA)(a0)
853 REG_S v0, (SZREG * PCB_REG_SR)(a0)
854 jr ra
855 li v0, 0 # setjmp return
856 END(setjmp)
857
/*
 * longjmp(a0): restore the register state saved by setjmp from the
 * jmp_buf at a0 (including the status register) and return 1 from
 * the matching setjmp call site via the saved ra.
 */
858 LEAF(longjmp)
859 REG_L v0, (SZREG * PCB_REG_SR)(a0)
860 REG_L ra, (SZREG * PCB_REG_RA)(a0)
861 REG_L s0, (SZREG * PCB_REG_S0)(a0)
862 REG_L s1, (SZREG * PCB_REG_S1)(a0)
863 REG_L s2, (SZREG * PCB_REG_S2)(a0)
864 REG_L s3, (SZREG * PCB_REG_S3)(a0)
865 REG_L s4, (SZREG * PCB_REG_S4)(a0)
866 REG_L s5, (SZREG * PCB_REG_S5)(a0)
867 REG_L s6, (SZREG * PCB_REG_S6)(a0)
868 REG_L s7, (SZREG * PCB_REG_S7)(a0)
869 REG_L s8, (SZREG * PCB_REG_S8)(a0)
870 REG_L sp, (SZREG * PCB_REG_SP)(a0)
871 mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
872 ITLBNOPFIX
873 jr ra
874 li v0, 1 # longjmp return
875 END(longjmp)
/* Cache object: 3c2e9ecf8bb23b14de273e36b140d3ab */