1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
2 /*-
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
40 *
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
47 *
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
50 * $FreeBSD: releng/11.1/sys/mips/mips/support.S 256497 2013-10-15 04:45:09Z imp $
51 */
52
53 /*
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
56 *
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
59 * are met:
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
68 * the NetBSD Project.
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
71 *
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82 * SUCH DAMAGE.
83 */
84
85 /*
86 * Contains assembly language support routines.
87 */
88
89 #include "opt_ddb.h"
90 #include <sys/errno.h>
91 #include <machine/asm.h>
92 #include <machine/cpu.h>
93 #include <machine/regnum.h>
94 #include <machine/cpuregs.h>
95 #include <machine/pcb.h>
96
97 #include "assym.s"
98
99 .set noreorder # Noreorder is default style!
100
101 /*
102 * Primitives
103 */
104
105 .text
106
/*
 * See if access to addr with a len type instruction causes a machine check.
 * len is length of access (1=byte, 2=short, anything else=int)
 *
 * Returns 0 if the access succeeded, 1 if it trapped.  A fault while
 * pcb_onfault is armed redirects control to baderr below.
 *
 * badaddr(addr, len)
 *	char *addr;
 *	int len;
 */
LEAF(badaddr)
	PTR_LA	v0, baderr		# v0 = fault-handler address
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	# v1 = current pcb
	bne	a1, 1, 2f
	PTR_S	v0, U_PCB_ONFAULT(v1)	# delay slot: arm onfault either way
	b	5f
	lbu	v0, (a0)		# delay slot: probe with byte load
2:
	bne	a1, 2, 4f
	nop
	b	5f
	lhu	v0, (a0)		# delay slot: probe with halfword load
4:
	lw	v0, (a0)		# probe with word load
5:
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# made it w/o errors
baderr:
	j	ra
	li	v0, 1			# trap sends us here
END(badaddr)
138
/*
 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
 * Copy a NIL-terminated string, at most maxlen characters long.  Return the
 * number of characters copied (including the NIL) in *lencopied.  If the
 * string is too long, return ENAMETOOLONG; else return 0.
 */
LEAF(copystr)
	move	t0, a2			# t0 = original maxlen, for the count
	beq	a2, zero, 4f		# maxlen == 0: fail immediately
1:
	lbu	v0, 0(a0)		# v0 = next source byte
	PTR_SUBU a2, a2, 1
	beq	v0, zero, 2f		# NIL: done; v0 == 0 is the return value
	sb	v0, 0(a1)		# delay slot: store each byte (incl. NIL)
	PTR_ADDU a0, a0, 1
	bne	a2, zero, 1b		# still less than maxlen
	PTR_ADDU a1, a1, 1
4:
	li	v0, ENAMETOOLONG	# ran out of space
2:
	beq	a3, zero, 3f		# return num. of copied bytes
	PTR_SUBU a2, t0, a2		#  if the 4th arg was non-NULL
	PTR_S	a2, 0(a3)
3:
	j	ra			# v0 is 0 or ENAMETOOLONG
	nop
END(copystr)
166
167
/*
 * Copy a null terminated string from the user address space into
 * the kernel address space.  Returns copystr()'s result, or EFAULT
 * via copyerr if the user access faults.
 *
 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
NESTED(copyinstr, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr		# fault handler for the copy
	blt	a0, zero, _C_LABEL(copyerr) # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)	# delay slot: save ra either way
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)	# copystr does the actual work
	PTR_S	v0, U_PCB_ONFAULT(v1)	# delay slot: arm onfault
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	# reload pcb pointer after the call
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra			# v0 = copystr's return value
	PTR_ADDU sp, sp, CALLFRAME_SIZ
END(copyinstr)
195
/*
 * Copy a null terminated string from the kernel address space into
 * the user address space.  Returns copystr()'s result, or EFAULT
 * via copyerr if the user access faults.
 *
 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
 *	caddr_t fromaddr;
 *	caddr_t toaddr;
 *	u_int maxlength;
 *	u_int *lencopied;
 */
NESTED(copyoutstr, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr		# fault handler for the copy
	blt	a1, zero, _C_LABEL(copyerr) # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)	# delay slot: save ra either way
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(copystr)	# copystr does the actual work
	PTR_S	v0, U_PCB_ONFAULT(v1)	# delay slot: arm onfault
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	# reload pcb pointer after the call
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra			# v0 = copystr's return value
	PTR_ADDU sp, sp, CALLFRAME_SIZ
END(copyoutstr)
223
/*
 * Copy specified amount of data from user space into the kernel.
 * Returns 0 on success, EFAULT (via copyerr) if the user access faults.
 *
 * copyin(from, to, len)
 *	caddr_t *from;	(user source address)
 *	caddr_t *to;	(kernel destination address)
 *	unsigned len;
 */
NESTED(copyin, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr		# fault handler for the copy
	blt	a0, zero, _C_LABEL(copyerr) # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)	# delay slot: save ra either way
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)		# bcopy does the actual work
	PTR_S	v0, U_PCB_ONFAULT(v1)	# delay slot: arm onfault
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	# bcopy modified v1, so reload
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	PTR_ADDU sp, sp, CALLFRAME_SIZ
	j	ra
	move	v0, zero		# success
END(copyin)
249
/*
 * Copy specified amount of data from kernel to the user space.
 * Returns 0 on success, EFAULT (via copyerr) if the user access faults.
 *
 * copyout(from, to, len)
 *	caddr_t *from;	(kernel source address)
 *	caddr_t *to;	(user destination address)
 *	unsigned len;
 */
NESTED(copyout, CALLFRAME_SIZ, ra)
	PTR_SUBU sp, sp, CALLFRAME_SIZ
	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
	PTR_LA	v0, copyerr		# fault handler for the copy
	blt	a1, zero, _C_LABEL(copyerr) # make sure address is in user space
	REG_S	ra, CALLFRAME_RA(sp)	# delay slot: save ra either way
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	jal	_C_LABEL(bcopy)		# bcopy does the actual work
	PTR_S	v0, U_PCB_ONFAULT(v1)	# delay slot: arm onfault
	REG_L	ra, CALLFRAME_RA(sp)
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)	# bcopy modified v1, so reload
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	PTR_ADDU sp, sp, CALLFRAME_SIZ
	j	ra
	move	v0, zero		# success
END(copyout)
275
/*
 * Common fault target for copyin/copyout/copy{in,out}str above:
 * unwind their call frame and return EFAULT.
 */
LEAF(copyerr)
	REG_L	ra, CALLFRAME_RA(sp)
	PTR_ADDU sp, sp, CALLFRAME_SIZ
	j	ra
	li	v0, EFAULT		# return error
END(copyerr)
282
/*
 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
 * user text space.
 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
 * user data space.
 *
 * All of these arm pcb_onfault so that a faulting user access lands in
 * fswberr, which returns -1.
 */
#ifdef __mips_n64
LEAF(fuword64)
XLEAF(fuword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	ld	v0, 0(a0)		# fetch doubleword
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)	# delay slot: disarm fault handler
END(fuword64)
#endif
303
/*
 * fuword32(addr): fetch a 32-bit word from user space; -1 on fault.
 */
LEAF(fuword32)
#ifndef __mips_n64
XLEAF(fuword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	lw	v0, 0(a0)		# fetch word
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)	# delay slot: disarm fault handler
END(fuword32)
318
/*
 * fusword(addr): fetch an unsigned short from user space; -1 on fault.
 */
LEAF(fusword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	lhu	v0, 0(a0)		# fetch short
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)	# delay slot: disarm fault handler
END(fusword)
330
/*
 * fubyte(addr): fetch an unsigned byte from user space; -1 on fault.
 */
LEAF(fubyte)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	lbu	v0, 0(a0)		# fetch byte
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)	# delay slot: disarm fault handler
END(fubyte)
342
/*
 * suword32(addr, v): store a 32-bit word to user space;
 * returns 0 on success, -1 on fault.
 */
LEAF(suword32)
#ifndef __mips_n64
XLEAF(suword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	sw	a1, 0(a0)		# store word
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# success
END(suword32)
358
#ifdef __mips_n64
/*
 * suword64(addr, v): store a 64-bit word to user space;
 * returns 0 on success, -1 on fault.
 */
LEAF(suword64)
XLEAF(suword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	sd	a1, 0(a0)		# store doubleword
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# success
END(suword64)
#endif
374
/*
 * casuword(9)
 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
 */
/*
 * casuword32(9)
 * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
 *	<a2>uint32_t newval)
 *
 * Compare-and-set a user-space word using ll/sc.  Returns the value
 * read (== oldval) on success, -1 on compare mismatch or fault.
 */
LEAF(casuword32)
#ifndef __mips_n64
XLEAF(casuword)
#endif
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
1:
	move	t0, a2			# sc consumes its source; keep newval in a2
	ll	v0, 0(a0)		# load-linked the current value
	bne	a1, v0, 2f		# mismatch: fail
	nop
	sc	t0, 0(a0)		# store word
	beqz	t0, 1b			# reservation lost: retry
	nop
	j	3f			# success: v0 still holds the value read
	nop
2:
	li	v0, -1
3:
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	jr	ra
	nop
END(casuword32)
411
#ifdef __mips_n64
/*
 * casuword64(9): 64-bit variant of casuword32 above, using lld/scd.
 * Returns the value read (== oldval) on success, -1 on mismatch or fault.
 */
LEAF(casuword64)
XLEAF(casuword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
1:
	move	t0, a2			# scd consumes its source; keep newval in a2
	lld	v0, 0(a0)		# load-linked the current value
	bne	a1, v0, 2f		# mismatch: fail
	nop
	scd	t0, 0(a0)		# store double word
	beqz	t0, 1b			# reservation lost: retry
	nop
	j	3f			# success: v0 still holds the value read
	nop
2:
	li	v0, -1
3:
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	jr	ra
	nop
END(casuword64)
#endif
439
/*
 * Will have to flush the instruction cache if byte merging is done in hardware.
 *
 * susword(addr, v): store a short to user space;
 * returns 0 on success, -1 on fault.
 */
LEAF(susword)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	sh	a1, 0(a0)		# store short
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# success
END(susword)
455
/*
 * subyte(addr, v): store a byte to user space;
 * returns 0 on success, -1 on fault.
 */
LEAF(subyte)
	PTR_LA	v0, fswberr
	blt	a0, zero, fswberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm fault handler
	sb	a1, 0(a0)		# store byte
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# success
END(subyte)
468
/*
 * Common fault target for the fu*/su*/casuword* routines: return -1.
 */
LEAF(fswberr)
	j	ra
	li	v0, -1			# trap sends us here
END(fswberr)
473
/*
 * fuswintr and suswintr are just like fusword and susword except that if
 * the page is not in memory or would cause a trap, then we return an error.
 * The important thing is to prevent sleep() and switch().
 */
LEAF(fuswintr)
	PTR_LA	v0, fswintrberr
	blt	a0, zero, fswintrberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm interrupt-safe fault handler
	lhu	v0, 0(a0)		# fetch short
	j	ra
	PTR_S	zero, U_PCB_ONFAULT(v1)	# delay slot: disarm fault handler
END(fuswintr)
490
/*
 * suswintr(addr, v): interrupt-safe variant of susword (see comment
 * above fuswintr); returns 0 on success, -1 on fault.
 */
LEAF(suswintr)
	PTR_LA	v0, fswintrberr
	blt	a0, zero, fswintrberr	# make sure address is in user space
	nop
	GET_CPU_PCPU(v1)
	PTR_L	v1, PC_CURPCB(v1)
	PTR_S	v0, U_PCB_ONFAULT(v1)	# arm interrupt-safe fault handler
	sh	a1, 0(a0)		# store short
	PTR_S	zero, U_PCB_ONFAULT(v1)	# disarm fault handler
	j	ra
	move	v0, zero		# success
END(suswintr)
503
/*
 * Fault target for fuswintr/suswintr: return -1.
 */
LEAF(fswintrberr)
	j	ra
	li	v0, -1			# trap sends us here
END(fswintrberr)
508
/*
 * memset(void *s1, int c, int len)
 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
 *
 * Fill len bytes at s1 with byte c; returns s1.  Large fills replicate
 * c into all four bytes of a word and store word-at-a-time.
 */
LEAF(memset)
	.set noreorder
	blt	a2, 12, memsetsmallclr	# small amount to clear?
	move	v0, a0			# save s1 for result

	sll	t1, a1, 8		# compute c << 8 in t1
	or	t1, t1, a1		# compute c << 8 | c in t1
	sll	t2, t1, 16		# shift that left 16
	or	t1, t2, t1		# or together: c in all 4 bytes of t1

	PTR_SUBU t0, zero, a0		# compute # bytes to word align address
	and	t0, t0, 3
	beq	t0, zero, 1f		# skip if word aligned
	PTR_SUBU a2, a2, t0		# subtract from remaining count
	SWHI	t1, 0(a0)		# store 1, 2, or 3 bytes to align
	PTR_ADDU a0, a0, t0
1:
	and	v1, a2, 3		# v1 = leftover bytes after word stores
	PTR_SUBU t0, a2, v1		# t0 = byte count covered by whole words
	PTR_SUBU a2, a2, t0		# a2 = leftover byte count
	PTR_ADDU t0, t0, a0		# compute ending address
2:
	PTR_ADDU a0, a0, 4		# clear words
	bne	a0, t0, 2b		#  unrolling loop does not help
	sw	t1, -4(a0)		#  since we are limited by memory speed

memsetsmallclr:
	ble	a2, zero, 2f
	PTR_ADDU t0, a2, a0		# compute ending address
1:
	PTR_ADDU a0, a0, 1		# clear bytes
	bne	a0, t0, 1b
	sb	a1, -1(a0)
2:
	j	ra			# v0 still holds s1
	nop
	.set reorder
END(memset)
551
/*
 * bzero(s1, n)
 * Zero n bytes at s1; word-at-a-time for large runs.
 */
LEAF(bzero)
XLEAF(blkclr)
	.set noreorder
	blt	a1, 12, smallclr	# small amount to clear?
	PTR_SUBU a3, zero, a0		# compute # bytes to word align address
	and	a3, a3, 3
	beq	a3, zero, 1f		# skip if word aligned
	PTR_SUBU a1, a1, a3		# subtract from remaining count
	SWHI	zero, 0(a0)		# clear 1, 2, or 3 bytes to align
	PTR_ADDU a0, a0, a3
1:
	and	v0, a1, 3		# v0 = leftover bytes after word stores
	PTR_SUBU a3, a1, v0		# a3 = byte count covered by whole words
	move	a1, v0			# a1 = leftover byte count
	PTR_ADDU a3, a3, a0		# compute ending address
2:
	PTR_ADDU a0, a0, 4		# clear words
	bne	a0, a3, 2b		#  unrolling loop does not help
	sw	zero, -4(a0)		#  since we are limited by memory speed
smallclr:
	ble	a1, zero, 2f
	PTR_ADDU a3, a1, a0		# compute ending address
1:
	PTR_ADDU a0, a0, 1		# clear bytes
	bne	a0, a3, 1b
	sb	zero, -1(a0)
2:
	j	ra
	nop
END(bzero)
585
586
/*
 * bcmp(s1, s2, n)
 * Compare n bytes; returns 0 on match, 1 on mismatch.  Compares
 * word-at-a-time when both pointers share alignment, and uses
 * LWHI/LWLO when only s2 can be word aligned.
 */
LEAF(bcmp)
	.set noreorder
	blt	a2, 16, smallcmp	# is it worth any trouble?
	xor	v0, a0, a1		# compare low two bits of addresses
	and	v0, v0, 3
	PTR_SUBU a3, zero, a1		# compute # bytes to word align address
	bne	v0, zero, unalignedcmp	# not possible to align addresses
	and	a3, a3, 3

	beq	a3, zero, 1f
	PTR_SUBU a2, a2, a3		# subtract from remaining count
	move	v0, v1			# init v0,v1 so unmodified bytes match
	LWHI	v0, 0(a0)		# read 1, 2, or 3 bytes
	LWHI	v1, 0(a1)
	PTR_ADDU a1, a1, a3
	bne	v0, v1, nomatch
	PTR_ADDU a0, a0, a3
1:
	and	a3, a2, ~3		# compute number of whole words left
	PTR_SUBU a2, a2, a3		#  which has to be >= (16-3) & ~3
	PTR_ADDU a3, a3, a0		# compute ending address
2:
	lw	v0, 0(a0)		# compare words
	lw	v1, 0(a1)
	PTR_ADDU a0, a0, 4
	bne	v0, v1, nomatch
	PTR_ADDU a1, a1, 4
	bne	a0, a3, 2b
	nop
	b	smallcmp		# finish remainder
	nop
unalignedcmp:
	beq	a3, zero, 2f
	PTR_SUBU a2, a2, a3		# subtract from remaining count
	PTR_ADDU a3, a3, a0		# compute ending address
1:
	lbu	v0, 0(a0)		# compare bytes until a1 word aligned
	lbu	v1, 0(a1)
	PTR_ADDU a0, a0, 1
	bne	v0, v1, nomatch
	PTR_ADDU a1, a1, 1
	bne	a0, a3, 1b
	nop
2:
	and	a3, a2, ~3		# compute number of whole words left
	PTR_SUBU a2, a2, a3		#  which has to be >= (16-3) & ~3
	PTR_ADDU a3, a3, a0		# compute ending address
3:
	LWHI	v0, 0(a0)		# compare words a0 unaligned, a1 aligned
	LWLO	v0, 3(a0)
	lw	v1, 0(a1)
	PTR_ADDU a0, a0, 4
	bne	v0, v1, nomatch
	PTR_ADDU a1, a1, 4
	bne	a0, a3, 3b
	nop
smallcmp:
	ble	a2, zero, match		# trailing bytes one at a time
	PTR_ADDU a3, a2, a0		# compute ending address
1:
	lbu	v0, 0(a0)
	lbu	v1, 0(a1)
	PTR_ADDU a0, a0, 1
	bne	v0, v1, nomatch
	PTR_ADDU a1, a1, 1
	bne	a0, a3, 1b
	nop
match:
	j	ra
	move	v0, zero		# equal
nomatch:
	j	ra
	li	v0, 1			# differ
END(bcmp)
664
665
/*
 * bit = ffs(value)
 * Find first set: return the 1-based index of the least significant
 * set bit, or 0 if value is 0.
 */
LEAF(ffs)
	.set noreorder
	beq	a0, zero, 2f		# no bits set: return 0
	move	v0, zero
1:
	and	v1, a0, 1		# bit set?
	addu	v0, v0, 1		# count this position
	beq	v1, zero, 1b		# no, continue
	srl	a0, a0, 1		# delay slot: shift to next bit
2:
	j	ra
	nop
END(ffs)
682
/**
 * void
 * atomic_set_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a |= b;
 * }
 */
LEAF(atomic_set_16)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
	andi	a1, a1, 0xffff		# mask b to 16 bits
1:
	ll	t0, 0(a0)		# load-linked the containing word
	or	t0, t0, a1		# NOTE(review): ORs into the low 16 bits
					#  of the aligned word; assumes the target
					#  halfword lives there — verify on
					#  big-endian configurations
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_set_16)
704
/**
 * void
 * atomic_clear_16(u_int16_t *a, u_int16_t b)
 * {
 *	*a &= ~b;
 * }
 */
LEAF(atomic_clear_16)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
	nor	a1, zero, a1		# a1 = ~b
1:
	ll	t0, 0(a0)		# load-linked the containing word
	move	t1, t0
	andi	t1, t1, 0xffff		# t1 has the original lower 16 bits
	and	t1, t1, a1		# t1 has the new lower 16 bits
	srl	t0, t0, 16		# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_clear_16)
731
732
/**
 * void
 * atomic_subtract_16(uint16_t *a, uint16_t b)
 * {
 *	*a -= b;
 * }
 */
LEAF(atomic_subtract_16)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)		# load-linked the containing word
	move	t1, t0
	andi	t1, t1, 0xffff		# t1 has the original lower 16 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xffff		# t1 has the new lower 16 bits
	srl	t0, t0, 16		# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_subtract_16)
759
/**
 * void
 * atomic_add_16(uint16_t *a, uint16_t b)
 * {
 *	*a += b;
 * }
 */
LEAF(atomic_add_16)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)		# load-linked the containing word
	move	t1, t0
	andi	t1, t1, 0xffff		# t1 has the original lower 16 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xffff		# t1 has the new lower 16 bits
	srl	t0, t0, 16		# preserve original top 16 bits
	sll	t0, t0, 16
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_add_16)
786
/**
 * void
 * atomic_add_8(uint8_t *a, uint8_t b)
 * {
 *	*a += b;
 * }
 */
LEAF(atomic_add_8)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)		# load-linked the containing word
	move	t1, t0
	andi	t1, t1, 0xff		# t1 has the original lower 8 bits
	addu	t1, t1, a1
	andi	t1, t1, 0xff		# t1 has the new lower 8 bits
	srl	t0, t0, 8		# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_add_8)
813
814
/**
 * void
 * atomic_subtract_8(uint8_t *a, uint8_t b)
 * {
 *	*a -= b;
 * }
 */
LEAF(atomic_subtract_8)
	.set noreorder
	srl	a0, a0, 2		# round down address to be 32-bit aligned
	sll	a0, a0, 2
1:
	ll	t0, 0(a0)		# load-linked the containing word
	move	t1, t0
	andi	t1, t1, 0xff		# t1 has the original lower 8 bits
	subu	t1, t1, a1
	andi	t1, t1, 0xff		# t1 has the new lower 8 bits
	srl	t0, t0, 8		# preserve original top 24 bits
	sll	t0, t0, 8
	or	t0, t0, t1
	sc	t0, 0(a0)
	beq	t0, zero, 1b		# retry if store-conditional failed
	nop
	j	ra
	nop
END(atomic_subtract_8)
841
842 /*
843 * atomic 64-bit register read/write assembly language support routines.
844 */
845
846 .set noreorder # Noreorder is default style!
847
848 #if !defined(__mips_n64) && !defined(__mips_n32)
849 /*
850 * I don't know if these routines have the right number of
851 * NOPs in it for all processors. XXX
852 *
853 * Maybe it would be better to just leave this undefined in that case.
854 *
855 * XXX These routines are not safe in the case of a TLB miss on a1 or
856 * a0 unless the trapframe is 64-bit, which it just isn't with O32.
857 * If we take any exception, not just an interrupt, the upper
858 * 32-bits will be clobbered. Use only N32 and N64 kernels if you
859 * want to use 64-bit registers while interrupts are enabled or
860 * with memory operations. Since this isn't even using load-linked
861 * and store-conditional, perhaps it should just use two registers
862 * instead, as is right and good with the O32 ABI.
863 */
/*
 * atomic_store_64(p, v): store the 64-bit value at *v to *p with
 * interrupts disabled around the ld/sd pair, so this CPU cannot take
 * an exception (which would clobber upper register halves under O32 —
 * see the comment block above) mid-access.
 */
LEAF(atomic_store_64)
	mfc0	t1, MIPS_COP_0_STATUS	# t1 = saved status
	and	t2, t1, ~MIPS_SR_INT_IE	# mask the interrupt-enable bit
	mtc0	t2, MIPS_COP_0_STATUS	# interrupts off
	nop
	nop
	nop
	nop
	ld	t0, (a1)		# t0 = *v (64-bit)
	nop
	nop
	sd	t0, (a0)		# *p = t0
	nop
	nop
	mtc0	t1,MIPS_COP_0_STATUS	# restore previous interrupt state
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_store_64)
886
/*
 * atomic_load_64(p, res): read the 64-bit value at *p into *res with
 * interrupts disabled around the ld/sd pair (same O32 caveat as
 * atomic_store_64).
 */
LEAF(atomic_load_64)
	mfc0	t1, MIPS_COP_0_STATUS	# t1 = saved status
	and	t2, t1, ~MIPS_SR_INT_IE	# mask the interrupt-enable bit
	mtc0	t2, MIPS_COP_0_STATUS	# interrupts off
	nop
	nop
	nop
	nop
	ld	t0, (a0)		# t0 = *p (64-bit)
	nop
	nop
	sd	t0, (a1)		# *res = t0
	nop
	nop
	mtc0	t1,MIPS_COP_0_STATUS	# restore previous interrupt state
	nop
	nop
	nop
	nop
	j	ra
	nop
END(atomic_load_64)
909 #endif
910
911 #if defined(DDB) || defined(DEBUG)
912
/*
 * kdbpeek(addr): debugger word fetch with fault protection; handles
 * unaligned addresses via LWHI/LWLO.  On fault, control goes to
 * ddberr and v0 is left as loaded so far.
 */
LEAF(kdbpeek)
	PTR_LA	v1, ddberr		# fault handler address
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)	# delay slot: arm onfault either way

	lw	v0, (a0)		# aligned word load
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler

1:
	LWHI	v0, 0(a0)		# unaligned: assemble from partial loads
	LWLO	v0, 3(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler
END(kdbpeek)
931
/*
 * kdbpeekd(addr): debugger doubleword fetch with fault protection;
 * handles unaligned addresses via REG_LHI/REG_LLO.
 */
LEAF(kdbpeekd)
	PTR_LA	v1, ddberr		# fault handler address
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)	# delay slot: arm onfault either way

	ld	v0, (a0)		# aligned doubleword load
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler

1:
	REG_LHI	v0, 0(a0)		# unaligned: assemble from partial loads
	REG_LLO	v0, 7(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler
END(kdbpeekd)
950
/*
 * Fault target for kdbpeek/kdbpeekd/kdbpoke: just return to the caller.
 */
ddberr:
	jr	ra
	nop
954
955 #if defined(DDB)
/*
 * kdbpoke(addr, v): debugger word store with fault protection;
 * handles unaligned addresses via SWHI/SWLO.
 */
LEAF(kdbpoke)
	PTR_LA	v1, ddberr		# fault handler address
	and	v0, a0, 3		# unaligned ?
	GET_CPU_PCPU(t1)
	PTR_L	t1, PC_CURPCB(t1)
	bne	v0, zero, 1f
	PTR_S	v1, U_PCB_ONFAULT(t1)	# delay slot: arm onfault either way

	sw	a1, (a0)		# aligned word store
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler

1:
	SWHI	a1, 0(a0)		# unaligned: store in two pieces
	SWLO	a1, 3(a0)
	jr	ra
	PTR_S	zero, U_PCB_ONFAULT(t1)	# delay slot: disarm fault handler
END(kdbpoke)
974
	.data
	.globl	esym
esym:	.word	0		# NOTE(review): presumably patched elsewhere
				#  (end-of-symbol-table pointer) — confirm
978
979 #endif /* DDB */
980 #endif /* DDB || DEBUG */
981
	.text
/*
 * breakpoint(): enter the debugger via a break instruction, then return.
 */
LEAF(breakpoint)
	break	MIPS_BREAK_SOVER_VAL	# breakpoint trap
	jr	ra
	nop
END(breakpoint)
988
/*
 * setjmp(buf): save the callee-saved registers s0-s8, sp, ra and the
 * status register into buf (indexed by the PCB_REG_* slots); returns 0.
 */
LEAF(setjmp)
	mfc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	REG_S	s0, (SZREG * PCB_REG_S0)(a0)
	REG_S	s1, (SZREG * PCB_REG_S1)(a0)
	REG_S	s2, (SZREG * PCB_REG_S2)(a0)
	REG_S	s3, (SZREG * PCB_REG_S3)(a0)
	REG_S	s4, (SZREG * PCB_REG_S4)(a0)
	REG_S	s5, (SZREG * PCB_REG_S5)(a0)
	REG_S	s6, (SZREG * PCB_REG_S6)(a0)
	REG_S	s7, (SZREG * PCB_REG_S7)(a0)
	REG_S	s8, (SZREG * PCB_REG_S8)(a0)
	REG_S	sp, (SZREG * PCB_REG_SP)(a0)
	REG_S	ra, (SZREG * PCB_REG_RA)(a0)
	REG_S	v0, (SZREG * PCB_REG_SR)(a0)
	jr	ra
	li	v0, 0			# setjmp return
END(setjmp)
1006
/*
 * longjmp(buf): restore the register state saved by setjmp and resume
 * at the saved ra; "returns" 1 at the original setjmp call site.
 */
LEAF(longjmp)
	REG_L	v0, (SZREG * PCB_REG_SR)(a0)
	REG_L	ra, (SZREG * PCB_REG_RA)(a0)
	REG_L	s0, (SZREG * PCB_REG_S0)(a0)
	REG_L	s1, (SZREG * PCB_REG_S1)(a0)
	REG_L	s2, (SZREG * PCB_REG_S2)(a0)
	REG_L	s3, (SZREG * PCB_REG_S3)(a0)
	REG_L	s4, (SZREG * PCB_REG_S4)(a0)
	REG_L	s5, (SZREG * PCB_REG_S5)(a0)
	REG_L	s6, (SZREG * PCB_REG_S6)(a0)
	REG_L	s7, (SZREG * PCB_REG_S7)(a0)
	REG_L	s8, (SZREG * PCB_REG_S8)(a0)
	REG_L	sp, (SZREG * PCB_REG_SP)(a0)
	mtc0	v0, MIPS_COP_0_STATUS	# Later the "real" spl value!
	ITLBNOPFIX
	jr	ra
	li	v0, 1			# longjmp return
END(longjmp)
1025
/*
 * mips3_ld(p): atomic 64-bit load.  Under O32 the result is split
 * across v0/v1 (high/low word per endianness) and interrupts are
 * disabled around the ld, since O32 exceptions do not preserve the
 * upper 32 bits of registers (see the comment block above
 * atomic_store_64).  Under N32/N64 it is a plain ld.
 */
LEAF(mips3_ld)
	.set push
	.set noreorder
	.set mips64
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS	# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop
	nop
	nop

	ld	v0, 0(a0)		# 64-bit load into one register
#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	v1, v0, 32
	dsra	v1, v1, 32		# low word in v1
	dsra	v0, v0, 32		# high word in v0
#else
	dsra	v1, v0, 32		# high word in v1
	dsll	v0, v0, 32
	dsra	v0, v0, 32		# low word in v0
#endif

	mtc0	t0, MIPS_COP_0_STATUS	# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	ld	v0, 0(a0)
#endif /* !__mips_o32 */

	jr	ra
	nop
	.set pop
END(mips3_ld)
1061
/*
 * mips3_sd(p, v): atomic 64-bit store.  Under O32 the 64-bit value
 * arrives as two 32-bit halves in a2/a3 (a1 is ABI padding) and is
 * reassembled with interrupts disabled, for the same O32 exception
 * caveat as mips3_ld.  Under N32/N64 it is a plain sd of a1.
 */
LEAF(mips3_sd)
	.set push
	.set mips64
	.set noreorder
#if defined(__mips_o32)
	mfc0	t0, MIPS_COP_0_STATUS	# turn off interrupts
	and	t1, t0, ~(MIPS_SR_INT_IE)
	mtc0	t1, MIPS_COP_0_STATUS
	COP0_SYNC
	nop
	nop
	nop

	# NOTE: a1 is padding!

#if _BYTE_ORDER == _BIG_ENDIAN
	dsll	a2, a2, 32		# high word in a2
	dsll	a3, a3, 32		# low word in a3
	dsrl	a3, a3, 32
#else
	dsll	a2, a2, 32		# low word in a2
	dsrl	a2, a2, 32
	dsll	a3, a3, 32		# high word in a3
#endif
	or	a1, a2, a3		# reassemble the 64-bit value
	sd	a1, 0(a0)

	mtc0	t0, MIPS_COP_0_STATUS	# restore intr status.
	COP0_SYNC
	nop
#else /* !__mips_o32 */
	sd	a1, 0(a0)
#endif /* !__mips_o32 */

	jr	ra
	nop
	.set pop
END(mips3_sd)
/* (removed trailing source-browser residue: "Cache object: c7bba5acbba9a68dfa03a9a6682a2318") */