1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD$
31 */
32
33 #include "opt_ddb.h"
34
35 #include <machine/asmacros.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38
39 #include "assym.s"
40
	ALIGN_DATA
	/*
	 * Interrupt counter storage and the matching name strings, sized
	 * by INTRCNT_COUNT from assym.s.  The e* labels mark the array
	 * ends (presumably consumed by userland stat tools -- verify).
	 */
	.globl	intrcnt, eintrcnt
intrcnt:
	.space	INTRCNT_COUNT * 8		/* one 64-bit counter each */
eintrcnt:

	.globl	intrnames, eintrnames
intrnames:
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1)	/* one name slot each */
eintrnames:
51
52 .text
53
/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 *	%rdi = buf, %rsi = len
 *
 * Clear a buffer: bulk 8-byte stores first, then the 0..7 trailing bytes.
 */
ENTRY(bzero)
	cld				/* store ascending */
	xorl	%eax,%eax		/* zero fill pattern (clears %rax) */
	movq	%rsi,%rcx
	shrq	$3,%rcx			/* whole 8-byte words */
	rep
	stosq
	movl	%esi,%ecx		/* reload len (upper bits cleared) */
	andl	$7,%ecx			/* 0..7 trailing bytes */
	rep
	stosb
	ret
72
/*
 * pagezero(void *addr)
 *	%rdi = addr (assumed page-aligned)
 *
 * Zero one page with non-temporal stores so the cache is not polluted.
 * %rdx counts from -PAGE_SIZE up to 0 while %rdi is biased one page
 * forward, so (%rdi,%rdx) sweeps the page and the loop ends when the
 * index wraps to zero.
 */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx	/* index: -PAGE_SIZE .. 0 */
	subq	%rdx,%rdi		/* bias addr one page forward */
	xorq	%rax,%rax		/* store pattern: zero */
1:
	movnti	%rax,(%rdi,%rdx)	/* non-temporal 8-byte stores */
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
	addq	$32,%rdx		/* 32 bytes per iteration */
	jne	1b
	sfence				/* order the non-temporal stores */
	ret
87
/*
 * bcmp(b1, b2, len)
 *	%rdi, %rsi, %rdx
 *
 * Returns 0 if the buffers are identical, 1 otherwise (equality only,
 * no ordering).
 */
ENTRY(bcmp)
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* compare by 8-byte words first */
	cld				/* compare forwards */
	repe
	cmpsq
	jne	1f			/* mismatch found in the word pass */

	movq	%rdx,%rcx
	andq	$7,%rcx			/* then the 0..7 trailing bytes */
	repe
	cmpsb
1:
	setne	%al			/* ZF clear -> buffers differ */
	movsbl	%al,%eax		/* widen 0/1 into the return reg */
	ret
104
/*
 * bcopy(src, dst, cnt)
 *	rdi, rsi, rdx
 *	ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) 	+49-228-985800
 *
 * Overlap-safe copy: goes backwards when the regions overlap and
 * src < dst, forwards otherwise.
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi		/* string ops want %rsi=src, %rdi=dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax		/* %rax = dst - src */
	cmpq	%rcx,%rax		/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx			/* copy by 64-bit words */
	cld				/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi		/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi			/* start at the last byte */
	decq	%rsi
	andq	$7,%rcx			/* any fractional bytes? */
	std				/* descending copy */
	rep
	movsb
	movq	%rdx,%rcx		/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi			/* point at low byte of last word */
	subq	$7,%rdi
	rep
	movsq
	cld				/* restore expected direction flag */
	ret
147
/*
 * memcpy(dst, src, cnt)
 *	%rdi, %rsi, %rdx
 *
 * Note: memcpy does not support overlapping copies
 */
ENTRY(memcpy)
	cld				/* copy ascending */
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* whole 8-byte words first */
	rep
	movsq
	movl	%edx,%ecx		/* reload cnt (upper bits cleared) */
	andl	$7,%ecx			/* then the 0..7 leftover bytes */
	rep
	movsb
	ret
162
/*
 * pagecopy(%rdi=from, %rsi=to)
 *
 * Copy one page: first prefetch the whole source page, then copy with
 * non-temporal destination stores to avoid displacing useful cache
 * lines.  Both pointers are biased one page forward so a negative
 * index counting up to zero sweeps the page.
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax	/* prefetch index: -PAGE_SIZE .. 0 */
	movq	%rax,%rdx		/* copy index, same range */
	subq	%rax,%rdi		/* bias both pointers forward */
	subq	%rax,%rsi
1:
	prefetchnta (%rdi,%rax)		/* pull the source page in */
	addq	$64,%rax		/* one cache line per iteration */
	jne	1b
2:
	movq	(%rdi,%rdx),%rax
	movnti	%rax,(%rsi,%rdx)	/* non-temporal destination store */
	movq	8(%rdi,%rdx),%rax
	movnti	%rax,8(%rsi,%rdx)
	movq	16(%rdi,%rdx),%rax
	movnti	%rax,16(%rsi,%rdx)
	movq	24(%rdi,%rdx),%rax
	movnti	%rax,24(%rsi,%rdx)
	addq	$32,%rdx		/* 32 bytes per iteration */
	jne	2b
	sfence				/* order the non-temporal stores */
	ret
188
/*
 * fillw(pat, base, cnt)
 *	%rdi, %rsi, %rdx
 *
 * Store the 16-bit pattern 'pat' cnt times starting at base.
 */
ENTRY(fillw)
	cld				/* store ascending */
	movq	%rdi,%rax		/* pattern into %ax for stosw */
	movq	%rsi,%rdi		/* destination */
	movq	%rdx,%rcx		/* word count */
	rep
	stosw
	ret
199
200 /*****************************************************************************/
201 /* copyout and fubyte family */
202 /*****************************************************************************/
203 /*
204 * Access user memory from inside the kernel. These routines should be
205 * the only places that do this.
206 *
207 * These routines set curpcb->onfault for the time they execute. When a
208 * protection violation occurs inside the functions, the trap handler
209 * returns to *curpcb->onfault instead of the function.
210 */
211
/*
 * copyout(from_kernel, to_user, len)  - MP SAFE
 *         %rdi,        %rsi,    %rdx
 *
 * Returns 0 on success or EFAULT if the destination range is outside
 * user space or a fault occurs during the copy.  curpcb->onfault is
 * armed for the duration so the trap handler bails to copyout_fault.
 */
ENTRY(copyout)
	movq	PCPU(CURPCB),%rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault			/* to + len wrapped */
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault			/* end lies past user space */

	xchgq	%rdi, %rsi			/* %rsi=src, %rdi=dst */
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* copy by 8-byte words */
	cld
	rep
	movsq
	movb	%dl,%cl				/* %rcx is 0 after rep movsq */
	andb	$7,%cl				/* 0..7 trailing bytes */
	rep
	movsb

done_copyout:
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax
	ret
270
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *        %rdi,      %rsi,      %rdx
 *
 * Returns 0 on success or EFAULT if the source range is outside user
 * space or a fault occurs during the copy.
 */
ENTRY(copyin)
	movq	PCPU(CURPCB),%rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault			/* from + len wrapped */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault			/* end lies past user space */

	xchgq	%rdi, %rsi			/* %rsi=src, %rdi=dst */
	movq	%rdx, %rcx
	movb	%cl,%al				/* stash low len bits */
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault recovery */
	movq	$EFAULT,%rax
	ret
315
/*
 * casuword32.  Compare and set user integer.  Returns -1 on fault or
 * the previous value of the word (see the cmpxchg note below).
 *	dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* room for a 4-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgl %edx,(%rdi)			/* new = %edx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
343
/*
 * casuptr.  Compare and set user pointer.  Returns -1 on fault or the
 * previous value of the word (see the cmpxchg note below).
 *	dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuptr)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access: bound was
						 * -4, which let the last 4
						 * bytes cross the limit */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi, %rax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rdx, (%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery
						 * (was redundantly re-armed
						 * here first; dead store
						 * removed) */
	ret
372
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.  All these functions are MPSAFE.
 * Returns the value, or -1 on fault (indistinguishable from a stored -1).
 * addr = %rdi
 */

ALTENTRY(fuword64)
ENTRY(fuword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* room for an 8-byte read */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax			/* fetch the word */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
391
/* Fetch a 32-bit word from user memory; -1 on fault.  addr = %rdi */
ENTRY(fuword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* room for a 4-byte read */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax			/* zero-extends into %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
403
/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax		/* unconditionally fail (see above) */
	ret
415
/* Fetch a 16-bit word from user memory; -1 on fault.  addr = %rdi */
ENTRY(fuword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* room for a 2-byte read */
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax			/* zero-extend the halfword */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
427
/* Fetch a byte from user memory; -1 on fault.  addr = %rdi */
ENTRY(fubyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* room for a 1-byte read */
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax			/* zero-extend the byte */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
439
	ALIGN_TEXT
/*
 * Shared fault handler for the fuword, suword and casu routines:
 * disarm curpcb->onfault and return -1.
 */
fusufault:
	movq	PCPU(CURPCB),%rcx
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm fault recovery */
	decq	%rax			/* return -1 */
	ret
447
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.  All these functions are MPSAFE.
 * Return 0 on success, -1 on fault.
 * addr = %rdi, value = %rsi
 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* room for an 8-byte write */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)			/* store the word */
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
467
/* Store a 32-bit word to user memory; 0 on success, -1 on fault. */
ENTRY(suword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* room for a 4-byte write */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)			/* store the low 32 bits */
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
481
/* Store a 16-bit word to user memory; 0 on success, -1 on fault. */
ENTRY(suword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* room for a 2-byte write */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)			/* store the low 16 bits */
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
495
/* Store a byte to user memory; 0 on success, -1 on fault. */
ENTRY(subyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* room for a 1-byte write */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi, %eax
	movb	%al,(%rdi)			/* store the low byte */
	xorq	%rax,%rax			/* success */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
	ret
510
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	movq	%rdx, %r8			/* %r8 = maxlen */
	movq	%rcx, %r9			/* %r9 = *len */
	xchgq	%rdi, %rsi			/* %rsi = from, %rdi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault recovery */

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt			/* source starts in kernel space */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx			/* clamp the loop counter ... */
	movq	%rax,%r8			/* ... and the reported maxlen */
1:
	incq	%rdx				/* pre-bias for the decq below */
	cld

2:
	decq	%rdx
	jz	3f				/* ran out of room */

	lodsb					/* copy one byte */
	stosb
	orb	%al,%al
	jnz	2b				/* not NUL yet, keep going */

	/* Success -- 0 byte reached */
	decq	%rdx				/* don't count the NUL slot */
	xorq	%rax,%rax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	cmpq	%rax,%rsi			/* stopped at the user limit? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */

	testq	%r9,%r9				/* lencopied requested? */
	jz	1f
	subq	%rdx,%r8			/* bytes consumed, incl. NUL */
	movq	%r8,(%r9)
1:
	ret
578
579
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *         %rdi, %rsi, %rdx, %rcx
 *
 * Kernel-to-kernel variant of copyinstr(): no user-space bounds checks
 * and no fault recovery.  Returns 0 or ENAMETOOLONG.
 */
ENTRY(copystr)
	movq	%rdx, %r8			/* %r8 = maxlen */

	xchgq	%rdi, %rsi			/* %rsi = from, %rdi = to */
	incq	%rdx				/* pre-bias for the decq below */
	cld
1:
	decq	%rdx
	jz	4f				/* ran out of room */
	lodsb					/* copy one byte */
	stosb
	orb	%al,%al
	jnz	1b				/* not NUL yet, keep going */

	/* Success -- 0 byte reached */
	decq	%rdx				/* don't count the NUL slot */
	xorq	%rax,%rax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx, %rcx			/* lencopied requested? */
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx, %r8			/* bytes consumed, incl. NUL */
	movq	%r8, (%rcx)
7:
	ret
615
/*
 * Handling of special amd64 registers and descriptor tables etc
 * %rdi
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload the data segment registers with the kernel selector */
	movl	$KDSEL, %eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
642
643 /*****************************************************************************/
644 /* setjump, longjump */
645 /*****************************************************************************/
646
/*
 * setjmp(jmp_buf) -- %rdi points at a 9-quad save area.
 * Save the callee-saved registers, the stack pointer and the return
 * address, then return 0.  Layout: rbx, rsp, rbp, r12..r15, rip.
 */
ENTRY(setjmp)
	movq	(%rsp),%rax		/* resume %rip = our return addr */
	movq	%rax,56(%rdi)
	movq	%r15,48(%rdi)
	movq	%r14,40(%rdi)
	movq	%r13,32(%rdi)
	movq	%r12,24(%rdi)
	movq	%rbp,16(%rdi)
	movq	%rsp,8(%rdi)
	movq	%rbx,(%rdi)
	xorl	%eax,%eax		/* direct call returns 0 */
	ret
659
/*
 * longjmp(jmp_buf) -- %rdi points at the area filled by setjmp().
 * Restore the saved registers and stack, patch the return address so
 * ret resumes after the matching setjmp(), and return 1.
 */
ENTRY(longjmp)
	movq	8(%rdi),%rsp		/* switch to the saved stack first */
	movq	56(%rdi),%rax		/* saved resume %rip */
	movq	%rax,(%rsp)		/* overwrite our return address */
	movq	(%rdi),%rbx
	movq	16(%rdi),%rbp
	movq	24(%rdi),%r12
	movq	32(%rdi),%r13
	movq	40(%rdi),%r14
	movq	48(%rdi),%r15
	movl	$1,%eax			/* resumed setjmp() returns 1 */
	ret
673
/*
 * Support for BB-profiling (gcc -a).  The kernbb program will extract
 * the data from the kernel.
 */

	.data
	ALIGN_DATA
	.globl	bbhead
bbhead:
	.quad	0			/* head of the basic-block list */

	.text
/*
 * __bb_init_func(%rdi = bb record): mark the record initialized and
 * push it onto the bbhead list.
 */
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)		/* first quad = 1: initialized */
	movq	bbhead,%rax
	movq	%rax,32(%rdi)		/* presumably the link field at
					 * offset 32 -- verify against the
					 * gcc -a record layout */
	movq	%rdi,bbhead		/* new list head */
	NON_GPROF_RET
/* Cache object: 2e434a79822b65beeadc210b7f6c1a7d */