1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD$
31 */
32
33 #include "opt_ddb.h"
34
35 #include <machine/asmacros.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38
39 #include "assym.s"
40
/*
 * Interrupt statistics storage.  Each array is bracketed by start/end
 * labels so consumers (e.g. vmstat) can compute the size at run time.
 * INTRCNT_COUNT and MAXCOMLEN come from assym.s.
 */
	ALIGN_DATA
	.globl	intrcnt, eintrcnt
intrcnt:
	.space	INTRCNT_COUNT * 8		/* one 64-bit counter per interrupt source */
eintrcnt:

	.globl	intrnames, eintrnames
intrnames:
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1)	/* one NUL-padded name slot per counter */
eintrnames:
51
52 .text
53
54 /*
55 * bcopy family
56 * void bzero(void *buf, u_int len)
57 */
58
59 /* done */
/*
 * void bzero(void *buf, size_t len)
 * %rdi = buf, %rsi = len
 * Zeroes len bytes: first len/8 quadwords with rep stosq, then the
 * remaining len%8 bytes with rep stosb.  Clobbers %rax, %rcx, %rdi.
 */
ENTRY(bzero)
	movq	%rsi,%rcx
	xorq	%rax,%rax			/* fill value = 0 */
	shrq	$3,%rcx				/* quadword count */
	cld					/* store forwards */
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx				/* leftover byte count */
	rep
	stosb
	ret
72
73 /* Address: %rdi */
/*
 * void pagezero(void *page)
 * %rdi = page (assumed PAGE_SIZE-aligned and exactly PAGE_SIZE bytes).
 * Zeroes one page using non-temporal stores (movnti) to avoid polluting
 * the cache.  %rdx runs from -PAGE_SIZE up to 0, indexing off the page
 * end, 32 bytes per iteration; sfence orders the NT stores on exit.
 */
ENTRY(pagezero)
	movq	$-PAGE_SIZE,%rdx
	subq	%rdx,%rdi			/* %rdi = one past end of page */
	xorq	%rax,%rax
1:
	movnti	%rax,(%rdi,%rdx)
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
	addq	$32,%rdx
	jne	1b				/* until %rdx reaches 0 */
	sfence					/* make NT stores globally visible */
	ret
87
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 * %rdi = b1, %rsi = b2, %rdx = len
 * Returns 0 if the buffers are identical, nonzero otherwise.
 * Compares len/8 quadwords, then the remaining len%8 bytes.
 * Note: when the quadword count is 0, repe cmpsq executes nothing and
 * leaves ZF=1 from the preceding shrq, so jne correctly falls through.
 */
ENTRY(bcmp)
	xorq	%rax,%rax			/* assume equal */

	movq	%rdx,%rcx
	shrq	$3,%rcx				/* quadword count */
	cld					/* compare forwards */
	repe
	cmpsq
	jne	1f				/* mismatch in a quadword */

	movq	%rdx,%rcx
	andq	$7,%rcx				/* leftover byte count */
	repe
	cmpsb
	je	2f				/* all bytes equal: return 0 */
1:
	incq	%rax				/* mismatch: return 1 */
2:
	ret
107
108 /*
109 * bcopy(src, dst, cnt)
110 * rdi, rsi, rdx
111 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
112 */
/*
 * bcopy(src, dst, cnt) - handles overlapping regions
 * rdi, rsi, rdx
 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * If dst overlaps src from above (0 <= dst - src < cnt, computed as an
 * unsigned compare), copies backwards; otherwise copies forwards with
 * movsq + trailing movsb.  DF is restored (cld) before returning from
 * the backwards path.
 */
ENTRY(bcopy)
	xchgq	%rsi,%rdi			/* string ops want %rsi=src, %rdi=dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax			/* %rax = dst - src */
	cmpq	%rcx,%rax			/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* nope, copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi			/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi				/* point at last byte of each region */
	decq	%rsi
	andq	$7,%rcx				/* any fractional bytes? */
	std					/* string ops decrement */
	rep
	movsb					/* tail bytes first, backwards */
	movq	%rdx,%rcx			/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi				/* step back to last full quadword */
	subq	$7,%rdi
	rep
	movsq
	cld					/* restore DF for callers */
	ret
150
151 /*
152 * Note: memcpy does not support overlapping copies
153 */
/*
 * memcpy(dst, src, cnt)
 * %rdi, %rsi, %rdx
 * Note: memcpy does not support overlapping copies.
 * Returns nothing useful in %rax (kernel callers ignore the return).
 */
ENTRY(memcpy)
	movq	%rdx,%rcx
	shrq	$3,%rcx				/* copy by 64-bit words */
	cld					/* copy forwards */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx				/* any bytes left? */
	rep
	movsb
	ret
165
166 /*
167 * pagecopy(%rdi=from, %rsi=to)
168 */
/*
 * pagecopy(%rdi=from, %rsi=to)
 * Copies one PAGE_SIZE-aligned page.  First pass prefetches the source
 * (one line per 64 bytes, non-temporal hint); second pass copies 32
 * bytes per iteration with movnti stores to bypass the cache.  Both
 * loops index with a negative offset counting up to 0.
 */
ENTRY(pagecopy)
	movq	$-PAGE_SIZE,%rax
	movq	%rax,%rdx
	subq	%rax,%rdi			/* point past end of source */
	subq	%rax,%rsi			/* point past end of destination */
1:
	prefetchnta (%rdi,%rax)			/* warm source, don't cache-pollute */
	addq	$64,%rax
	jne	1b
2:
	movq	(%rdi,%rdx),%rax
	movnti	%rax,(%rsi,%rdx)
	movq	8(%rdi,%rdx),%rax
	movnti	%rax,8(%rsi,%rdx)
	movq	16(%rdi,%rdx),%rax
	movnti	%rax,16(%rsi,%rdx)
	movq	24(%rdi,%rdx),%rax
	movnti	%rax,24(%rsi,%rdx)
	addq	$32,%rdx
	jne	2b
	sfence					/* order the NT stores */
	ret
191
192 /* fillw(pat, base, cnt) */
193 /* %rdi,%rsi, %rdx */
/* fillw(pat, base, cnt) - store cnt copies of the 16-bit pattern */
/*	 %rdi,%rsi, %rdx */
ENTRY(fillw)
	movq	%rdi,%rax			/* stosw stores %ax */
	movq	%rsi,%rdi			/* destination */
	movq	%rdx,%rcx			/* word count */
	cld
	rep
	stosw
	ret
202
203 /*****************************************************************************/
204 /* copyout and fubyte family */
205 /*****************************************************************************/
206 /*
207 * Access user memory from inside the kernel. These routines should be
208 * the only places that do this.
209 *
210 * These routines set curpcb->onfault for the time they execute. When a
211 * protection violation occurs inside the functions, the trap handler
212 * returns to *curpcb->onfault instead of the function.
213 */
214
215 /*
216 * copyout(from_kernel, to_user, len) - MP SAFE
217 * %rdi, %rsi, %rdx
218 */
/*
 * copyout(from_kernel, to_user, len) - MP SAFE
 *		%rdi,	     %rsi,  %rdx
 * Returns 0 on success, EFAULT on a bad user address or page fault.
 * Arms curpcb->pcb_onfault so the trap handler lands on copyout_fault.
 */
ENTRY(copyout)
	movq	PCPU(CURPCB),%rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */

	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax			/* %rax = end of user range */
	jc	copyout_fault			/* wrapped past 2^64 */
/*
 * XXX STOP USING VM_MAXUSER_ADDRESS.
 * It is an end address, not a max, so every time it is used correctly it
 * looks like there is an off by one error, and of course it caused an off
 * by one error in several places.
 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault			/* range extends into kernel space */

	xchgq	%rdi, %rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* quadword count */
	cld
	rep
	movsq
	movb	%dl,%cl				/* %rcx is 0 after rep movsq */
	andb	$7,%cl				/* leftover byte count */
	rep
	movsb

done_copyout:
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	movq	$EFAULT,%rax
	ret
273
274 /*
275 * copyin(from_user, to_kernel, len) - MP SAFE
276 * %rdi, %rsi, %rdx
277 */
/*
 * copyin(from_user, to_kernel, len) - MP SAFE
 *		%rdi,	   %rsi,   %rdx
 * Returns 0 on success, EFAULT on a bad user address or page fault.
 * Arms curpcb->pcb_onfault so the trap handler lands on copyin_fault.
 */
ENTRY(copyin)
	movq	PCPU(CURPCB),%rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax			/* %rax = end of user range */
	jc	copyin_fault			/* wrapped past 2^64 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault			/* range extends into kernel space */

	xchgq	%rdi, %rsi
	movq	%rdx, %rcx
	movb	%cl,%al				/* stash low byte of count */
	shrq	$3,%rcx				/* copy longword-wise */
	cld
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	movq	$EFAULT,%rax
	ret
318
/*
 * casuptr.  Compare and set user pointer.  Returns -1 on fault,
 * otherwise the previous value at *dst (== old iff the store happened).
 * dst = %rdi, old = %rsi, new = %rdx
 */
ENTRY(casuptr)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	/*
	 * cmpxchgq is an 8-byte access, so the last valid address is
	 * VM_MAXUSER_ADDRESS-8 (the old -4 bound let the access span
	 * 4 bytes past the end of user space).
	 */
	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi, %rax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rdx, (%rdi)			/* new = %rdx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
347
348 /*
349 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
350 * byte from user memory. All these functions are MPSAFE.
351 * addr = %rdi
352 */
353
/*
 * fuword64/fuword(addr): fetch a 64-bit word from user space.
 * %rdi = addr.  Returns the value in %rax, or -1 (via fusufault) on a
 * bad address or page fault.  Last valid address for an 8-byte read is
 * VM_MAXUSER_ADDRESS-8.
 */
ALTENTRY(fuword64)
ENTRY(fuword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	(%rdi),%rax
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
366
/*
 * fuword32(addr): fetch a 32-bit word from user space.
 * %rdi = addr.  Returns the zero-extended value in %rax, or -1 on fault.
 */
ENTRY(fuword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	(%rdi),%eax			/* 32-bit load zero-extends into %rax */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
378
379 /*
380 * fuswintr() and suswintr() are specialized variants of fuword16() and
381 * suword16(), respectively. They are called from the profiling code,
382 * potentially at interrupt time. If they fail, that's okay; good things
383 * will happen later. They always fail for now, until the trap code is
384 * able to deal with this.
385 */
/*
 * fuswintr()/suswintr(): interrupt-time variants of fuword16/suword16
 * for the profiling code.  Unconditionally fail (return -1) until the
 * trap code can handle faults at interrupt time (see comment above).
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax
	ret
390
/*
 * fuword16(addr): fetch a 16-bit word from user space.
 * %rdi = addr.  Returns the zero-extended value in %rax, or -1 on fault.
 */
ENTRY(fuword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movzwl	(%rdi),%eax			/* zero-extend 16 -> 64 bits */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
402
/*
 * fubyte(addr): fetch a byte from user space.
 * %rdi = addr.  Returns the zero-extended value in %rax, or -1 on fault.
 */
ENTRY(fubyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movzbl	(%rdi),%eax			/* zero-extend 8 -> 64 bits */
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
414
/*
 * Common fault target for the fu*/su* family: disarm the onfault
 * handler and return -1 in %rax.
 */
	ALIGN_TEXT
fusufault:
	movq	PCPU(CURPCB),%rcx
	xorq	%rax,%rax
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	decq	%rax				/* return -1 */
	ret
422
423 /*
424 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
425 * user memory. All these functions are MPSAFE.
426 * addr = %rdi, value = %rsi
427 */
/*
 * suword64/suword(addr, value): store a 64-bit word to user space.
 * %rdi = addr, %rsi = value.  Returns 0 on success, -1 (via fusufault)
 * on a bad address or page fault.
 */
ALTENTRY(suword64)
ENTRY(suword)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
442
/*
 * suword32(addr, value): store a 32-bit word to user space.
 * %rdi = addr, %rsi = value (low 32 bits used).  Returns 0 or -1.
 */
ENTRY(suword32)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
456
/*
 * suword16(addr, value): store a 16-bit word to user space.
 * %rdi = addr, %rsi = value (low 16 bits used).  Returns 0 or -1.
 */
ENTRY(suword16)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
470
/*
 * subyte(addr, value): store a byte to user space.
 * %rdi = addr, %rsi = value (low 8 bits used).  Returns 0 or -1.
 */
ENTRY(subyte)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi, %eax
	movb	%al,(%rdi)
	xorq	%rax,%rax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	ret
485
486 /*
487 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
488 * %rdi, %rsi, %rdx, %rcx
489 *
490 * copy a string from from to to, stop when a 0 character is reached.
491 * return ENAMETOOLONG if string is longer than maxlen, and
492 * EFAULT on protection violations. If lencopied is non-zero,
493 * return the actual length in *lencopied.
494 */
/*
 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
 *		%rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from from to to, stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 *
 * Register use: %r8 = original maxlen, %r9 = lencopied pointer,
 * %rdx = remaining count + 1 during the loop.
 */
ENTRY(copyinstr)
	movq	%rdx, %r8			/* %r8 = maxlen */
	movq	%rcx, %r9			/* %r9 = *len */
	xchgq	%rdi, %rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax			/* %rax = bytes of user space left */
	jbe	cpystrflt			/* 'from' already in kernel space */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx			/* clamp count ... */
	movq	%rax,%r8			/* ... and reported maxlen */
1:
	incq	%rdx				/* pre-bias for the decq in the loop */
	cld

2:
	decq	%rdx
	jz	3f				/* ran out of room */

	lodsb					/* %al = *from++ */
	stosb					/* *to++ = %al */
	orb	%al,%al
	jnz	2b				/* loop until NUL copied */

	/* Success -- 0 byte reached */
	decq	%rdx				/* undo the pre-bias */
	xorq	%rax,%rax			/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	cmpq	%rax,%rsi			/* stopped at the user-space limit? */
	jae	cpystrflt
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */

	testq	%r9,%r9
	jz	1f				/* no lencopied pointer */
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%r9)
1:
	ret
553
554
555 /*
556 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
557 * %rdi, %rsi, %rdx, %rcx
558 */
/*
 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
 *	   %rdi, %rsi, %rdx, %rcx
 * Kernel-to-kernel string copy; same contract as copyinstr but with no
 * user-space bounds checks or fault handling.  Returns 0 on success or
 * ENAMETOOLONG; stores the copied length (incl. NUL) via %rcx if non-NULL.
 */
ENTRY(copystr)
	movq	%rdx, %r8			/* %r8 = maxlen */

	xchgq	%rdi, %rsi			/* %rsi = from, %rdi = to */
	incq	%rdx				/* pre-bias for the decq in the loop */
	cld
1:
	decq	%rdx
	jz	4f				/* ran out of room */
	lodsb					/* %al = *from++ */
	stosb					/* *to++ = %al */
	orb	%al,%al
	jnz	1b				/* loop until NUL copied */

	/* Success -- 0 byte reached */
	decq	%rdx				/* undo the pre-bias */
	xorq	%rax,%rax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx, %rcx
	jz	7f				/* no lencopied pointer */
	/* set *lencopied and return %rax */
	subq	%rdx, %r8			/* bytes actually copied */
	movq	%r8, (%rcx)
7:
	ret
590
591 /*
592 * Handling of special amd64 registers and descriptor tables etc
593 * %rdi
594 */
595 /* void lgdt(struct region_descriptor *rdp); */
/* void lgdt(struct region_descriptor *rdp); */
/*
 * Load a new GDT, then reload every data segment register with KDSEL
 * and reload %cs with KCSEL by converting the return into a far return
 * (push new selector + return address, lretq).
 */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	movl	$KDSEL, %eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq
617
618 /*****************************************************************************/
619 /* setjump, longjump */
620 /*****************************************************************************/
621
/*
 * int setjmp(jmp_buf) - %rdi points at a 9-quadword save area.
 * Saves the callee-saved registers (rbx, rsp, rbp, r12-r15) and the
 * return address; returns 0.  longjmp() returns 1 from this call site.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
634
/*
 * void longjmp(jmp_buf) - %rdi points at the area saved by setjmp().
 * Restores the saved registers, rewrites the return address on the
 * restored stack, and "returns" 1 from the original setjmp() call.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
648
649 /*
650 * Support for BB-profiling (gcc -a). The kernbb program will extract
651 * the data from the kernel.
652 */
653
	.data
	ALIGN_DATA
	/* Head of the singly-linked list of registered BB-profile blocks. */
	.globl	bbhead
bbhead:
	.quad	0
659
660 .text
	.text
/*
 * __bb_init_func(%rdi = bb-profile block): mark the block initialized
 * (store 1 at offset 0) and push it onto the bbhead list via the link
 * field at offset 32.  Offsets presumably match gcc's struct bb layout
 * for -a profiling -- confirm against the toolchain if changed.
 */
NON_GPROF_ENTRY(__bb_init_func)
	movq	$1,(%rdi)
	movq	bbhead,%rax			/* old list head */
	movq	%rax,32(%rdi)			/* block->next = old head */
	movq	%rdi,bbhead			/* head = block */
	NON_GPROF_RET
/* Cache object: 7fab49eb88c9196c0903bf0d73f81d7e (source-browser footer, not code) */