1 /*-
2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: releng/11.2/sys/amd64/amd64/support.S 334150 2018-05-24 11:59:33Z kib $
31 */
32
33 #include "opt_ddb.h"
34
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
38
39 #include "assym.s"
40
41 .text
42
/*
 * bcopy family
 * void bzero(void *buf, u_int len)
 * %rdi = buf, %rsi = len
 *
 * Zero len bytes: 8-byte stores for len / 8 qwords, then byte
 * stores for the remaining len % 8 bytes.
 */
ENTRY(bzero)
	PUSH_FRAME_POINTER
	movq	%rsi,%rcx		/* %rcx = len */
	xorl	%eax,%eax		/* value to store: 0 */
	shrq	$3,%rcx			/* count of 8-byte words */
	rep
	stosq
	movq	%rsi,%rcx
	andq	$7,%rcx			/* remaining bytes */
	rep
	stosb
	POP_FRAME_POINTER
	ret
END(bzero)
63
/*
 * void pagezero(void *page)
 * Address: %rdi
 * Zero one page with qword stores; assumes the buffer is
 * PAGE_SIZE bytes and at least 8-byte aligned.
 */
ENTRY(pagezero)
	PUSH_FRAME_POINTER
	movq	$PAGE_SIZE/8,%rcx	/* page size in 8-byte words */
	xorl	%eax,%eax
	rep
	stosq
	POP_FRAME_POINTER
	ret
END(pagezero)
74
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 * %rdi = b1, %rsi = b2, %rdx = len
 * Returns 0 if the buffers are identical, non-zero otherwise.
 */
ENTRY(bcmp)
	PUSH_FRAME_POINTER
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* compare 8 bytes at a time */
	repe
	cmpsq
	jne	1f			/* qword mismatch found */

	movq	%rdx,%rcx
	andq	$7,%rcx			/* compare remaining bytes */
	repe
	cmpsb
1:
	setne	%al			/* %al = 1 iff some byte differed */
	movsbl	%al,%eax
	POP_FRAME_POINTER
	ret
END(bcmp)
93
/*
 * bcopy(src, dst, cnt)
 *       rdi, rsi, rdx
 * Overlap-safe copy: copies backwards when the regions overlap
 * and src < dst, forwards otherwise.
 * ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
ENTRY(bcopy)
	PUSH_FRAME_POINTER
	xchgq	%rsi,%rdi		/* %rsi = src, %rdi = dst */
	movq	%rdx,%rcx

	movq	%rdi,%rax
	subq	%rsi,%rax
	cmpq	%rcx,%rax		/* overlapping && src < dst? */
	jb	1f

	shrq	$3,%rcx			/* copy by 64-bit words */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	POP_FRAME_POINTER
	ret

	/* ALIGN_TEXT */
1:
	addq	%rcx,%rdi		/* copy backwards */
	addq	%rcx,%rsi
	decq	%rdi			/* start at the last byte */
	decq	%rsi
	andq	$7,%rcx			/* any fractional bytes? */
	std				/* string ops now run downwards */
	rep
	movsb
	movq	%rdx,%rcx		/* copy remainder by 64-bit words */
	shrq	$3,%rcx
	subq	$7,%rsi			/* step back to low byte of qword */
	subq	$7,%rdi
	rep
	movsq
	cld				/* restore normal direction flag */
	POP_FRAME_POINTER
	ret
END(bcopy)
139
/*
 * memcpy(dst, src, cnt)
 *        rdi, rsi, rdx
 * Note: memcpy does not support overlapping copies.
 * Returns dst in %rax per the C memcpy() contract.
 */
ENTRY(memcpy)
	PUSH_FRAME_POINTER
	movq	%rdi,%rax		/* return value: dst */
	movq	%rdx,%rcx
	shrq	$3,%rcx			/* copy by 64-bit words */
	rep
	movsq
	movq	%rdx,%rcx
	andq	$7,%rcx			/* any bytes left? */
	rep
	movsb
	POP_FRAME_POINTER
	ret
END(memcpy)
157
/*
 * pagecopy(%rdi=from, %rsi=to)
 * Copy one page with non-temporal stores so the destination does
 * not displace useful data from the caches.
 */
ENTRY(pagecopy)
	PUSH_FRAME_POINTER
	movq	$-PAGE_SIZE,%rax	/* negative index counting up to 0 */
	movq	%rax,%rdx
	subq	%rax,%rdi		/* point at end of source page */
	subq	%rax,%rsi		/* point at end of destination */
1:
	prefetchnta (%rdi,%rax)		/* prefetch entire source page, */
	addq	$64,%rax		/* one cache line per iteration */
	jne	1b
2:
	movq	(%rdi,%rdx),%rax	/* copy 32 bytes per iteration */
	movnti	%rax,(%rsi,%rdx)
	movq	8(%rdi,%rdx),%rax
	movnti	%rax,8(%rsi,%rdx)
	movq	16(%rdi,%rdx),%rax
	movnti	%rax,16(%rsi,%rdx)
	movq	24(%rdi,%rdx),%rax
	movnti	%rax,24(%rsi,%rdx)
	addq	$32,%rdx
	jne	2b
	sfence				/* make non-temporal stores visible */
	POP_FRAME_POINTER
	ret
END(pagecopy)
186
/*
 * fillw(pat, base, cnt)
 *       %rdi, %rsi, %rdx
 * Store cnt copies of the 16-bit pattern pat at base.
 */
ENTRY(fillw)
	PUSH_FRAME_POINTER
	movq	%rdi,%rax		/* %ax = pattern for stosw */
	movq	%rsi,%rdi		/* %rdi = destination */
	movq	%rdx,%rcx		/* %rcx = word count */
	rep
	stosw
	POP_FRAME_POINTER
	ret
END(fillw)
199
200 /*****************************************************************************/
201 /* copyout and fubyte family */
202 /*****************************************************************************/
203 /*
204 * Access user memory from inside the kernel. These routines should be
205 * the only places that do this.
206 *
207 * These routines set curpcb->pcb_onfault for the time they execute. When a
208 * protection violation occurs inside the functions, the trap handler
209 * returns to *curpcb->pcb_onfault instead of the function.
210 */
211
/*
 * copyout(from_kernel, to_user, len)
 *         %rdi,        %rsi,    %rdx
 * Returns 0 on success, EFAULT on a bad user address or a fault
 * taken during the copy (via pcb_onfault).
 */
ENTRY(copyout)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rax
	movq	$copyout_fault,PCB_ONFAULT(%rax)	/* arm fault handler */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyout

	/*
	 * Check explicitly for non-user addresses.  This check is essential
	 * because it prevents usermode from writing into the kernel.  We do
	 * not verify anywhere else that the user did not specify a rogue
	 * address.
	 */
	/*
	 * First, prevent address wrapping.
	 */
	movq	%rsi,%rax
	addq	%rdx,%rax
	jc	copyout_fault			/* to_user + len overflowed */
	/*
	 * XXX STOP USING VM_MAXUSER_ADDRESS.
	 * It is an end address, not a max, so every time it is used correctly it
	 * looks like there is an off by one error, and of course it caused an off
	 * by one error in several places.
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyout_fault			/* range extends into kernel */

	xchgq	%rdi,%rsi
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rdx,%rcx

	shrq	$3,%rcx				/* copy by 64-bit words */
	rep
	movsq
	movb	%dl,%cl
	andb	$7,%cl				/* then the remaining bytes */
	rep
	movsb

done_copyout:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret

	ALIGN_TEXT
copyout_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	POP_FRAME_POINTER
	ret
END(copyout)
272
/*
 * copyin(from_user, to_kernel, len)
 *        %rdi,      %rsi,      %rdx
 * Returns 0 on success, EFAULT on a bad user address or a fault
 * taken during the copy (via pcb_onfault).
 */
ENTRY(copyin)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rax
	movq	$copyin_fault,PCB_ONFAULT(%rax)	/* arm fault handler */
	testq	%rdx,%rdx			/* anything to do? */
	jz	done_copyin

	/*
	 * make sure address is valid
	 */
	movq	%rdi,%rax
	addq	%rdx,%rax
	jc	copyin_fault			/* from_user + len overflowed */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	cmpq	%rcx,%rax
	ja	copyin_fault			/* range extends into kernel */

	xchgq	%rdi,%rsi			/* %rsi = src, %rdi = dst */
	movq	%rdx,%rcx
	movb	%cl,%al				/* save low byte of len */
	shrq	$3,%rcx				/* copy longword-wise */
	rep
	movsq
	movb	%al,%cl
	andb	$7,%cl				/* copy remaining bytes */
	rep
	movsb

done_copyin:
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rdx
	movq	%rax,PCB_ONFAULT(%rdx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret

	ALIGN_TEXT
copyin_fault:
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)
	movq	$EFAULT,%rax
	POP_FRAME_POINTER
	ret
END(copyin)
320
/*
 * casueword32.  Compare and set user integer.  Returns -1 on fault,
 *        0 if access was successful.  Old value is written to *oldp.
 *        dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
 * The caller detects CAS failure by comparing *oldp with old.
 */
ENTRY(casueword32)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movl	%esi,%eax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */
	movl	%eax,%esi
	xorl	%eax,%eax
	movq	%rax,PCB_ONFAULT(%r8)

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */
	POP_FRAME_POINTER
	ret
END(casueword32)
359
/*
 * casueword.  Compare and set user long.  Returns -1 on fault,
 *        0 if access was successful.  Old value is written to *oldp.
 *        dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
 * The caller detects CAS failure by comparing *oldp with old.
 */
ENTRY(casueword)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)	/* arm fault handler */

	/*
	 * An 8-byte access is performed, so the last valid start address
	 * is VM_MAXUSER_ADDRESS-8, matching fueword64/suword64.  (The
	 * previous -4 bound let the cmpxchgq extend 4 bytes past the user
	 * address-space limit; such an access would have faulted into
	 * fusufault anyway, so tightening this is caller-transparent.)
	 */
	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	movq	%rsi,%rax			/* old */
#ifdef SMP
	lock
#endif
	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,%rsi
	xorl	%eax,%eax			/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)		/* disarm before *oldp write */
	movq	%rsi,(%rdx)			/* *oldp = observed value */
	POP_FRAME_POINTER
	ret
END(casueword)
392
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.
 * addr = %rdi, valp = %rsi
 * Returns 0 with *valp set on success, -1 (via fusufault) on fault.
 */

ALTENTRY(fueword64)
ENTRY(fueword)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	xorl	%eax,%eax			/* return 0 */
	movq	(%rdi),%r11			/* load before disarming */
	movq	%rax,PCB_ONFAULT(%rcx)
	movq	%r11,(%rsi)			/* *valp = value */
	POP_FRAME_POINTER
	ret
END(fueword64)
END(fueword)
417
/*
 * fueword32(addr, valp): fetch a 32-bit word from user memory.
 * addr = %rdi, valp = %rsi
 * Returns 0 with *valp set on success, -1 (via fusufault) on fault.
 */
ENTRY(fueword32)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */
	ja	fusufault

	xorl	%eax,%eax			/* return 0 */
	movl	(%rdi),%r11d			/* load before disarming */
	movq	%rax,PCB_ONFAULT(%rcx)
	movl	%r11d,(%rsi)			/* *valp = value */
	POP_FRAME_POINTER
	ret
END(fueword32)
434
/*
 * fuswintr() and suswintr() are specialized variants of fuword16() and
 * suword16(), respectively.  They are called from the profiling code,
 * potentially at interrupt time.  If they fail, that's okay; good things
 * will happen later.  They always fail for now, until the trap code is
 * able to deal with this.
 */
ALTENTRY(suswintr)
ENTRY(fuswintr)
	movq	$-1,%rax		/* unconditional failure */
	ret
END(suswintr)
END(fuswintr)
448
/*
 * fuword16(addr): fetch a 16-bit word from user memory.
 * addr = %rdi; returns the zero-extended value, or -1 on fault.
 */
ENTRY(fuword16)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access */
	cmpq	%rax,%rdi
	ja	fusufault

	movzwl	(%rdi),%eax			/* zero-extend the word */
	movq	$0,PCB_ONFAULT(%rcx)
	POP_FRAME_POINTER
	ret
END(fuword16)
463
/*
 * fubyte(addr): fetch an 8-bit byte from user memory.
 * addr = %rdi; returns the zero-extended value, or -1 on fault.
 */
ENTRY(fubyte)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access */
	cmpq	%rax,%rdi
	ja	fusufault

	movzbl	(%rdi),%eax			/* zero-extend the byte */
	movq	$0,PCB_ONFAULT(%rcx)
	POP_FRAME_POINTER
	ret
END(fubyte)
478
	ALIGN_TEXT
/*
 * Common fault landing pad for the fu*/su*/casueword* family: the
 * trap handler transfers here via pcb_onfault.  Disarm the handler
 * and return -1.
 */
fusufault:
	movq	PCPU(CURPCB),%rcx
	xorl	%eax,%eax
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm fault handler */
	decq	%rax			/* return -1 */
	POP_FRAME_POINTER
	ret
487
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.
 * addr = %rdi, value = %rsi
 * Returns 0 on success, -1 (via fusufault) on fault.
 */
ALTENTRY(suword64)
ENTRY(suword)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movq	%rsi,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret
END(suword64)
END(suword)
511
/*
 * suword32(addr, value): store a 32-bit word to user memory.
 * addr = %rdi, value = %esi; returns 0 on success, -1 on fault.
 */
ENTRY(suword32)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret
END(suword32)
528
/*
 * suword16(addr, value): store a 16-bit word to user memory.
 * addr = %rdi, value = %si; returns 0 on success, -1 on fault.
 */
ENTRY(suword16)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movw	%si,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret
END(suword16)
545
/*
 * subyte(addr, value): store an 8-bit byte to user memory.
 * addr = %rdi, value = %sil; returns 0 on success, -1 on fault.
 */
ENTRY(subyte)
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access */
	cmpq	%rax,%rdi			/* verify address validity */
	ja	fusufault

	movl	%esi,%eax
	movb	%al,(%rdi)
	xorl	%eax,%eax			/* return 0 */
	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* disarm fault handler */
	POP_FRAME_POINTER
	ret
END(subyte)
563
/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' (user space) to 'to' (kernel),
 *	stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations.  If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
ENTRY(copyinstr)
	PUSH_FRAME_POINTER
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault handler */

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */
	subq	%rsi,%rax
	jbe	cpystrflt

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	cmpq	%rdx,%rax
	jae	1f
	movq	%rax,%rdx
	movq	%rax,%r8
1:
	incq	%rdx			/* pre-bias for the leading decq */

2:
	decq	%rdx
	jz	3f			/* ran out of room */

	lodsb				/* copy one byte (incl. the NUL) */
	stosb
	orb	%al,%al
	jnz	2b

	/* Success -- 0 byte reached */
	decq	%rdx			/* %rdx = room left after the NUL */
	xorl	%eax,%eax		/* return 0 */
	jmp	cpystrflt_x
3:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	cmpq	%rax,%rsi
	jae	cpystrflt		/* stopped at address-space limit */
4:
	movq	$ENAMETOOLONG,%rax
	jmp	cpystrflt_x

cpystrflt:
	movq	$EFAULT,%rax

cpystrflt_x:
	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)	/* disarm fault handler */

	testq	%r9,%r9
	jz	1f
	subq	%rdx,%r8		/* bytes actually copied */
	movq	%r8,(%r9)
1:
	POP_FRAME_POINTER
	ret
END(copyinstr)
633
/*
 * copystr(from, to, maxlen, int *lencopied)
 *         %rdi, %rsi, %rdx, %rcx
 * Kernel-to-kernel variant of copyinstr: no user-address checks,
 * no fault handler.  Returns 0 or ENAMETOOLONG; stores the copied
 * length (including the NUL) in *lencopied when non-NULL.
 */
ENTRY(copystr)
	PUSH_FRAME_POINTER
	movq	%rdx,%r8			/* %r8 = maxlen */

	xchgq	%rdi,%rsi			/* %rsi = from, %rdi = to */
	incq	%rdx				/* pre-bias for leading decq */
1:
	decq	%rdx
	jz	4f				/* ran out of room */
	lodsb					/* copy one byte (incl. NUL) */
	stosb
	orb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached */
	decq	%rdx				/* room left after the NUL */
	xorl	%eax,%eax			/* return 0 */
	jmp	6f
4:
	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

6:

	testq	%rcx,%rcx
	jz	7f
	/* set *lencopied and return %rax */
	subq	%rdx,%r8			/* bytes actually copied */
	movq	%r8,(%rcx)
7:
	POP_FRAME_POINTER
	ret
END(copystr)
671
/*
 * Handling of special amd64 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */
	lgdt	(%rdi)

	/* flush the prefetch q */
	jmp	1f
	nop
1:
	/* reload the data segment registers with the kernel selector */
	movl	$KDSEL,%eax
	movl	%eax,%ds
	movl	%eax,%es
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	movl	%eax,%gs
	movl	%eax,%ss

	/* reload code selector by turning return into intersegmental return */
	popq	%rax
	pushq	$KCSEL
	pushq	%rax
	MEXITCOUNT
	lretq			/* far return loads %cs = KCSEL */
END(lgdt)
698
699 /*****************************************************************************/
700 /* setjump, longjump */
701 /*****************************************************************************/
702
/*
 * setjmp(buf): save the callee-saved register context and return
 * address into buf (%rdi).  Returns 0; a later longjmp(buf) makes
 * this call site appear to return again with value 1.
 */
ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	ret
END(setjmp)
716
/*
 * longjmp(buf): restore the context saved by setjmp(buf) (%rdi)
 * and resume at the saved return address with return value 1.
 */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
	ret
END(longjmp)
731
/*
 * Support for reading MSRs in the safe manner.
 * Returns 0 with the MSR value in *data, or EFAULT (via msr_onfault)
 * if the rdmsr faults, e.g. on a non-existent MSR.
 */
ENTRY(rdmsr_safe)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* catch #GP from rdmsr */
	movl	%edi,%ecx
	rdmsr			/* Read MSR pointed by %ecx. Returns
				   hi 32 bits in %edx, lo in %eax */
	salq	$32,%rdx	/* shift high half into place */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
	orq	%rdx,%rax	/* %rax = 64-bit MSR value */
	movq	%rax,(%rsi)	/* *data = value */
	xorq	%rax,%rax	/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm fault handler */
	POP_FRAME_POINTER
	ret
751
/*
 * Support for writing MSRs in the safe manner.
 * Returns 0 on success, or EFAULT (via msr_onfault) if the wrmsr
 * faults, e.g. on a non-existent MSR or an invalid value.
 */
ENTRY(wrmsr_safe)
/* int wrmsr_safe(u_int msr, uint64_t data) */
	PUSH_FRAME_POINTER
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* catch #GP from wrmsr */
	movl	%edi,%ecx
	movl	%esi,%eax	/* low 32 bits of data */
	sarq	$32,%rsi
	movl	%esi,%edx	/* high 32 bits of data */
	wrmsr			/* Write MSR pointed by %ecx. Accepts
				   hi 32 bits in %edx, lo in %eax. */
	xorq	%rax,%rax	/* return 0 */
	movq	%rax,PCB_ONFAULT(%r8)	/* disarm fault handler */
	POP_FRAME_POINTER
	ret
770
/*
 * MSR operations fault handler: reached via pcb_onfault when
 * rdmsr/wrmsr traps.  Disarm the handler and return EFAULT.
 */
	ALIGN_TEXT
msr_onfault:
	movq	$0,PCB_ONFAULT(%r8)	/* %r8 still holds curpcb */
	movl	$EFAULT,%eax
	POP_FRAME_POINTER
	ret
780
/*
 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
 * Invalidates address space addressed by ucr3, then returns to kcr3.
 * Done in assembler to ensure no other memory accesses happen while
 * on ucr3.
 */
	ALIGN_TEXT
ENTRY(pmap_pti_pcid_invalidate)
	pushfq			/* preserve interrupt-enable state */
	cli			/* no interrupts while on user page table */
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */
	popfq
	retq
795
/*
 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
 */
	ALIGN_TEXT
ENTRY(pmap_pti_pcid_invlpg)
	pushfq			/* preserve interrupt-enable state */
	cli			/* no interrupts while on user page table */
	movq	%rdi,%cr3	/* to user page table */
	invlpg	(%rdx)		/* invalidate va in the user mapping */
	movq	%rsi,%cr3	/* back to kernel */
	popfq
	retq
809
/*
 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
 *     vm_offset_t eva);
 * Invalidates virtual addresses between sva and eva in address space ucr3,
 * then returns to kcr3.
 */
	ALIGN_TEXT
ENTRY(pmap_pti_pcid_invlrng)
	pushfq			/* preserve interrupt-enable state */
	cli			/* no interrupts while on user page table */
	movq	%rdi,%cr3	/* to user page table */
1:	invlpg	(%rdx)		/* invalidate one page ... */
	addq	$PAGE_SIZE,%rdx	/* ... at a time, until eva (%rcx) */
	cmpq	%rdx,%rcx
	ja	1b
	movq	%rsi,%cr3	/* back to kernel */
	popfq
	retq
828
	.altmacro
/* Emit the label handle_ibrs_<l>:. */
	.macro	ibrs_seq_label l
handle_ibrs_\l:
	.endm
/* Emit a call to handle_ibrs_<l>. */
	.macro	ibrs_call_label l
	call	handle_ibrs_\l
	.endm
/*
 * Emit \count call/label pairs; each call pushes a return address
 * which is immediately discarded (addq $8,%rsp), so execution falls
 * straight through — presumably this overwrites return-stack-buffer
 * entries for speculation hardening; confirm against the IBRS
 * mitigation notes.
 */
	.macro	ibrs_seq count
	ll=1
	.rept	\count
	ibrs_call_label	%(ll)
	nop
	ibrs_seq_label	%(ll)
	addq	$8,%rsp
	ll=ll+1
	.endr
	.endm
846
/* all callers already saved %rax, %rdx, and %rcx */
/*
 * Kernel-entry IBRS handling: if hw_ibrs_active, set the IBRS and
 * STIBP bits in MSR IA32_SPEC_CTRL and note it in PCPU(IBPB_SET).
 * When SMEP is absent, additionally run the ibrs_seq sequence.
 */
ENTRY(handle_ibrs_entry)
	cmpb	$0,hw_ibrs_active(%rip)	/* IBRS not enabled: nothing to do */
	je	1f
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
	wrmsr
	movb	$1,PCPU(IBPB_SET)	/* remember for handle_ibrs_exit* */
	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
	jne	1f			/* SMEP present: skip the sequence */
	ibrs_seq	32
1:	ret
END(handle_ibrs_entry)
862
/*
 * Kernel-exit IBRS handling: if handle_ibrs_entry set IBPB_SET,
 * clear the IBRS and STIBP bits in MSR IA32_SPEC_CTRL.
 * Clobbers %rax, %rdx, %rcx (callers must have saved them).
 */
ENTRY(handle_ibrs_exit)
	cmpb	$0,PCPU(IBPB_SET)	/* nothing was set on entry? */
	je	1f
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	wrmsr
	movb	$0,PCPU(IBPB_SET)
1:	ret
END(handle_ibrs_exit)
874
/* registers-neutral version, but needs stack */
/*
 * Same as handle_ibrs_exit, but preserves %rax/%rdx/%rcx by saving
 * them on the stack, for call sites that cannot spare registers.
 */
ENTRY(handle_ibrs_exit_rs)
	cmpb	$0,PCPU(IBPB_SET)	/* nothing was set on entry? */
	je	1f
	pushq	%rax			/* preserve caller's registers */
	pushq	%rdx
	pushq	%rcx
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	rdmsr
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	wrmsr
	popq	%rcx
	popq	%rdx
	popq	%rax
	movb	$0,PCPU(IBPB_SET)
1:	ret
END(handle_ibrs_exit_rs)
893
894 .noaltmacro
Cache object: 544d4a76f55ee547630688b3794f86fe
|