1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/8.4/sys/i386/i386/support.s 215513 2010-11-19 09:49:14Z kib $
30 */
31
32 #include "opt_npx.h"
33
34 #include <machine/asmacros.h>
35 #include <machine/cputypes.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38 #include <machine/specialreg.h>
39
40 #include "assym.s"
41
42 #define IDXSHIFT 10
43
44 .data
45 ALIGN_DATA
/*
 * Interrupt statistics storage: INTRCNT_COUNT 32-bit counters, and one
 * (MAXCOMLEN + 1)-byte name slot per counter.  The eintrcnt/eintrnames
 * labels mark the first byte past each array so consumers can size them.
 */
46 .globl intrcnt, eintrcnt
47 intrcnt:
48 .space INTRCNT_COUNT * 4
49 eintrcnt:
50 
51 .globl intrnames, eintrnames
52 intrnames:
53 .space INTRCNT_COUNT * (MAXCOMLEN + 1)
54 eintrnames:
55 
56 .text
57
58 /*
59 * bcopy family
60 * void bzero(void *buf, u_int len)
61 *
62 * Zero len bytes at buf: clear whole 32-bit words with rep stosl,
63 * then the remaining 0-3 tail bytes with rep stosb.
64 */
62 ENTRY(bzero)
63 pushl %edi
64 movl 8(%esp),%edi		/* %edi = buf */
65 movl 12(%esp),%ecx		/* %ecx = len */
66 xorl %eax,%eax		/* fill value: zero */
67 shrl $2,%ecx		/* word count = len / 4 */
68 cld
69 rep
70 stosl
71 movl 12(%esp),%ecx
72 andl $3,%ecx		/* tail byte count = len & 3 */
73 rep
74 stosb
75 popl %edi
76 ret
77 END(bzero)
78
/*
 * sse2_pagezero(addr)
 *
 * Zero one 4096-byte page using non-temporal stores (movnti) so the
 * zeroing does not displace useful data from the caches; the sfence
 * orders the weakly-ordered stores before returning.
 */
79 ENTRY(sse2_pagezero)
80 pushl %ebx
81 movl 8(%esp),%ecx		/* %ecx = page start */
82 movl %ecx,%eax
83 addl $4096,%eax		/* %eax = end of page */
84 xor %ebx,%ebx		/* store value: zero */
85 1:
86 movnti %ebx,(%ecx)
87 addl $4,%ecx
88 cmpl %ecx,%eax
89 jne 1b
90 sfence			/* make the NT stores globally visible */
91 popl %ebx
92 ret
93 END(sse2_pagezero)
94
/*
 * i686_pagezero(addr)
 *
 * Zero one page (1024 longwords), skipping over runs of longwords that
 * are already zero (scanned with repe scasl) to avoid dirtying cache
 * lines needlessly.  NOTE(review): when a non-zero longword is found
 * with >= 16 longwords remaining, the jge below zeroes the entire
 * remainder in one rep stosl; the 64-byte-line arithmetic only applies
 * to short tails — confirm this matches the intended heuristic.
 */
95 ENTRY(i686_pagezero)
96 pushl %edi
97 pushl %ebx
98 
99 movl 12(%esp),%edi		/* %edi = page start */
100 movl $1024,%ecx		/* longwords per page */
101 cld
102 
103 ALIGN_TEXT
104 1:
105 xorl %eax,%eax
106 repe
107 scasl			/* skip longwords that are already zero */
108 jnz 2f
109 
/* the rest of the page was already zero -- nothing to store */
110 popl %ebx
111 popl %edi
112 ret
113 
114 ALIGN_TEXT
115 
116 2:
117 incl %ecx			/* back up over the non-zero longword */
118 subl $4,%edi
119 
120 movl %ecx,%edx		/* %edx = longwords still to process */
121 cmpl $16,%ecx
122 
123 jge 3f
124 
125 movl %edi,%ebx
126 andl $0x3f,%ebx		/* byte offset within 64-byte line */
127 shrl %ebx
128 shrl %ebx			/* ... converted to longwords */
129 movl $16,%ecx
130 subl %ebx,%ecx		/* longwords left in this 64-byte line */
131 
132 3:
133 subl %ecx,%edx		/* account for what we are about to store */
134 rep
135 stosl
136 
137 movl %edx,%ecx
138 testl %edx,%edx
139 jnz 1b			/* resume scanning the remainder */
140 
141 popl %ebx
142 popl %edi
143 ret
144 END(i686_pagezero)
145
146 /* fillw(pat, base, cnt) -- store cnt copies of 16-bit pattern pat at base */
147 ENTRY(fillw)
148 pushl %edi
149 movl 8(%esp),%eax		/* %ax = pattern */
150 movl 12(%esp),%edi		/* %edi = base */
151 movl 16(%esp),%ecx		/* %ecx = count of 16-bit words */
152 cld
153 rep
154 stosw
155 popl %edi
156 ret
157 END(fillw)
158
/*
 * bcopyb(src, dst, cnt)
 *
 * Byte-wise copy that handles overlap.  The single unsigned compare of
 * (dst - src) against cnt detects "dst starts inside src" (which needs
 * a backwards copy); all other cases copy forwards.
 */
159 ENTRY(bcopyb)
160 pushl %esi
161 pushl %edi
162 movl 12(%esp),%esi
163 movl 16(%esp),%edi
164 movl 20(%esp),%ecx
165 movl %edi,%eax
166 subl %esi,%eax
167 cmpl %ecx,%eax /* overlapping && src < dst? */
168 jb 1f
169 cld /* nope, copy forwards */
170 rep
171 movsb
172 popl %edi
173 popl %esi
174 ret
175 
176 ALIGN_TEXT
177 1:
178 addl %ecx,%edi /* copy backwards. */
179 addl %ecx,%esi
180 decl %edi
181 decl %esi
182 std
183 rep
184 movsb
185 popl %edi
186 popl %esi
187 cld			/* restore direction flag for callers */
188 ret
189 END(bcopyb)
190
191 /*
192 * bcopy(src, dst, cnt)
193 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
194 *
195 * Overlap-safe copy.  One unsigned compare of (dst - src) against cnt
196 * catches "overlapping && src < dst" (needs backwards copy); otherwise
197 * copy forwards, longwords first then the 0-3 byte tail.
198 */
195 ENTRY(bcopy)
196 pushl %esi
197 pushl %edi
198 movl 12(%esp),%esi
199 movl 16(%esp),%edi
200 movl 20(%esp),%ecx
201 
202 movl %edi,%eax
203 subl %esi,%eax
204 cmpl %ecx,%eax /* overlapping && src < dst? */
205 jb 1f
206 
207 shrl $2,%ecx /* copy by 32-bit words */
208 cld /* nope, copy forwards */
209 rep
210 movsl
211 movl 20(%esp),%ecx
212 andl $3,%ecx /* any bytes left? */
213 rep
214 movsb
215 popl %edi
216 popl %esi
217 ret
218 
219 ALIGN_TEXT
220 1:
221 addl %ecx,%edi /* copy backwards */
222 addl %ecx,%esi
223 decl %edi
224 decl %esi
225 andl $3,%ecx /* any fractional bytes? */
226 std
227 rep
228 movsb
229 movl 20(%esp),%ecx /* copy remainder by 32-bit words */
230 shrl $2,%ecx
231 subl $3,%esi
232 subl $3,%edi
233 rep
234 movsl
235 popl %edi
236 popl %esi
237 cld			/* restore direction flag for callers */
238 ret
239 END(bcopy)
240
241 /*
242 * Note: memcpy does not support overlapping copies
243 *
244 * memcpy(dst, src, cnt) -- returns dst (in %eax), like the C library.
245 */
244 ENTRY(memcpy)
245 pushl %edi
246 pushl %esi
247 movl 12(%esp),%edi		/* %edi = dst */
248 movl 16(%esp),%esi		/* %esi = src */
249 movl 20(%esp),%ecx		/* %ecx = cnt */
250 movl %edi,%eax		/* return value: dst */
251 shrl $2,%ecx /* copy by 32-bit words */
252 cld /* forwards only; no overlap check here */
253 rep
254 movsl
255 movl 20(%esp),%ecx
256 andl $3,%ecx /* any bytes left? */
257 rep
258 movsb
259 popl %esi
260 popl %edi
261 ret
262 END(memcpy)
263
264 /*****************************************************************************/
265 /* copyout and fubyte family */
266 /*****************************************************************************/
267 /*
268 * Access user memory from inside the kernel. These routines and possibly
269 * the math- and DOS emulators should be the only places that do this.
270 *
271 * We have to access the memory with user's permissions, so use a segment
272 * selector with RPL 3. For writes to user space we have to additionally
273 * check the PTE for write permission, because the 386 does not check
274 * write permissions when we are executing with EPL 0. The 486 does check
275 * this if the WP bit is set in CR0, so we can use a simpler version here.
276 *
277 * These routines set curpcb->onfault for the time they execute. When a
278 * protection violation occurs inside the functions, the trap handler
279 * returns to *curpcb->onfault instead of the function.
280 */
281
282 /*
283 * copyout(from_kernel, to_user, len) - MP SAFE
284 *
285 * Returns 0 on success or EFAULT (via copyout_fault) on a bad user
286 * address or a protection violation during the copy.
287 */
285 ENTRY(copyout)
286 movl PCPU(CURPCB),%eax
287 movl $copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
288 pushl %esi
289 pushl %edi
290 pushl %ebx
291 movl 16(%esp),%esi		/* from (kernel) */
292 movl 20(%esp),%edi		/* to (user) */
293 movl 24(%esp),%ebx		/* len */
294 testl %ebx,%ebx /* anything to do? */
295 jz done_copyout
296 
297 /*
298 * Check explicitly for non-user addresses. If 486 write protection
299 * is being used, this check is essential because we are in kernel
300 * mode so the h/w does not provide any protection against writing
301 * kernel addresses.
302 */
303 
304 /*
305 * First, prevent address wrapping.
306 */
307 movl %edi,%eax
308 addl %ebx,%eax
309 jc copyout_fault
310 /*
311 * XXX STOP USING VM_MAXUSER_ADDRESS.
312 * It is an end address, not a max, so every time it is used correctly it
313 * looks like there is an off by one error, and of course it caused an off
314 * by one error in several places.
315 */
316 cmpl $VM_MAXUSER_ADDRESS,%eax
317 ja copyout_fault
318 
319 /* bcopy(%esi, %edi, %ebx) */
320 movl %ebx,%ecx
321 
322 shrl $2,%ecx		/* longwords first */
323 cld
324 rep
325 movsl
326 movb %bl,%cl
327 andb $3,%cl		/* then the 0-3 byte tail */
328 rep
329 movsb
330 
331 done_copyout:
332 popl %ebx
333 popl %edi
334 popl %esi
335 xorl %eax,%eax		/* success */
336 movl PCPU(CURPCB),%edx
337 movl %eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
338 ret
339 END(copyout)
340
341 ALIGN_TEXT
/*
 * Fault recovery target for copyout: the trap handler resumes here.
 * Unwind the saved registers, disarm onfault, and return EFAULT.
 */
342 copyout_fault:
343 popl %ebx
344 popl %edi
345 popl %esi
346 movl PCPU(CURPCB),%edx
347 movl $0,PCB_ONFAULT(%edx)
348 movl $EFAULT,%eax
349 ret
350
351 /*
352 * copyin(from_user, to_kernel, len) - MP SAFE
353 *
354 * Returns 0 on success or EFAULT (via copyin_fault) on a bad user
355 * address or a protection violation during the copy.
356 */
354 ENTRY(copyin)
355 movl PCPU(CURPCB),%eax
356 movl $copyin_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
357 pushl %esi
358 pushl %edi
359 movl 12(%esp),%esi /* caddr_t from */
360 movl 16(%esp),%edi /* caddr_t to */
361 movl 20(%esp),%ecx /* size_t len */
362 
363 /*
364 * make sure address is valid: reject wraparound and any source range
365 * that ends above the user address space.
366 */
366 movl %esi,%edx
367 addl %ecx,%edx
368 jc copyin_fault
369 cmpl $VM_MAXUSER_ADDRESS,%edx
370 ja copyin_fault
371 
372 movb %cl,%al		/* stash low len bits for the tail */
373 shrl $2,%ecx /* copy longword-wise */
374 cld
375 rep
376 movsl
377 movb %al,%cl
378 andb $3,%cl /* copy remaining bytes */
379 rep
380 movsb
381 
382 popl %edi
383 popl %esi
384 xorl %eax,%eax		/* success */
385 movl PCPU(CURPCB),%edx
386 movl %eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
387 ret
388 END(copyin)
389
390 ALIGN_TEXT
/*
 * Fault recovery target for copyin: the trap handler resumes here.
 * Unwind the saved registers, disarm onfault, and return EFAULT.
 */
391 copyin_fault:
392 popl %edi
393 popl %esi
394 movl PCPU(CURPCB),%edx
395 movl $0,PCB_ONFAULT(%edx)
396 movl $EFAULT,%eax
397 ret
398
399 /*
400 * casuword. Compare and set user word. Returns -1 or the current value.
401 *
402 * casuword(addr, old, new): atomically replace *addr with new iff
403 * *addr == old.  Returns the value previously at *addr (== old on
404 * success), or -1 via fusufault on a bad address or page fault.
405 */
402 
403 ALTENTRY(casuword32)
404 ENTRY(casuword)
405 movl PCPU(CURPCB),%ecx
406 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
407 movl 4(%esp),%edx /* dst */
408 movl 8(%esp),%eax /* old */
409 movl 12(%esp),%ecx /* new */
410 
411 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
412 ja fusufault
413 
414 #ifdef SMP
415 lock
416 #endif
417 cmpxchgl %ecx,(%edx) /* Compare and set. */
418 
419 /*
420 * The old value is in %eax. If the store succeeded it will be the
421 * value we expected (old) from before the store, otherwise it will
422 * be the current value.
423 */
424 
425 movl PCPU(CURPCB),%ecx
427 movl $0,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
428 ret
429 END(casuword32)
430 END(casuword)
431
432 /*
433 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
434 * memory. All these functions are MPSAFE.
435 *
436 * fuword/fuword32(addr): return the 32-bit word at user address addr,
437 * or -1 via fusufault on a bad address or page fault.
438 */
436 
437 ALTENTRY(fuword32)
438 ENTRY(fuword)
439 movl PCPU(CURPCB),%ecx
440 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
441 movl 4(%esp),%edx /* from */
442 
443 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
444 ja fusufault
445 
446 movl (%edx),%eax
447 movl $0,PCB_ONFAULT(%ecx)	/* %ecx still holds curpcb */
448 ret
449 END(fuword32)
450 END(fuword)
451
452 /*
453 * fuswintr() and suswintr() are specialized variants of fuword16() and
454 * suword16(), respectively. They are called from the profiling code,
455 * potentially at interrupt time. If they fail, that's okay; good things
456 * will happen later. They always fail for now, until the trap code is
457 * able to deal with this.
458 */
459 ALTENTRY(suswintr)
460 ENTRY(fuswintr)
461 movl $-1,%eax		/* unconditionally report failure */
462 ret
463 END(suswintr)
464 END(fuswintr)
465
/*
 * fuword16(addr): fetch a 16-bit word from user address addr,
 * zero-extended; returns -1 via fusufault on failure.
 */
466 ENTRY(fuword16)
467 movl PCPU(CURPCB),%ecx
468 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
469 movl 4(%esp),%edx
470 
471 cmpl $VM_MAXUSER_ADDRESS-2,%edx
472 ja fusufault
473 
474 movzwl (%edx),%eax
475 movl $0,PCB_ONFAULT(%ecx)	/* %ecx still holds curpcb */
476 ret
477 END(fuword16)
478
/*
 * fubyte(addr): fetch a byte from user address addr, zero-extended;
 * returns -1 via fusufault on failure.
 */
479 ENTRY(fubyte)
480 movl PCPU(CURPCB),%ecx
481 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
482 movl 4(%esp),%edx
483 
484 cmpl $VM_MAXUSER_ADDRESS-1,%edx
485 ja fusufault
486 
487 movzbl (%edx),%eax
488 movl $0,PCB_ONFAULT(%ecx)	/* %ecx still holds curpcb */
489 ret
490 END(fubyte)
491
492 ALIGN_TEXT
/*
 * Common fault handler for the fuword/fubyte/suword/subyte/casuword
 * family: disarm onfault and return -1.
 */
493 fusufault:
494 movl PCPU(CURPCB),%ecx
495 xorl %eax,%eax
496 movl %eax,PCB_ONFAULT(%ecx)
497 decl %eax		/* %eax = -1 */
498 ret
499
500 /*
501 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
502 * All these functions are MPSAFE.
503 *
504 * suword/suword32(addr, val): store 32-bit val at user address addr.
505 * Returns 0 on success, -1 via fusufault on failure.
506 */
504 
505 ALTENTRY(suword32)
506 ENTRY(suword)
507 movl PCPU(CURPCB),%ecx
508 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
509 movl 4(%esp),%edx
510 
511 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
512 ja fusufault
513 
514 movl 8(%esp),%eax
515 movl %eax,(%edx)
516 xorl %eax,%eax		/* success */
517 movl PCPU(CURPCB),%ecx
518 movl %eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
519 ret
520 END(suword32)
521 END(suword)
522
/*
 * suword16(addr, val): store 16-bit val at user address addr.
 * Returns 0 on success, -1 via fusufault on failure.
 */
523 ENTRY(suword16)
524 movl PCPU(CURPCB),%ecx
525 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
526 movl 4(%esp),%edx
527 
528 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
529 ja fusufault
530 
531 movw 8(%esp),%ax
532 movw %ax,(%edx)
533 xorl %eax,%eax		/* success */
534 movl PCPU(CURPCB),%ecx /* restore trashed register */
535 movl %eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
536 ret
537 END(suword16)
538
/*
 * subyte(addr, val): store 8-bit val at user address addr.
 * Returns 0 on success, -1 via fusufault on failure.
 */
539 ENTRY(subyte)
540 movl PCPU(CURPCB),%ecx
541 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
542 movl 4(%esp),%edx
543 
544 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
545 ja fusufault
546 
547 movb 8(%esp),%al
548 movb %al,(%edx)
549 xorl %eax,%eax		/* success */
550 movl PCPU(CURPCB),%ecx /* restore trashed register */
551 movl %eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
552 ret
553 END(subyte)
554
555 /*
556 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
557 *
558 * copy a string from from to to, stop when a 0 character is reached.
559 * return ENAMETOOLONG if string is longer than maxlen, and
560 * EFAULT on protection violations. If lencopied is non-zero,
561 * return the actual length in *lencopied.
562 */
563 ENTRY(copyinstr)
564 pushl %esi
565 pushl %edi
566 movl PCPU(CURPCB),%ecx
567 movl $cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault recovery */
568 
569 movl 12(%esp),%esi /* %esi = from */
570 movl 16(%esp),%edi /* %edi = to */
571 movl 20(%esp),%edx /* %edx = maxlen */
572 
573 movl $VM_MAXUSER_ADDRESS,%eax
574 
575 /* make sure 'from' is within bounds */
576 subl %esi,%eax
577 jbe cpystrflt
578 
579 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
580 cmpl %edx,%eax
581 jae 1f
582 movl %eax,%edx
583 movl %eax,20(%esp)	/* clamped maxlen, re-read at cpystrflt_x */
584 1:
585 incl %edx		/* %edx counts bytes remaining, incl. NUL slot */
586 cld
587 
588 2:
589 decl %edx
590 jz 3f			/* budget exhausted before NUL */
591 
592 lodsb
593 stosb
594 orb %al,%al
595 jnz 2b
596 
597 /* Success -- 0 byte reached */
598 decl %edx		/* do not count the NUL against the caller */
599 xorl %eax,%eax
600 jmp cpystrflt_x
601 3:
602 /* edx is zero - return ENAMETOOLONG or EFAULT */
603 cmpl $VM_MAXUSER_ADDRESS,%esi
604 jae cpystrflt		/* ran off the end of user space: EFAULT */
605 4:
606 movl $ENAMETOOLONG,%eax
607 jmp cpystrflt_x
608 
609 cpystrflt:
610 movl $EFAULT,%eax
611 
612 cpystrflt_x:
613 /* set *lencopied and return %eax */
614 movl PCPU(CURPCB),%ecx
615 movl $0,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
616 movl 20(%esp),%ecx
617 subl %edx,%ecx		/* bytes copied = maxlen - remaining */
618 movl 24(%esp),%edx
619 testl %edx,%edx
620 jz 1f			/* lencopied may be NULL */
621 movl %ecx,(%edx)
622 1:
623 popl %edi
624 popl %esi
625 ret
626 END(copyinstr)
627
628 /*
629 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
630 *
631 * Kernel-to-kernel variant of copyinstr: copy a NUL-terminated string,
632 * returning 0 or ENAMETOOLONG; no user-address checks or fault
633 * recovery are needed here.
634 */
631 ENTRY(copystr)
632 pushl %esi
633 pushl %edi
634 
635 movl 12(%esp),%esi /* %esi = from */
636 movl 16(%esp),%edi /* %edi = to */
637 movl 20(%esp),%edx /* %edx = maxlen */
638 incl %edx		/* %edx counts bytes remaining, incl. NUL slot */
639 cld
640 1:
641 decl %edx
642 jz 4f			/* budget exhausted before NUL */
643 lodsb
644 stosb
645 orb %al,%al
646 jnz 1b
647 
648 /* Success -- 0 byte reached */
649 decl %edx		/* do not count the NUL against the caller */
650 xorl %eax,%eax
651 jmp 6f
652 4:
653 /* edx is zero -- return ENAMETOOLONG */
654 movl $ENAMETOOLONG,%eax
655 
656 6:
657 /* set *lencopied and return %eax */
658 movl 20(%esp),%ecx
659 subl %edx,%ecx		/* bytes copied = maxlen - remaining */
660 movl 24(%esp),%edx
661 testl %edx,%edx
662 jz 7f			/* lencopied may be NULL */
663 movl %ecx,(%edx)
664 7:
665 popl %edi
666 popl %esi
667 ret
668 END(copystr)
669
/*
 * bcmp(b1, b2, len)
 *
 * Compare len bytes; returns 0 if equal, 1 if they differ (setne).
 * Compares longwords first, then the 0-3 byte tail.
 */
670 ENTRY(bcmp)
671 pushl %edi
672 pushl %esi
673 movl 12(%esp),%edi
674 movl 16(%esp),%esi
675 movl 20(%esp),%edx
676 
677 movl %edx,%ecx
678 shrl $2,%ecx
679 cld /* compare forwards */
680 repe
681 cmpsl
682 jne 1f
683 
684 movl %edx,%ecx
685 andl $3,%ecx
686 repe
687 cmpsb
688 1:
689 setne %al		/* ZF clear => mismatch => 1 */
690 movsbl %al,%eax
691 popl %esi
692 popl %edi
693 ret
694 END(bcmp)
695
696 /*
697 * Handling of special 386 registers and descriptor tables etc
698 */
699 /* void lgdt(struct region_descriptor *rdp); */
/*
 * Load a new GDT and reload every segment register so no selector
 * still refers to the old table; %cs is reloaded by converting the
 * near return into a far (intersegment) lret.
 */
700 ENTRY(lgdt)
701 #ifndef XEN
702 /* reload the descriptor table */
703 movl 4(%esp),%eax
704 lgdt (%eax)
705 #endif
706 
707 /* flush the prefetch q */
708 jmp 1f
709 nop
710 1:
711 /* reload "stale" selectors */
712 movl $KDSEL,%eax
713 movl %eax,%ds
714 movl %eax,%es
715 movl %eax,%gs
716 movl %eax,%ss
717 movl $KPSEL,%eax
718 movl %eax,%fs
719 
720 /* reload code selector by turning return into intersegmental return */
721 movl (%esp),%eax
722 pushl %eax
723 movl $KCSEL,4(%esp)
724 MEXITCOUNT
725 lret
726 END(lgdt)
727
728 /* ssdtosd(*ssdp,*sdp) */
/*
 * Convert a machine-independent soft segment descriptor (*ssdp) into
 * the packed i386 hardware descriptor format, storing the two result
 * longwords at *sdp.  The shifts/rotates below scatter the base,
 * limit, and attribute fields into their hardware bit positions.
 */
729 ENTRY(ssdtosd)
730 pushl %ebx
731 movl 8(%esp),%ecx		/* %ecx = ssdp */
732 movl 8(%ecx),%ebx
733 shll $16,%ebx
734 movl (%ecx),%edx
735 roll $16,%edx
736 movb %dh,%bl
737 movb %dl,%bh
738 rorl $8,%ebx
739 movl 4(%ecx),%eax
740 movw %ax,%dx
741 andl $0xf0000,%eax
742 orl %eax,%ebx
743 movl 12(%esp),%ecx		/* %ecx = sdp */
744 movl %edx,(%ecx)		/* low longword of descriptor */
745 movl %ebx,4(%ecx)		/* high longword of descriptor */
746 popl %ebx
747 ret
748 END(ssdtosd)
749
750 /* void reset_dbregs() -- clear all hardware debug registers */
751 ENTRY(reset_dbregs)
752 movl $0,%eax
753 movl %eax,%dr7 /* disable all breakpoints first */
754 movl %eax,%dr0
755 movl %eax,%dr1
756 movl %eax,%dr2
757 movl %eax,%dr3
758 movl %eax,%dr6
759 ret
760 END(reset_dbregs)
761
762 /*****************************************************************************/
763 /* setjump, longjump */
764 /*****************************************************************************/
765 
/*
 * setjmp(buf): save the callee-saved registers, %esp and the return
 * address into buf and return 0.  longjmp(buf) returns 1 from here.
 */
766 ENTRY(setjmp)
767 movl 4(%esp),%eax		/* %eax = buf */
768 movl %ebx,(%eax) /* save ebx */
769 movl %esp,4(%eax) /* save esp */
770 movl %ebp,8(%eax) /* save ebp */
771 movl %esi,12(%eax) /* save esi */
772 movl %edi,16(%eax) /* save edi */
773 movl (%esp),%edx /* get rta */
774 movl %edx,20(%eax) /* save eip */
775 xorl %eax,%eax /* return(0); */
776 ret
777 END(setjmp)
778
/*
 * longjmp(buf): restore the context saved by setjmp(buf) and resume
 * there, making the original setjmp call appear to return 1.
 */
779 ENTRY(longjmp)
780 movl 4(%esp),%eax		/* %eax = buf */
781 movl (%eax),%ebx /* restore ebx */
782 movl 4(%eax),%esp /* restore esp */
783 movl 8(%eax),%ebp /* restore ebp */
784 movl 12(%eax),%esi /* restore esi */
785 movl 16(%eax),%edi /* restore edi */
786 movl 20(%eax),%edx /* get rta */
787 movl %edx,(%esp) /* put in return frame */
788 xorl %eax,%eax /* return(1); */
789 incl %eax
790 ret
791 END(longjmp)
792
793 /*
794 * Support for BB-profiling (gcc -a). The kernbb program will extract
795 * the data from the kernel.
796 */
797 
798 .data
799 ALIGN_DATA
800 .globl bbhead
/* head of the singly-linked list of registered profile records */
801 bbhead:
802 .long 0
803 
804 .text
/*
 * __bb_init_func(record): mark the record initialized (first word = 1)
 * and push it onto the bbhead list via the link word at offset 16.
 */
805 NON_GPROF_ENTRY(__bb_init_func)
806 movl 4(%esp),%eax
807 movl $1,(%eax)		/* record->zero_word = 1 (initialized) */
808 movl bbhead,%edx
809 movl %edx,16(%eax)	/* record->next = old head */
810 movl %eax,bbhead	/* head = record */
811 NON_GPROF_RET
812
813 /*
814 * Support for reading MSRs in the safe manner.
815 *
816 * Returns 0 and stores the 64-bit MSR value at *data on success, or
817 * EFAULT via msr_onfault if the rdmsr raises #GP (invalid MSR).
818 */
816 ENTRY(rdmsr_safe)
817 /* int rdmsr_safe(u_int msr, uint64_t *data) */
818 movl PCPU(CURPCB),%ecx
819 movl $msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
820 
821 movl 4(%esp),%ecx		/* %ecx = msr number */
822 rdmsr
823 movl 8(%esp),%ecx		/* %ecx = data pointer */
824 movl %eax,(%ecx)		/* low 32 bits */
825 movl %edx,4(%ecx)		/* high 32 bits */
826 xorl %eax,%eax		/* success */
827 
828 movl PCPU(CURPCB),%ecx
829 movl %eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
830 
831 ret
832
833 /*
834 * Support for writing MSRs in the safe manner.
835 *
836 * Returns 0 on success, or EFAULT via msr_onfault if the wrmsr raises
837 * #GP (invalid MSR or value).
838 */
836 ENTRY(wrmsr_safe)
837 /* int wrmsr_safe(u_int msr, uint64_t data) */
838 movl PCPU(CURPCB),%ecx
839 movl $msr_onfault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
840 
841 movl 4(%esp),%ecx		/* %ecx = msr number */
842 movl 8(%esp),%eax		/* low 32 bits of data */
843 movl 12(%esp),%edx		/* high 32 bits of data */
844 wrmsr
845 xorl %eax,%eax		/* success */
846 
847 movl PCPU(CURPCB),%ecx
848 movl %eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
849 
850 ret
851
852 /*
853 * MSR operations fault handler: the trap handler resumes here when
854 * rdmsr/wrmsr faults.  Disarm onfault and return EFAULT.
855 */
855 ALIGN_TEXT
856 msr_onfault:
857 movl PCPU(CURPCB),%ecx
858 movl $0,PCB_ONFAULT(%ecx)
859 movl $EFAULT,%eax
860 ret