1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/10.2/sys/i386/i386/support.s 274648 2014-11-18 12:53:32Z kib $
30 */
31
32 #include "opt_npx.h"
33
34 #include <machine/asmacros.h>
35 #include <machine/cputypes.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38 #include <machine/specialreg.h>
39
40 #include "assym.s"
41
42 #define IDXSHIFT 10
43
44 .text
45
46 /*
47 * bcopy family
48 * void bzero(void *buf, u_int len)
49 */
/*
 * void bzero(void *buf, u_int len)
 *
 * Zero 'len' bytes starting at 'buf'.  The bulk is done with 32-bit
 * stores; the trailing 0-3 bytes are stored individually.
 * Clobbers %eax, %ecx; leaves DF clear; preserves %edi.
 */
50 ENTRY(bzero)
51 pushl %edi
52 movl 8(%esp),%edi /* %edi = buf (arg offsets shifted by the push) */
53 movl 12(%esp),%ecx /* %ecx = len */
54 xorl %eax,%eax /* store pattern = 0 */
55 shrl $2,%ecx /* longword count = len / 4 */
56 cld /* string stores go forward */
57 rep
58 stosl /* zero len/4 longwords */
59 movl 12(%esp),%ecx
60 andl $3,%ecx /* leftover byte count = len & 3 */
61 rep
62 stosb /* zero the tail bytes */
63 popl %edi
64 ret
65 END(bzero)
66
/*
 * void sse2_pagezero(void *page)
 *
 * Zero one 4096-byte page using non-temporal stores (movnti) so the
 * freshly zeroed page does not evict useful data from the caches.
 * The sfence orders the weakly-ordered movnti stores before return.
 * Clobbers %eax, %ecx; preserves %ebx.
 */
67 ENTRY(sse2_pagezero)
68 pushl %ebx
69 movl 8(%esp),%ecx /* %ecx = cursor, starts at page base */
70 movl %ecx,%eax
71 addl $4096,%eax /* %eax = one past end of page */
72 xor %ebx,%ebx /* value to store = 0 */
73 1:
74 movnti %ebx,(%ecx) /* non-temporal 4-byte store */
75 addl $4,%ecx
76 cmpl %ecx,%eax
77 jne 1b /* loop until cursor reaches end */
78 sfence /* make movnti stores globally visible */
79 popl %ebx
80 ret
81 END(sse2_pagezero)
82
/*
 * void i686_pagezero(void *page)
 *
 * Zero one 4096-byte (1024-longword) page, skipping runs that are
 * already zero: "repe scasl" scans (reads only) until it finds a
 * non-zero longword, so pages that are mostly zero cost mostly reads
 * and do not dirty cache lines needlessly.  Non-zero stretches are
 * then filled with rep stosl.
 *
 * NOTE(review): the <16-longwords-remaining case recomputes the fill
 * count from the longword offset within the current 64-byte cache
 * line (16 longwords per line); the exact rounding interplay is
 * subtle and is preserved verbatim from the shipped FreeBSD code.
 */
83 ENTRY(i686_pagezero)
84 pushl %edi
85 pushl %ebx
86
87 movl 12(%esp),%edi /* %edi = scan/fill cursor */
88 movl $1024,%ecx /* %ecx = longwords left in the page */
89 cld
90
91 ALIGN_TEXT
92 1:
93 xorl %eax,%eax /* scan value: compare against 0 */
94 repe
95 scasl /* skip longwords that are already zero */
96 jnz 2f /* found a non-zero longword */
97
98 popl %ebx /* whole page scanned clean -- done */
99 popl %edi
100 ret
101
102 ALIGN_TEXT
103
104 2:
105 incl %ecx /* re-include the non-zero longword */
106 subl $4,%edi /* back cursor up onto it */
107
108 movl %edx,%edx
109 cmpl $16,%ecx
110
111 jge 3f /* >= 16 longwords left: fill them all */
112
113 movl %edi,%ebx
114 andl $0x3f,%ebx /* byte offset within 64-byte cache line */
115 shrl %ebx
116 shrl %ebx /* ... as a longword offset (/4) */
117 movl $16,%ecx
118 subl %ebx,%ecx /* fill to end of current cache line */
119
120 3:
121 subl %ecx,%edx /* %edx = longwords left after this fill */
122 rep
123 stosl /* zero %ecx longwords */
124
125 movl %edx,%ecx
126 testl %edx,%edx
127 jnz 1b /* more page left: resume scanning */
128
129 popl %ebx
130 popl %edi
131 ret
132 END(i686_pagezero)
133
134 /* fillw(pat, base, cnt) */
/*
 * void fillw(int pat, void *base, size_t cnt)
 *
 * Store 'cnt' 16-bit words of 'pat' (its low 16 bits) at 'base'.
 * Clobbers %eax, %ecx; preserves %edi.
 */
135 ENTRY(fillw)
136 pushl %edi
137 movl 8(%esp),%eax /* %ax = fill pattern */
138 movl 12(%esp),%edi /* %edi = base */
139 movl 16(%esp),%ecx /* %ecx = word count */
140 cld
141 rep
142 stosw /* store cnt copies of %ax */
143 popl %edi
144 ret
145 END(fillw)
146
/*
 * void bcopyb(const void *src, void *dst, size_t len)
 *
 * Byte-by-byte copy that handles overlapping regions.  The unsigned
 * test (dst - src) < len is true only when src < dst and the regions
 * overlap (if dst < src the subtraction wraps to a huge value), in
 * which case the copy is done backwards with DF set.  DF is cleared
 * again before returning.  Clobbers %eax, %ecx.
 */
147 ENTRY(bcopyb)
148 pushl %esi
149 pushl %edi
150 movl 12(%esp),%esi
151 movl 16(%esp),%edi
152 movl 20(%esp),%ecx
153 movl %edi,%eax
154 subl %esi,%eax /* %eax = dst - src (unsigned) */
155 cmpl %ecx,%eax /* overlapping && src < dst? */
156 jb 1f
157 cld /* nope, copy forwards */
158 rep
159 movsb
160 popl %edi
161 popl %esi
162 ret
163
164 ALIGN_TEXT
165 1:
166 addl %ecx,%edi /* copy backwards. */
167 addl %ecx,%esi
168 decl %edi /* point at last byte of each region */
169 decl %esi
170 std /* string ops now descend */
171 rep
172 movsb
173 popl %edi
174 popl %esi
175 cld /* restore the ABI-default direction */
176 ret
177 END(bcopyb)
178
179 /*
180 * bcopy(src, dst, cnt)
181 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
182 */
/*
 * void bcopy(const void *src, void *dst, size_t len)
 *
 * Overlap-safe copy done longword-at-a-time.  Forward case: copy
 * len/4 longwords then the 0-3 tail bytes.  Backward (overlapping,
 * src < dst) case: with DF set, copy the 0-3 tail bytes first, then
 * step the pointers back 3 so they sit on the last full longword and
 * copy len/4 longwords descending.  Clobbers %eax, %ecx.
 */
183 ENTRY(bcopy)
184 pushl %ebp
185 movl %esp,%ebp
186 pushl %esi
187 pushl %edi
188 movl 8(%ebp),%esi
189 movl 12(%ebp),%edi
190 movl 16(%ebp),%ecx
191
192 movl %edi,%eax
193 subl %esi,%eax /* %eax = dst - src (unsigned) */
194 cmpl %ecx,%eax /* overlapping && src < dst? */
195 jb 1f
196
197 shrl $2,%ecx /* copy by 32-bit words */
198 cld /* nope, copy forwards */
199 rep
200 movsl
201 movl 16(%ebp),%ecx
202 andl $3,%ecx /* any bytes left? */
203 rep
204 movsb
205 popl %edi
206 popl %esi
207 popl %ebp
208 ret
209
210 ALIGN_TEXT
211 1:
212 addl %ecx,%edi /* copy backwards */
213 addl %ecx,%esi
214 decl %edi /* last byte of each region */
215 decl %esi
216 andl $3,%ecx /* any fractional bytes? */
217 std /* descend */
218 rep
219 movsb /* copy the tail bytes first */
220 movl 16(%ebp),%ecx /* copy remainder by 32-bit words */
221 shrl $2,%ecx
222 subl $3,%esi /* movsl reads/writes the 4 bytes at (%esi)/(%edi) */
223 subl $3,%edi
224 rep
225 movsl
226 popl %edi
227 popl %esi
228 cld /* restore the ABI-default direction */
229 popl %ebp
230 ret
231 END(bcopy)
232
233 /*
234 * Note: memcpy does not support overlapping copies
235 */
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 *
 * Forward-only copy; overlapping regions are NOT supported (use
 * bcopy for that).  Returns dst in %eax per the C standard.
 * Clobbers %ecx.
 */
236 ENTRY(memcpy)
237 pushl %edi
238 pushl %esi
239 movl 12(%esp),%edi /* %edi = dst */
240 movl 16(%esp),%esi /* %esi = src */
241 movl 20(%esp),%ecx /* %ecx = len */
242 movl %edi,%eax /* return value = dst */
243 shrl $2,%ecx /* copy by 32-bit words */
244 cld /* nope, copy forwards */
245 rep
246 movsl
247 movl 20(%esp),%ecx
248 andl $3,%ecx /* any bytes left? */
249 rep
250 movsb
251 popl %esi
252 popl %edi
253 ret
254 END(memcpy)
255
256 /*****************************************************************************/
257 /* copyout and fubyte family */
258 /*****************************************************************************/
259 /*
260 * Access user memory from inside the kernel. These routines and possibly
261 * the math- and DOS emulators should be the only places that do this.
262 *
263 * We have to access the memory with user's permissions, so use a segment
264 * selector with RPL 3. For writes to user space we have to additionally
265 * check the PTE for write permission, because the 386 does not check
266 * write permissions when we are executing with EPL 0. The 486 does check
267 * this if the WP bit is set in CR0, so we can use a simpler version here.
268 *
269 * These routines set curpcb->pcb_onfault for the time they execute. When a
270 * protection violation occurs inside the functions, the trap handler
271 * returns to *curpcb->pcb_onfault instead of the function.
272 */
273
274 /*
275 * copyout(from_kernel, to_user, len) - MP SAFE
276 */
/*
 * int copyout(const void *from_kernel, void *to_user, size_t len) - MP SAFE
 *
 * Copy len bytes from kernel space to user space.  Returns 0 on
 * success, EFAULT on a bad user address.  curpcb->pcb_onfault is
 * pointed at copyout_fault for the duration so the trap handler can
 * recover from a protection fault mid-copy.
 */
277 ENTRY(copyout)
278 movl PCPU(CURPCB),%eax
279 movl $copyout_fault,PCB_ONFAULT(%eax) /* arm fault recovery */
280 pushl %esi
281 pushl %edi
282 pushl %ebx
283 movl 16(%esp),%esi /* %esi = from (kernel) */
284 movl 20(%esp),%edi /* %edi = to (user) */
285 movl 24(%esp),%ebx /* %ebx = len */
286 testl %ebx,%ebx /* anything to do? */
287 jz done_copyout
288
289 /*
290 * Check explicitly for non-user addresses. If 486 write protection
291 * is being used, this check is essential because we are in kernel
292 * mode so the h/w does not provide any protection against writing
293 * kernel addresses.
294 */
295
296 /*
297 * First, prevent address wrapping.
298 */
299 movl %edi,%eax
300 addl %ebx,%eax /* %eax = end of destination range */
301 jc copyout_fault /* to + len wrapped past 2^32 */
302 /*
303 * XXX STOP USING VM_MAXUSER_ADDRESS.
304 * It is an end address, not a max, so every time it is used correctly it
305 * looks like there is an off by one error, and of course it caused an off
306 * by one error in several places.
307 */
308 cmpl $VM_MAXUSER_ADDRESS,%eax
309 ja copyout_fault /* destination extends past user space */
310
311 /* bcopy(%esi, %edi, %ebx) */
312 movl %ebx,%ecx
313
314 shrl $2,%ecx /* longword count */
315 cld
316 rep
317 movsl
318 movb %bl,%cl
319 andb $3,%cl /* 0-3 tail bytes */
320 rep
321 movsb
322
323 done_copyout:
324 popl %ebx
325 popl %edi
326 popl %esi
327 xorl %eax,%eax /* success */
328 movl PCPU(CURPCB),%edx
329 movl %eax,PCB_ONFAULT(%edx) /* disarm fault recovery */
330 ret
331 END(copyout)
332
333 ALIGN_TEXT
/*
 * Trap-recovery landing pad for copyout: the trap handler jumps here
 * (via pcb_onfault) on a protection fault.  Unwind the three pushed
 * registers, disarm pcb_onfault, and return EFAULT.
 */
334 copyout_fault:
335 popl %ebx
336 popl %edi
337 popl %esi
338 movl PCPU(CURPCB),%edx
339 movl $0,PCB_ONFAULT(%edx)
340 movl $EFAULT,%eax
341 ret
342
343 /*
344 * copyin(from_user, to_kernel, len) - MP SAFE
345 */
/*
 * int copyin(const void *from_user, void *to_kernel, size_t len) - MP SAFE
 *
 * Copy len bytes from user space to kernel space.  Returns 0 on
 * success, EFAULT on a bad user address (via copyin_fault, installed
 * in curpcb->pcb_onfault for the duration).
 */
346 ENTRY(copyin)
347 movl PCPU(CURPCB),%eax
348 movl $copyin_fault,PCB_ONFAULT(%eax) /* arm fault recovery */
349 pushl %esi
350 pushl %edi
351 movl 12(%esp),%esi /* caddr_t from */
352 movl 16(%esp),%edi /* caddr_t to */
353 movl 20(%esp),%ecx /* size_t len */
354
355 /*
356 * make sure address is valid
357 */
358 movl %esi,%edx
359 addl %ecx,%edx /* %edx = end of source range */
360 jc copyin_fault /* from + len wrapped past 2^32 */
361 cmpl $VM_MAXUSER_ADDRESS,%edx
362 ja copyin_fault /* source extends past user space */
363
364 movb %cl,%al /* save len & 0xff for the tail count */
365 shrl $2,%ecx /* copy longword-wise */
366 cld
367 rep
368 movsl
369 movb %al,%cl
370 andb $3,%cl /* copy remaining bytes */
371 rep
372 movsb
373
374 popl %edi
375 popl %esi
376 xorl %eax,%eax /* success */
377 movl PCPU(CURPCB),%edx
378 movl %eax,PCB_ONFAULT(%edx) /* disarm fault recovery */
379 ret
380 END(copyin)
381
382 ALIGN_TEXT
/*
 * Trap-recovery landing pad for copyin (see copyout_fault): unwind
 * the two pushed registers, disarm pcb_onfault, return EFAULT.
 */
383 copyin_fault:
384 popl %edi
385 popl %esi
386 movl PCPU(CURPCB),%edx
387 movl $0,PCB_ONFAULT(%edx)
388 movl $EFAULT,%eax
389 ret
390
391 /*
392 * casueword. Compare and set user word. Returns -1 on fault,
393 * 0 on non-faulting access. The current value is in *oldp.
394 */
/*
 * int casueword(volatile u_long *dst, u_long old, u_long *oldp, u_long new)
 * int casueword32(volatile uint32_t *dst, uint32_t old,
 *                 uint32_t *oldp, uint32_t new)
 *
 * Atomically compare-and-set the user word at dst: if *dst == old,
 * store new.  Returns -1 on fault (via fusufault), 0 on any
 * non-faulting access -- whether or not the swap took place.  The
 * value witnessed at *dst is always written to *oldp; the caller
 * compares it against 'old' to learn the outcome.
 */
395 ALTENTRY(casueword32)
396 ENTRY(casueword)
397 movl PCPU(CURPCB),%ecx
398 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
399 movl 4(%esp),%edx /* dst */
400 movl 8(%esp),%eax /* old */
401 movl 16(%esp),%ecx /* new */
402
403 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
404 ja fusufault
405
406 #ifdef SMP
407 lock
408 #endif
409 cmpxchgl %ecx,(%edx) /* Compare and set. */
410
411 /*
412 * The old value is in %eax. If the store succeeded it will be the
413 * value we expected (old) from before the store, otherwise it will
414 * be the current value.
415 */
416
417 movl PCPU(CURPCB),%ecx
418 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
419 movl 12(%esp),%edx /* oldp */
420 movl %eax,(%edx) /* report the witnessed value */
421 xorl %eax,%eax /* non-faulting access: return 0 */
422 ret
423 END(casueword32)
424 END(casueword)
425
426 /*
427 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
428 * memory.
429 */
430
/*
 * int fueword(volatile const void *from, long *val)
 * int fueword32(volatile const void *from, int32_t *val)
 *
 * Fetch a 32-bit word from user memory into *val.  Returns 0 on
 * success, -1 on fault (via fusufault, armed through pcb_onfault).
 */
431 ALTENTRY(fueword32)
432 ENTRY(fueword)
433 movl PCPU(CURPCB),%ecx
434 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
435 movl 4(%esp),%edx /* from */
436
437 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
438 ja fusufault
439
440 movl (%edx),%eax /* the guarded user access */
441 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
442 movl 8(%esp),%edx
443 movl %eax,(%edx) /* *val = fetched word */
444 xorl %eax,%eax /* success */
445 ret
446 END(fueword32)
447 END(fueword)
448
449 /*
450 * fuswintr() and suswintr() are specialized variants of fuword16() and
451 * suword16(), respectively. They are called from the profiling code,
452 * potentially at interrupt time. If they fail, that's okay; good things
453 * will happen later. They always fail for now, until the trap code is
454 * able to deal with this.
455 */
/*
 * int fuswintr(void *base) / int suswintr(void *base, int word)
 *
 * Interrupt-time variants of fuword16/suword16 (see comment above).
 * They unconditionally fail with -1 until the trap code can cope
 * with faults at interrupt time; callers treat failure as benign.
 */
456 ALTENTRY(suswintr)
457 ENTRY(fuswintr)
458 movl $-1,%eax /* always fail for now */
459 ret
460 END(suswintr)
461 END(fuswintr)
462
/*
 * int fuword16(volatile const void *base)
 *
 * Fetch a 16-bit word from user memory, zero-extended into %eax.
 * Returns the word on success, -1 on fault (via fusufault).
 */
463 ENTRY(fuword16)
464 movl PCPU(CURPCB),%ecx
465 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
466 movl 4(%esp),%edx
467
468 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* last byte must be in user space */
469 ja fusufault
470
471 movzwl (%edx),%eax /* guarded user access, zero-extend */
472 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
473 ret
474 END(fuword16)
475
/*
 * int fubyte(volatile const void *base)
 *
 * Fetch one byte from user memory, zero-extended into %eax.
 * Returns the byte on success, -1 on fault (via fusufault).
 */
476 ENTRY(fubyte)
477 movl PCPU(CURPCB),%ecx
478 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
479 movl 4(%esp),%edx
480
481 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* byte must be in user space */
482 ja fusufault
483
484 movzbl (%edx),%eax /* guarded user access, zero-extend */
485 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
486 ret
487 END(fubyte)
488
489 ALIGN_TEXT
/*
 * Shared trap-recovery landing pad for the fu*/su*/casueword
 * families: disarm pcb_onfault and return -1 (xor then decl).
 */
490 fusufault:
491 movl PCPU(CURPCB),%ecx
492 xorl %eax,%eax
493 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
494 decl %eax /* return -1 */
495 ret
496
497 /*
498 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
499 * All these functions are MPSAFE.
500 */
501
/*
 * int suword(volatile void *base, long word)
 * int suword32(volatile void *base, int32_t word)
 *
 * Store a 32-bit word to user memory.  Returns 0 on success, -1 on
 * fault (via fusufault, armed through pcb_onfault).
 */
502 ALTENTRY(suword32)
503 ENTRY(suword)
504 movl PCPU(CURPCB),%ecx
505 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
506 movl 4(%esp),%edx
507
508 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
509 ja fusufault
510
511 movl 8(%esp),%eax
512 movl %eax,(%edx) /* the guarded user store */
513 xorl %eax,%eax /* success */
514 movl PCPU(CURPCB),%ecx
515 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
516 ret
517 END(suword32)
518 END(suword)
519
/*
 * int suword16(volatile void *base, int word)
 *
 * Store a 16-bit word to user memory.  Returns 0 on success, -1 on
 * fault (via fusufault).
 */
520 ENTRY(suword16)
521 movl PCPU(CURPCB),%ecx
522 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
523 movl 4(%esp),%edx
524
525 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
526 ja fusufault
527
528 movw 8(%esp),%ax
529 movw %ax,(%edx) /* the guarded user store */
530 xorl %eax,%eax /* success */
531 movl PCPU(CURPCB),%ecx /* restore trashed register */
532 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
533 ret
534 END(suword16)
535
/*
 * int subyte(volatile void *base, int byte)
 *
 * Store one byte to user memory.  Returns 0 on success, -1 on fault
 * (via fusufault).
 */
536 ENTRY(subyte)
537 movl PCPU(CURPCB),%ecx
538 movl $fusufault,PCB_ONFAULT(%ecx) /* arm fault recovery */
539 movl 4(%esp),%edx
540
541 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
542 ja fusufault
543
544 movb 8(%esp),%al
545 movb %al,(%edx) /* the guarded user store */
546 xorl %eax,%eax /* success */
547 movl PCPU(CURPCB),%ecx /* restore trashed register */
548 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
549 ret
550 END(subyte)
551
552 /*
553 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
554 *
555 * copy a string from from to to, stop when a 0 character is reached.
556 * return ENAMETOOLONG if string is longer than maxlen, and
557 * EFAULT on protection violations. If lencopied is non-zero,
558 * return the actual length in *lencopied.
559 */
/*
 * int copyinstr(const void *from, void *to, size_t maxlen,
 *               size_t *lencopied) - MP SAFE
 *
 * Copy a NUL-terminated string from user space to kernel space,
 * copying at most maxlen bytes (including the NUL).  Returns 0 on
 * success, ENAMETOOLONG if no NUL was seen within maxlen, EFAULT on
 * a bad user address.  If lencopied != NULL, *lencopied gets the
 * number of bytes copied (including the NUL on success).
 * %edx counts down the remaining byte budget throughout.
 */
560 ENTRY(copyinstr)
561 pushl %esi
562 pushl %edi
563 movl PCPU(CURPCB),%ecx
564 movl $cpystrflt,PCB_ONFAULT(%ecx) /* arm fault recovery */
565
566 movl 12(%esp),%esi /* %esi = from */
567 movl 16(%esp),%edi /* %edi = to */
568 movl 20(%esp),%edx /* %edx = maxlen */
569
570 movl $VM_MAXUSER_ADDRESS,%eax
571
572 /* make sure 'from' is within bounds */
573 subl %esi,%eax /* %eax = bytes of user space after 'from' */
574 jbe cpystrflt /* 'from' is at/past the user-space end */
575
576 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
577 cmpl %edx,%eax
578 jae 1f
579 movl %eax,%edx /* clamp the budget */
580 movl %eax,20(%esp) /* rewrite stacked maxlen so the length */
/* accounting at cpystrflt_x stays consistent */
581 1:
582 incl %edx /* pre-bias for the leading decl below */
583 cld
584
585 2:
586 decl %edx
587 jz 3f /* budget exhausted */
588
589 lodsb /* fetch user byte (faults -> cpystrflt) */
590 stosb
591 orb %al,%al
592 jnz 2b /* keep going until the NUL */
593
594 /* Success -- 0 byte reached */
595 decl %edx /* NOTE: decrement cancelled by the +1 bias */
596 xorl %eax,%eax
597 jmp cpystrflt_x
598 3:
599 /* edx is zero - return ENAMETOOLONG or EFAULT */
600 cmpl $VM_MAXUSER_ADDRESS,%esi
601 jae cpystrflt /* budget ran out AT the user-space end */
602 4:
603 movl $ENAMETOOLONG,%eax
604 jmp cpystrflt_x
605
/* Fault landing pad (also reached by direct jumps above). */
606 cpystrflt:
607 movl $EFAULT,%eax
608
609 cpystrflt_x:
610 /* set *lencopied and return %eax */
611 movl PCPU(CURPCB),%ecx
612 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
613 movl 20(%esp),%ecx /* (possibly clamped) maxlen */
614 subl %edx,%ecx /* bytes copied = maxlen - remaining */
615 movl 24(%esp),%edx
616 testl %edx,%edx
617 jz 1f /* lencopied may be NULL */
618 movl %ecx,(%edx)
619 1:
620 popl %edi
621 popl %esi
622 ret
623 END(copyinstr)
624
625 /*
626 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
627 */
/*
 * int copystr(const void *from, void *to, size_t maxlen,
 *             size_t *lencopied) - MP SAFE
 *
 * Kernel-to-kernel variant of copyinstr: no user-address checks, no
 * fault recovery.  Returns 0 on success, ENAMETOOLONG if no NUL was
 * found within maxlen.  *lencopied (if non-NULL) gets the number of
 * bytes copied, including the NUL on success.
 */
628 ENTRY(copystr)
629 pushl %esi
630 pushl %edi
631
632 movl 12(%esp),%esi /* %esi = from */
633 movl 16(%esp),%edi /* %edi = to */
634 movl 20(%esp),%edx /* %edx = maxlen */
635 incl %edx /* pre-bias for the leading decl below */
636 cld
637 1:
638 decl %edx
639 jz 4f /* budget exhausted */
640 lodsb
641 stosb
642 orb %al,%al
643 jnz 1b /* keep going until the NUL */
644
645 /* Success -- 0 byte reached */
646 decl %edx /* NOTE: decrement cancelled by the +1 bias */
647 xorl %eax,%eax
648 jmp 6f
649 4:
650 /* edx is zero -- return ENAMETOOLONG */
651 movl $ENAMETOOLONG,%eax
652
653 6:
654 /* set *lencopied and return %eax */
655 movl 20(%esp),%ecx
656 subl %edx,%ecx /* bytes copied = maxlen - remaining */
657 movl 24(%esp),%edx
658 testl %edx,%edx
659 jz 7f /* lencopied may be NULL */
660 movl %ecx,(%edx)
661 7:
662 popl %edi
663 popl %esi
664 ret
665 END(copystr)
666
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 *
 * Compare two byte ranges: returns 0 if identical, 1 otherwise
 * (setne yields 0/1; no lexicographic ordering like memcmp).
 * Compares longword-wise, then the 0-3 tail bytes; if the longword
 * pass already found a difference the tail compare is skipped and
 * ZF from cmpsl feeds setne directly.  Clobbers %ecx, %edx.
 */
667 ENTRY(bcmp)
668 pushl %edi
669 pushl %esi
670 movl 12(%esp),%edi
671 movl 16(%esp),%esi
672 movl 20(%esp),%edx /* %edx = len, kept for the tail count */
673
674 movl %edx,%ecx
675 shrl $2,%ecx /* longword count */
676 cld /* compare forwards */
677 repe
678 cmpsl
679 jne 1f /* difference found in the longword pass */
680
681 movl %edx,%ecx
682 andl $3,%ecx /* 0-3 tail bytes */
683 repe
684 cmpsb
685 1:
686 setne %al /* %al = 1 iff last compare differed */
687 movsbl %al,%eax
688 popl %esi
689 popl %edi
690 ret
691 END(bcmp)
692
693 /*
694 * Handling of special 386 registers and descriptor tables etc
695 */
696 /* void lgdt(struct region_descriptor *rdp); */
/* void lgdt(struct region_descriptor *rdp)
 *
 * Load a new GDT and force every segment register to be reloaded
 * from it: data/stack selectors from KDSEL, %fs from KPSEL (per-CPU
 * data), and %cs via a far return through KCSEL.  Under XEN the
 * hypervisor owns the GDT, so the lgdt itself is compiled out but
 * the selector reloads still run.  Clobbers %eax.
 */
697 ENTRY(lgdt)
698 #ifndef XEN
699 /* reload the descriptor table */
700 movl 4(%esp),%eax
701 lgdt (%eax)
702 #endif
703
704 /* flush the prefetch q */
705 jmp 1f
706 nop
707 1:
708 /* reload "stale" selectors */
709 movl $KDSEL,%eax
710 movl %eax,%ds
711 movl %eax,%es
712 movl %eax,%gs
713 movl %eax,%ss
714 movl $KPSEL,%eax
715 movl %eax,%fs /* %fs = per-CPU data selector */
716
717 /* reload code selector by turning return into intersegmental return */
718 movl (%esp),%eax
719 pushl %eax /* frame is now: eip, eip; overwrite */
720 movl $KCSEL,4(%esp) /* the upper slot with the new %cs */
721 MEXITCOUNT
722 lret /* far return reloads %cs:%eip */
723 END(lgdt)
724
725 /* ssdtosd(*ssdp,*sdp) */
/* void ssdtosd(struct soft_segment_descriptor *ssdp,
 *              struct segment_descriptor *sdp)
 *
 * Pack a software-friendly segment descriptor into the i386
 * hardware descriptor format at *sdp.  The rotates and byte moves
 * scatter the base and limit fields into the split bit positions the
 * CPU expects -- bit layout follows the Intel descriptor format;
 * preserved verbatim, see the SDM for the field map.
 * Clobbers %eax, %ecx, %edx.
 */
726 ENTRY(ssdtosd)
727 pushl %ebx
728 movl 8(%esp),%ecx /* %ecx = ssdp */
729 movl 8(%ecx),%ebx
730 shll $16,%ebx
731 movl (%ecx),%edx
732 roll $16,%edx
733 movb %dh,%bl
734 movb %dl,%bh
735 rorl $8,%ebx
736 movl 4(%ecx),%eax
737 movw %ax,%dx
738 andl $0xf0000,%eax
739 orl %eax,%ebx
740 movl 12(%esp),%ecx /* %ecx = sdp */
741 movl %edx,(%ecx) /* low hardware descriptor word */
742 movl %ebx,4(%ecx) /* high hardware descriptor word */
743 popl %ebx
744 ret
745 END(ssdtosd)
746
747 /* void reset_dbregs() */
/* void reset_dbregs(void)
 *
 * Clear all hardware debug registers: %dr7 first so every breakpoint
 * is disabled before the address registers are wiped, then
 * %dr0-%dr3 (addresses) and %dr6 (status).  Clobbers %eax.
 */
748 ENTRY(reset_dbregs)
749 movl $0,%eax
750 movl %eax,%dr7 /* disable all breakpoints first */
751 movl %eax,%dr0
752 movl %eax,%dr1
753 movl %eax,%dr2
754 movl %eax,%dr3
755 movl %eax,%dr6 /* clear debug status */
756 ret
757 END(reset_dbregs)
758
759 /*****************************************************************************/
760 /* setjump, longjump */
761 /*****************************************************************************/
762
/* int setjmp(jmp_buf buf)
 *
 * Kernel setjmp: save the callee-saved registers, stack pointer and
 * return address into buf (six 32-bit slots) and return 0.  A later
 * longjmp on the same buf resumes here returning 1.
 */
763 ENTRY(setjmp)
764 movl 4(%esp),%eax /* %eax = buf */
765 movl %ebx,(%eax) /* save ebx */
766 movl %esp,4(%eax) /* save esp */
767 movl %ebp,8(%eax) /* save ebp */
768 movl %esi,12(%eax) /* save esi */
769 movl %edi,16(%eax) /* save edi */
770 movl (%esp),%edx /* get rta */
771 movl %edx,20(%eax) /* save eip */
772 xorl %eax,%eax /* return(0); */
773 ret
774 END(setjmp)
775
/* void longjmp(jmp_buf buf)
 *
 * Restore the register context saved by setjmp(buf) and resume at
 * the saved return address with return value 1, by rewriting the
 * return-address slot on the restored stack and ret-ing through it.
 */
776 ENTRY(longjmp)
777 movl 4(%esp),%eax /* %eax = buf */
778 movl (%eax),%ebx /* restore ebx */
779 movl 4(%eax),%esp /* restore esp */
780 movl 8(%eax),%ebp /* restore ebp */
781 movl 12(%eax),%esi /* restore esi */
782 movl 16(%eax),%edi /* restore edi */
783 movl 20(%eax),%edx /* get rta */
784 movl %edx,(%esp) /* put in return frame */
785 xorl %eax,%eax /* return(1); */
786 incl %eax
787 ret
788 END(longjmp)
789
790 /*
791 * Support for reading MSRs in the safe manner.
792 */
793 ENTRY(rdmsr_safe)
794 /* int rdmsr_safe(u_int msr, uint64_t *data) */
/*
 * Read MSR 'msr' into *data (%edx:%eax = high:low halves).
 * pcb_onfault points at msr_onfault for the duration, so a #GP on a
 * non-existent MSR is recovered and EFAULT returned instead of
 * panicking; returns 0 on success.
 */
795 movl PCPU(CURPCB),%ecx
796 movl $msr_onfault,PCB_ONFAULT(%ecx) /* arm fault recovery */
797
798 movl 4(%esp),%ecx /* %ecx = MSR number */
799 rdmsr /* may #GP -> msr_onfault */
800 movl 8(%esp),%ecx
801 movl %eax,(%ecx) /* low 32 bits */
802 movl %edx,4(%ecx) /* high 32 bits */
803 xorl %eax,%eax /* success */
804
805 movl PCPU(CURPCB),%ecx
806 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
807
808 ret
809
810 /*
811 * Support for writing MSRs in the safe manner.
812 */
813 ENTRY(wrmsr_safe)
814 /* int wrmsr_safe(u_int msr, uint64_t data) */
/*
 * Write the 64-bit value 'data' (%edx:%eax) to MSR 'msr' with fault
 * recovery armed via pcb_onfault: a #GP from an invalid MSR or value
 * is turned into an EFAULT return; returns 0 on success.
 */
815 movl PCPU(CURPCB),%ecx
816 movl $msr_onfault,PCB_ONFAULT(%ecx) /* arm fault recovery */
817
818 movl 4(%esp),%ecx /* %ecx = MSR number */
819 movl 8(%esp),%eax /* low 32 bits of data */
820 movl 12(%esp),%edx /* high 32 bits of data */
821 wrmsr /* may #GP -> msr_onfault */
822 xorl %eax,%eax /* success */
823
824 movl PCPU(CURPCB),%ecx
825 movl %eax,PCB_ONFAULT(%ecx) /* disarm fault recovery */
826
827 ret
828
829 /*
830 * MSR operations fault handler
831 */
832 ALIGN_TEXT
/*
 * Trap-recovery landing pad for rdmsr_safe/wrmsr_safe: disarm
 * pcb_onfault and report EFAULT to the caller.
 */
833 msr_onfault:
834 movl PCPU(CURPCB),%ecx
835 movl $0,PCB_ONFAULT(%ecx)
836 movl $EFAULT,%eax
837 ret
/* Cache object: 7e0b7607d6cdce07191764816d0e66d0
   (trailer left by the source cross-reference scraper; not part of the
   original support.s) */