1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/11.0/sys/i386/i386/support.s 288917 2015-10-06 11:29:44Z royger $
30 */
31
32 #include "opt_npx.h"
33
34 #include <machine/asmacros.h>
35 #include <machine/cputypes.h>
36 #include <machine/pmap.h>
37 #include <machine/specialreg.h>
38
39 #include "assym.s"
40
41 #define IDXSHIFT 10
42
43 .text
44
45 /*
46 * bcopy family
47 * void bzero(void *buf, u_int len)
48 */
/*
 * Zero 'len' bytes at 'buf'.  Stores 32-bit longwords first, then the
 * remaining 0-3 bytes.  Clobbers %eax, %ecx; leaves DF cleared.
 */
49 ENTRY(bzero)
50 pushl %edi
51 movl 8(%esp),%edi		/* %edi = buf (after saved %edi) */
52 movl 12(%esp),%ecx		/* %ecx = len */
53 xorl %eax,%eax		/* store value = 0 */
54 shrl $2,%ecx		/* longword count */
55 cld
56 rep
57 stosl
58 movl 12(%esp),%ecx		/* reload len for the tail */
59 andl $3,%ecx		/* 0-3 leftover bytes */
60 rep
61 stosb
62 popl %edi
63 ret
64 END(bzero)
65
/*
 * void sse2_pagezero(void *page)
 * Zero one 4096-byte page using non-temporal stores (movnti) so the
 * zeroed data bypasses the cache; sfence orders the stores before return.
 * Clobbers %eax, %ecx; requires SSE2.
 */
66 ENTRY(sse2_pagezero)
67 pushl %ebx
68 movl 8(%esp),%ecx		/* %ecx = cursor, starts at page base */
69 movl %ecx,%eax
70 addl $4096,%eax		/* %eax = end address (base + PAGE_SIZE) */
71 xor %ebx,%ebx		/* value to store = 0 */
72 1:
73 movnti %ebx,(%ecx)		/* non-temporal 32-bit store */
74 addl $4,%ecx
75 cmpl %ecx,%eax
76 jne 1b
77 sfence			/* make the movnti stores globally visible */
78 popl %ebx
79 ret
80 END(sse2_pagezero)
81
/*
 * void i686_pagezero(void *page)
 * Zero one 4096-byte page, but scan first with 'repe scasl' and write
 * only where nonzero data is found, avoiding dirtying already-zero
 * cache lines.  Clobbers %eax, %ecx, %edx; leaves DF cleared.
 */
82 ENTRY(i686_pagezero)
83 pushl %edi
84 pushl %ebx
85
86 movl 12(%esp),%edi		/* %edi = page base */
87 movl $1024,%ecx		/* 1024 longwords per page */
88 cld
89
90 ALIGN_TEXT
91 1:
92 xorl %eax,%eax
93 repe			/* scan forward while longwords are zero */
94 scasl
95 jnz 2f			/* found a nonzero longword */
96
97 popl %ebx
98 popl %edi
99 ret			/* whole remainder was already zero */
100
101 ALIGN_TEXT
102
103 2:
104 incl %ecx			/* back up over the nonzero longword... */
105 subl $4,%edi		/* ...so it gets zeroed below */
106
107 movl %ecx,%edx		/* %edx = longwords remaining */
108 cmpl $16,%ecx
109
110 jge 3f			/* >= one full 64-byte line left: fill all of it */
111
112 movl %edi,%ebx
113 andl $0x3f,%ebx		/* byte offset within the 64-byte cache line */
114 shrl %ebx
115 shrl %ebx			/* convert to longword offset */
116 movl $16,%ecx
117 subl %ebx,%ecx		/* fill only to the end of this cache line */
118
119 3:
120 subl %ecx,%edx		/* %edx = longwords left after this fill */
121 rep
122 stosl
123
124 movl %edx,%ecx
125 testl %edx,%edx
126 jnz 1b			/* resume scanning the rest of the page */
127
128 popl %ebx
129 popl %edi
130 ret
131 END(i686_pagezero)
132
133 /* fillw(pat, base, cnt) */
/*
 * Store 'cnt' copies of the 16-bit pattern 'pat' starting at 'base'.
 * Clobbers %eax, %ecx; leaves DF cleared.
 */
134 ENTRY(fillw)
135 pushl %edi
136 movl 8(%esp),%eax		/* %ax = pattern */
137 movl 12(%esp),%edi		/* %edi = base */
138 movl 16(%esp),%ecx		/* %ecx = word count */
139 cld
140 rep
141 stosw
142 popl %edi
143 ret
144 END(fillw)
145
/*
 * bcopyb(src, dst, cnt) - byte-at-a-time bcopy, safe for overlapping
 * regions.  Copies backwards (DF set) when dst lands inside [src, src+cnt).
 * Clobbers %eax, %ecx; DF is restored (cleared) before return.
 */
146 ENTRY(bcopyb)
147 pushl %esi
148 pushl %edi
149 movl 12(%esp),%esi		/* %esi = src */
150 movl 16(%esp),%edi		/* %edi = dst */
151 movl 20(%esp),%ecx		/* %ecx = cnt */
152 movl %edi,%eax
153 subl %esi,%eax		/* %eax = dst - src (unsigned) */
154 cmpl %ecx,%eax /* overlapping && src < dst? */
155 jb 1f
156 cld /* nope, copy forwards */
157 rep
158 movsb
159 popl %edi
160 popl %esi
161 ret
162
163 ALIGN_TEXT
164 1:
165 addl %ecx,%edi /* copy backwards. */
166 addl %ecx,%esi
167 decl %edi			/* point at the last byte of each region */
168 decl %esi
169 std			/* DF=1: movsb decrements %esi/%edi */
170 rep
171 movsb
172 popl %edi
173 popl %esi
174 cld			/* restore the ABI-expected DF=0 */
175 ret
176 END(bcopyb)
177
178 /*
179 * bcopy(src, dst, cnt)
180 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
181 */
/*
 * Overlap-safe copy.  Forward path copies longwords then the 0-3 byte
 * tail; backward path copies the tail bytes first, then longwords.
 * Clobbers %eax, %ecx; DF restored to 0 before return.
 */
182 ENTRY(bcopy)
183 pushl %ebp
184 movl %esp,%ebp
185 pushl %esi
186 pushl %edi
187 movl 8(%ebp),%esi		/* %esi = src */
188 movl 12(%ebp),%edi		/* %edi = dst */
189 movl 16(%ebp),%ecx		/* %ecx = cnt */
190
191 movl %edi,%eax
192 subl %esi,%eax		/* %eax = dst - src (unsigned) */
193 cmpl %ecx,%eax /* overlapping && src < dst? */
194 jb 1f
195
196 shrl $2,%ecx /* copy by 32-bit words */
197 cld /* nope, copy forwards */
198 rep
199 movsl
200 movl 16(%ebp),%ecx
201 andl $3,%ecx /* any bytes left? */
202 rep
203 movsb
204 popl %edi
205 popl %esi
206 popl %ebp
207 ret
208
209 ALIGN_TEXT
210 1:
211 addl %ecx,%edi /* copy backwards */
212 addl %ecx,%esi
213 decl %edi			/* point at the last byte of each region */
214 decl %esi
215 andl $3,%ecx /* any fractional bytes? */
216 std			/* DF=1: string ops run downwards */
217 rep
218 movsb			/* copy the 0-3 tail bytes first */
219 movl 16(%ebp),%ecx /* copy remainder by 32-bit words */
220 shrl $2,%ecx
221 subl $3,%esi			/* step back to the last full longword */
222 subl $3,%edi
223 rep
224 movsl
225 popl %edi
226 popl %esi
227 cld			/* restore the ABI-expected DF=0 */
228 popl %ebp
229 ret
230 END(bcopy)
231
232 /*
233 * Note: memcpy does not support overlapping copies
234 */
/*
 * memcpy(dst, src, cnt) - forward-only copy; returns dst in %eax
 * (C memcpy contract).  Note the argument order differs from bcopy:
 * destination is the first argument.  Clobbers %ecx; leaves DF cleared.
 */
235 ENTRY(memcpy)
236 pushl %edi
237 pushl %esi
238 movl 12(%esp),%edi		/* %edi = dst (first argument) */
239 movl 16(%esp),%esi		/* %esi = src */
240 movl 20(%esp),%ecx		/* %ecx = cnt */
241 movl %edi,%eax		/* return value = dst */
242 shrl $2,%ecx /* copy by 32-bit words */
243 cld /* nope, copy forwards */
244 rep
245 movsl
246 movl 20(%esp),%ecx
247 andl $3,%ecx /* any bytes left? */
248 rep
249 movsb
250 popl %esi
251 popl %edi
252 ret
253 END(memcpy)
254
255 /*****************************************************************************/
256 /* copyout and fubyte family */
257 /*****************************************************************************/
258 /*
259 * Access user memory from inside the kernel. These routines and possibly
260 * the math- and DOS emulators should be the only places that do this.
261 *
262 * We have to access the memory with user's permissions, so use a segment
263 * selector with RPL 3. For writes to user space we have to additionally
264 * check the PTE for write permission, because the 386 does not check
265 * write permissions when we are executing with EPL 0. The 486 does check
266 * this if the WP bit is set in CR0, so we can use a simpler version here.
267 *
268 * These routines set curpcb->pcb_onfault for the time they execute. When a
269 * protection violation occurs inside the functions, the trap handler
270 * returns to *curpcb->pcb_onfault instead of the function.
271 */
272
273 /*
274 * copyout(from_kernel, to_user, len) - MP SAFE
275 */
/*
 * Copy 'len' bytes from kernel space to user space.  Returns 0 on
 * success, EFAULT on a bad user address (via the copyout_fault handler
 * installed in curpcb->pcb_onfault).  Clobbers %eax, %ecx, %edx.
 */
276 ENTRY(copyout)
277 movl PCPU(CURPCB),%eax
278 movl $copyout_fault,PCB_ONFAULT(%eax)	/* arm the fault handler */
279 pushl %esi
280 pushl %edi
281 pushl %ebx
282 movl 16(%esp),%esi		/* %esi = from (kernel) */
283 movl 20(%esp),%edi		/* %edi = to (user) */
284 movl 24(%esp),%ebx		/* %ebx = len */
285 testl %ebx,%ebx /* anything to do? */
286 jz done_copyout
287
288 /*
289 * Check explicitly for non-user addresses. If 486 write protection
290 * is being used, this check is essential because we are in kernel
291 * mode so the h/w does not provide any protection against writing
292 * kernel addresses.
293 */
294
295 /*
296 * First, prevent address wrapping.
297 */
298 movl %edi,%eax
299 addl %ebx,%eax		/* %eax = to + len; CF set on 32-bit wrap */
300 jc copyout_fault
301 /*
302 * XXX STOP USING VM_MAXUSER_ADDRESS.
303 * It is an end address, not a max, so every time it is used correctly it
304 * looks like there is an off by one error, and of course it caused an off
305 * by one error in several places.
306 */
307 cmpl $VM_MAXUSER_ADDRESS,%eax
308 ja copyout_fault		/* range extends past user space */
309
310 /* bcopy(%esi, %edi, %ebx) */
311 movl %ebx,%ecx
312
313 shrl $2,%ecx			/* copy longwords, then the tail bytes */
314 cld
315 rep
316 movsl
317 movb %bl,%cl
318 andb $3,%cl
319 rep
320 movsb
321
322 done_copyout:
323 popl %ebx
324 popl %edi
325 popl %esi
326 xorl %eax,%eax		/* return 0 */
327 movl PCPU(CURPCB),%edx
328 movl %eax,PCB_ONFAULT(%edx)	/* disarm the fault handler */
329 ret
330 END(copyout)
331
332 ALIGN_TEXT
/*
 * Fault recovery for copyout: the trap handler jumps here (via
 * pcb_onfault) with the three registers copyout pushed still on the
 * stack.  Unwind, disarm the handler, return EFAULT.
 */
333 copyout_fault:
334 popl %ebx
335 popl %edi
336 popl %esi
337 movl PCPU(CURPCB),%edx
338 movl $0,PCB_ONFAULT(%edx)
339 movl $EFAULT,%eax
340 ret
341
342 /*
343 * copyin(from_user, to_kernel, len) - MP SAFE
344 */
/*
 * Copy 'len' bytes from user space to kernel space.  Returns 0 on
 * success, EFAULT on a bad user address (via copyin_fault installed in
 * curpcb->pcb_onfault).  Clobbers %eax, %ecx, %edx.
 */
345 ENTRY(copyin)
346 movl PCPU(CURPCB),%eax
347 movl $copyin_fault,PCB_ONFAULT(%eax)	/* arm the fault handler */
348 pushl %esi
349 pushl %edi
350 movl 12(%esp),%esi /* caddr_t from */
351 movl 16(%esp),%edi /* caddr_t to */
352 movl 20(%esp),%ecx /* size_t len */
353
354 /*
355 * make sure address is valid
356 */
357 movl %esi,%edx
358 addl %ecx,%edx		/* %edx = from + len; CF set on wrap */
359 jc copyin_fault
360 cmpl $VM_MAXUSER_ADDRESS,%edx
361 ja copyin_fault		/* range extends past user space */
362
363 movb %cl,%al			/* stash len's low bits for the tail */
364 shrl $2,%ecx /* copy longword-wise */
365 cld
366 rep
367 movsl
368 movb %al,%cl
369 andb $3,%cl /* copy remaining bytes */
370 rep
371 movsb
372
373 popl %edi
374 popl %esi
375 xorl %eax,%eax		/* return 0 */
376 movl PCPU(CURPCB),%edx
377 movl %eax,PCB_ONFAULT(%edx)	/* disarm the fault handler */
378 ret
379 END(copyin)
380
381 ALIGN_TEXT
/*
 * Fault recovery for copyin: unwind the two saved registers, disarm
 * pcb_onfault, return EFAULT.
 */
382 copyin_fault:
383 popl %edi
384 popl %esi
385 movl PCPU(CURPCB),%edx
386 movl $0,PCB_ONFAULT(%edx)
387 movl $EFAULT,%eax
388 ret
389
390 /*
391 * casueword. Compare and set user word. Returns -1 on fault,
392 * 0 on non-faulting access. The current value is in *oldp.
393 */
/*
 * int casueword(volatile u_long *p, u_long old, u_long *oldp, u_long new)
 * casueword32 is an alias - on i386 a word and a 32-bit word coincide.
 * Atomically: if *p == old then *p = new.  Whatever *p held before the
 * operation is stored through oldp.  Faults unwind through fusufault
 * (returns -1).  Clobbers %ecx, %edx.
 */
394 ALTENTRY(casueword32)
395 ENTRY(casueword)
396 movl PCPU(CURPCB),%ecx
397 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
398 movl 4(%esp),%edx /* dst */
399 movl 8(%esp),%eax /* old */
400 movl 16(%esp),%ecx /* new */
401
402 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
403 ja fusufault
404
405 #ifdef SMP
406 lock
407 #endif
408 cmpxchgl %ecx,(%edx) /* Compare and set. */
409
410 /*
411 * The old value is in %eax. If the store succeeded it will be the
412 * value we expected (old) from before the store, otherwise it will
413 * be the current value.
414 */
415
416 movl PCPU(CURPCB),%ecx
417 movl $0,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
418 movl 12(%esp),%edx /* oldp */
419 movl %eax,(%edx)		/* report the pre-operation value */
420 xorl %eax,%eax		/* return 0 = no fault */
421 ret
422 END(casueword32)
423 END(casueword)
424
425 /*
426 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
427 * memory.
428 */
429
/*
 * int fueword(const void *p, long *val) - fueword32 is an alias.
 * Load a 32-bit word from user address p into *val.  Returns 0 on
 * success, -1 on fault (via fusufault).  Clobbers %ecx, %edx.
 */
430 ALTENTRY(fueword32)
431 ENTRY(fueword)
432 movl PCPU(CURPCB),%ecx
433 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
434 movl 4(%esp),%edx /* from */
435
436 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
437 ja fusufault
438
439 movl (%edx),%eax		/* the access that may fault */
440 movl $0,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
441 movl 8(%esp),%edx
442 movl %eax,(%edx)		/* *val = fetched word */
443 xorl %eax,%eax		/* return 0 */
444 ret
445 END(fueword32)
446 END(fueword)
447
448 /*
449 * fuswintr() and suswintr() are specialized variants of fuword16() and
450 * suword16(), respectively. They are called from the profiling code,
451 * potentially at interrupt time. If they fail, that's okay; good things
452 * will happen later. They always fail for now, until the trap code is
453 * able to deal with this.
454 */
/* Deliberate stubs: unconditionally return -1 (failure). */
455 ALTENTRY(suswintr)
456 ENTRY(fuswintr)
457 movl $-1,%eax
458 ret
459 END(suswintr)
460 END(fuswintr)
461
/*
 * int fuword16(const void *p)
 * Fetch a 16-bit word from user address p, zero-extended into %eax.
 * Returns -1 on fault (via fusufault).  Clobbers %ecx, %edx.
 */
462 ENTRY(fuword16)
463 movl PCPU(CURPCB),%ecx
464 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
465 movl 4(%esp),%edx
466
467 cmpl $VM_MAXUSER_ADDRESS-2,%edx	/* whole 16-bit access must fit */
468 ja fusufault
469
470 movzwl (%edx),%eax		/* zero-extended load; may fault */
471 movl $0,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
472 ret
473 END(fuword16)
474
/*
 * int fubyte(const void *p)
 * Fetch a byte from user address p, zero-extended into %eax.
 * Returns -1 on fault (via fusufault).  Clobbers %ecx, %edx.
 */
475 ENTRY(fubyte)
476 movl PCPU(CURPCB),%ecx
477 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
478 movl 4(%esp),%edx
479
480 cmpl $VM_MAXUSER_ADDRESS-1,%edx
481 ja fusufault
482
483 movzbl (%edx),%eax		/* zero-extended load; may fault */
484 movl $0,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
485 ret
486 END(fubyte)
487
488 ALIGN_TEXT
/*
 * Shared fault handler for the fu*/su*/casueword family: disarm
 * pcb_onfault and return -1.  These routines push nothing, so there
 * is no stack to unwind here.
 */
489 fusufault:
490 movl PCPU(CURPCB),%ecx
491 xorl %eax,%eax
492 movl %eax,PCB_ONFAULT(%ecx)
493 decl %eax			/* %eax = -1 */
494 ret
495
496 /*
497 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
498 * All these functions are MPSAFE.
499 */
500
/*
 * int suword(void *p, long v) - suword32 is an alias.
 * Store a 32-bit word at user address p.  Returns 0 on success, -1 on
 * fault (via fusufault).  Clobbers %ecx, %edx.
 */
501 ALTENTRY(suword32)
502 ENTRY(suword)
503 movl PCPU(CURPCB),%ecx
504 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
505 movl 4(%esp),%edx
506
507 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
508 ja fusufault
509
510 movl 8(%esp),%eax
511 movl %eax,(%edx)		/* the store that may fault */
512 xorl %eax,%eax		/* return 0 */
513 movl PCPU(CURPCB),%ecx
514 movl %eax,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
515 ret
516 END(suword32)
517 END(suword)
518
/*
 * int suword16(void *p, int v)
 * Store a 16-bit word at user address p.  Returns 0 on success, -1 on
 * fault (via fusufault).  Clobbers %ecx, %edx.
 */
519 ENTRY(suword16)
520 movl PCPU(CURPCB),%ecx
521 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
522 movl 4(%esp),%edx
523
524 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
525 ja fusufault
526
527 movw 8(%esp),%ax
528 movw %ax,(%edx)		/* the store that may fault */
529 xorl %eax,%eax		/* return 0 */
530 movl PCPU(CURPCB),%ecx /* restore trashed register */
531 movl %eax,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
532 ret
533 END(suword16)
534
/*
 * int subyte(void *p, int v)
 * Store a byte at user address p.  Returns 0 on success, -1 on fault
 * (via fusufault).  Clobbers %ecx, %edx.
 */
535 ENTRY(subyte)
536 movl PCPU(CURPCB),%ecx
537 movl $fusufault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
538 movl 4(%esp),%edx
539
540 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
541 ja fusufault
542
543 movb 8(%esp),%al
544 movb %al,(%edx)		/* the store that may fault */
545 xorl %eax,%eax		/* return 0 */
546 movl PCPU(CURPCB),%ecx /* restore trashed register */
547 movl %eax,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
548 ret
549 END(subyte)
550
551 /*
552 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
553 *
554 * copy a string from from to to, stop when a 0 character is reached.
555 * return ENAMETOOLONG if string is longer than maxlen, and
556 * EFAULT on protection violations. If lencopied is non-zero,
557 * return the actual length in *lencopied.
558 */
/*
 * Copies a NUL-terminated string from user space to kernel space.
 * Faults unwind through cpystrflt via curpcb->pcb_onfault.
 * Clobbers %eax, %ecx, %edx; leaves DF cleared.
 */
559 ENTRY(copyinstr)
560 pushl %esi
561 pushl %edi
562 movl PCPU(CURPCB),%ecx
563 movl $cpystrflt,PCB_ONFAULT(%ecx)	/* arm the fault handler */
564
565 movl 12(%esp),%esi /* %esi = from */
566 movl 16(%esp),%edi /* %edi = to */
567 movl 20(%esp),%edx /* %edx = maxlen */
568
569 movl $VM_MAXUSER_ADDRESS,%eax
570
571 /* make sure 'from' is within bounds */
572 subl %esi,%eax		/* %eax = bytes of user space after 'from' */
573 jbe cpystrflt			/* 'from' at/above user limit: EFAULT */
574
575 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
576 cmpl %edx,%eax
577 jae 1f
578 movl %eax,%edx
579 movl %eax,20(%esp)		/* overwrite arg slot so the lencopied
				   calculation below uses the clamped value */
580 1:
581 incl %edx			/* pre-increment: loop decrements first */
582 cld
583
584 2:
585 decl %edx
586 jz 3f			/* ran out of maxlen */
587
588 lodsb			/* byte from user; may fault */
589 stosb
590 orb %al,%al
591 jnz 2b			/* keep going until the NUL is copied */
592
593 /* Success -- 0 byte reached */
594 decl %edx			/* so maxlen - %edx = bytes copied */
595 xorl %eax,%eax
596 jmp cpystrflt_x
597 3:
598 /* edx is zero - return ENAMETOOLONG or EFAULT */
599 cmpl $VM_MAXUSER_ADDRESS,%esi	/* did the clamp cut us short? */
600 jae cpystrflt
601 4:
602 movl $ENAMETOOLONG,%eax
603 jmp cpystrflt_x
604
605 cpystrflt:
606 movl $EFAULT,%eax
607
608 cpystrflt_x:
609 /* set *lencopied and return %eax */
610 movl PCPU(CURPCB),%ecx
611 movl $0,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
612 movl 20(%esp),%ecx		/* (possibly clamped) maxlen */
613 subl %edx,%ecx		/* %ecx = bytes actually copied */
614 movl 24(%esp),%edx
615 testl %edx,%edx
616 jz 1f			/* lencopied may be NULL */
617 movl %ecx,(%edx)
618 1:
619 popl %edi
620 popl %esi
621 ret
622 END(copyinstr)
623
624 /*
625 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
626 */
/*
 * Kernel-to-kernel variant of copyinstr: no user-address checks and no
 * fault handler.  Returns 0 or ENAMETOOLONG; stores the copied length
 * (including the NUL) through lencopied if it is non-NULL.
 * Clobbers %eax, %ecx, %edx; leaves DF cleared.
 */
627 ENTRY(copystr)
628 pushl %esi
629 pushl %edi
630
631 movl 12(%esp),%esi /* %esi = from */
632 movl 16(%esp),%edi /* %edi = to */
633 movl 20(%esp),%edx /* %edx = maxlen */
634 incl %edx			/* pre-increment: loop decrements first */
635 cld
636 1:
637 decl %edx
638 jz 4f			/* ran out of maxlen */
639 lodsb
640 stosb
641 orb %al,%al
642 jnz 1b			/* keep going until the NUL is copied */
643
644 /* Success -- 0 byte reached */
645 decl %edx			/* so maxlen - %edx = bytes copied */
646 xorl %eax,%eax
647 jmp 6f
648 4:
649 /* edx is zero -- return ENAMETOOLONG */
650 movl $ENAMETOOLONG,%eax
651
652 6:
653 /* set *lencopied and return %eax */
654 movl 20(%esp),%ecx
655 subl %edx,%ecx		/* %ecx = bytes actually copied */
656 movl 24(%esp),%edx
657 testl %edx,%edx
658 jz 7f			/* lencopied may be NULL */
659 movl %ecx,(%edx)
660 7:
661 popl %edi
662 popl %esi
663 ret
664 END(copystr)
665
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 * Compare two byte strings: longword compares first, then the 0-3 byte
 * tail.  Returns 0 if identical, nonzero (1) otherwise.
 * Clobbers %eax, %ecx, %edx; leaves DF cleared.
 */
666 ENTRY(bcmp)
667 pushl %edi
668 pushl %esi
669 movl 12(%esp),%edi		/* %edi = b1 */
670 movl 16(%esp),%esi		/* %esi = b2 */
671 movl 20(%esp),%edx		/* %edx = len */
672
673 movl %edx,%ecx
674 shrl $2,%ecx			/* longword count */
675 cld /* compare forwards */
676 repe
677 cmpsl
678 jne 1f			/* mismatch found: ZF already clear */
679
680 movl %edx,%ecx
681 andl $3,%ecx			/* 0-3 leftover bytes */
682 repe
683 cmpsb
684 1:
685 setne %al			/* %al = 1 iff a mismatch was seen */
686 movsbl %al,%eax
687 popl %esi
688 popl %edi
689 ret
690 END(bcmp)
691
692 /*
693 * Handling of special 386 registers and descriptor tables etc
694 */
695 /* void lgdt(struct region_descriptor *rdp); */
/*
 * Load a new GDT and then reload every segment register so no selector
 * caches a descriptor from the old table.  %cs is reloaded by rewriting
 * the return frame into a far return (lret).  Clobbers %eax.
 */
696 ENTRY(lgdt)
697 /* reload the descriptor table */
698 movl 4(%esp),%eax
699 lgdt (%eax)
700
701 /* flush the prefetch q */
702 jmp 1f
703 nop
704 1:
705 /* reload "stale" selectors */
706 movl $KDSEL,%eax		/* kernel data selector */
707 movl %eax,%ds
708 movl %eax,%es
709 movl %eax,%gs
710 movl %eax,%ss
711 movl $KPSEL,%eax		/* per-CPU (PCPU) data selector */
712 movl %eax,%fs
713
714 /* reload code selector by turning return into intersegmental return */
715 movl (%esp),%eax
716 pushl %eax			/* stack now: eip, eip */
717 movl $KCSEL,4(%esp)		/* stack now: eip, cs - a far-return frame */
718 MEXITCOUNT
719 lret			/* far return reloads %cs */
720 END(lgdt)
721
722 /* ssdtosd(*ssdp,*sdp) */
/*
 * Convert a machine-independent software segment descriptor (*ssdp)
 * into the hardware segment-descriptor layout, storing the two
 * resulting 32-bit words at *sdp.  The shifting/rotating below
 * scatters the base, limit and attribute fields into the split
 * positions the i386 descriptor format requires.  Clobbers %eax,
 * %ecx, %edx.
 */
723 ENTRY(ssdtosd)
724 pushl %ebx
725 movl 8(%esp),%ecx		/* %ecx = ssdp */
726 movl 8(%ecx),%ebx
727 shll $16,%ebx
728 movl (%ecx),%edx
729 roll $16,%edx
730 movb %dh,%bl
731 movb %dl,%bh
732 rorl $8,%ebx
733 movl 4(%ecx),%eax
734 movw %ax,%dx
735 andl $0xf0000,%eax		/* keep limit bits 16-19 */
736 orl %eax,%ebx
737 movl 12(%esp),%ecx		/* %ecx = sdp */
738 movl %edx,(%ecx)		/* low word of the hardware descriptor */
739 movl %ebx,4(%ecx)		/* high word of the hardware descriptor */
740 popl %ebx
741 ret
742 END(ssdtosd)
743
744 /* void reset_dbregs() */
/*
 * Clear all hardware debug registers.  %dr7 is cleared first so no
 * breakpoint can fire while the address registers still hold stale
 * values.  %dr4/%dr5 are aliases of %dr6/%dr7 and need no clearing.
 * Clobbers %eax.
 */
745 ENTRY(reset_dbregs)
746 movl $0,%eax
747 movl %eax,%dr7 /* disable all breakpoints first */
748 movl %eax,%dr0
749 movl %eax,%dr1
750 movl %eax,%dr2
751 movl %eax,%dr3
752 movl %eax,%dr6		/* clear latched breakpoint status */
753 ret
754 END(reset_dbregs)
755
756 /*****************************************************************************/
757 /* setjump, longjump */
758 /*****************************************************************************/
759
/*
 * int setjmp(jmp_buf env)
 * Save the callee-saved registers, %esp and the return address into
 * the 6-word env buffer, then return 0.  A later longjmp(env) resumes
 * here with return value 1.  Clobbers %edx.
 */
760 ENTRY(setjmp)
761 movl 4(%esp),%eax		/* %eax = env */
762 movl %ebx,(%eax) /* save ebx */
763 movl %esp,4(%eax) /* save esp */
764 movl %ebp,8(%eax) /* save ebp */
765 movl %esi,12(%eax) /* save esi */
766 movl %edi,16(%eax) /* save edi */
767 movl (%esp),%edx /* get rta */
768 movl %edx,20(%eax) /* save eip */
769 xorl %eax,%eax /* return(0); */
770 ret
771 END(setjmp)
772
/*
 * void longjmp(jmp_buf env)
 * Restore the register state saved by setjmp(env) and return to the
 * saved %eip with return value 1 (never returns to its own caller).
 * Clobbers %edx.
 */
773 ENTRY(longjmp)
774 movl 4(%esp),%eax		/* %eax = env */
775 movl (%eax),%ebx /* restore ebx */
776 movl 4(%eax),%esp /* restore esp */
777 movl 8(%eax),%ebp /* restore ebp */
778 movl 12(%eax),%esi /* restore esi */
779 movl 16(%eax),%edi /* restore edi */
780 movl 20(%eax),%edx /* get rta */
781 movl %edx,(%esp) /* put in return frame */
782 xorl %eax,%eax /* return(1); */
783 incl %eax
784 ret			/* returns to setjmp's caller */
785 END(longjmp)
786
787 /*
788 * Support for reading MSRs in the safe manner.
789 */
/*
 * Reads the MSR numbered 'msr' into *data.  A GP fault from rdmsr (bad
 * MSR number) unwinds through msr_onfault and returns -1; otherwise
 * returns 0.  Clobbers %ecx, %edx.
 */
790 ENTRY(rdmsr_safe)
791 /* int rdmsr_safe(u_int msr, uint64_t *data) */
792 movl PCPU(CURPCB),%ecx
793 movl $msr_onfault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
794
795 movl 4(%esp),%ecx		/* %ecx = msr number */
796 rdmsr			/* result in %edx:%eax; may fault */
797 movl 8(%esp),%ecx		/* %ecx = data */
798 movl %eax,(%ecx)		/* low 32 bits */
799 movl %edx,4(%ecx)		/* high 32 bits */
800 xorl %eax,%eax		/* return 0 */
801
802 movl PCPU(CURPCB),%ecx
803 movl %eax,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
804
805 ret
806
807 /*
808 * Support for writing MSRs in the safe manner.
809 */
/*
 * Writes the 64-bit 'data' to the MSR numbered 'msr'.  A GP fault from
 * wrmsr unwinds through msr_onfault and returns -1; otherwise returns
 * 0.  Clobbers %ecx, %edx.
 */
810 ENTRY(wrmsr_safe)
811 /* int wrmsr_safe(u_int msr, uint64_t data) */
812 movl PCPU(CURPCB),%ecx
813 movl $msr_onfault,PCB_ONFAULT(%ecx)	/* arm the fault handler */
814
815 movl 4(%esp),%ecx		/* %ecx = msr number */
816 movl 8(%esp),%eax		/* low 32 bits of data */
817 movl 12(%esp),%edx		/* high 32 bits of data */
818 wrmsr			/* may fault */
819 xorl %eax,%eax		/* return 0 */
820
821 movl PCPU(CURPCB),%ecx
822 movl %eax,PCB_ONFAULT(%ecx)	/* disarm the fault handler */
823
824 ret
825
826 /*
827 * MSR operations fault handler
828 */
/*
 * Shared fault recovery for rdmsr_safe/wrmsr_safe: disarm pcb_onfault
 * and return EFAULT.  Neither caller pushes anything, so there is no
 * stack to unwind.
 */
829 ALIGN_TEXT
830 msr_onfault:
831 movl PCPU(CURPCB),%ecx
832 movl $0,PCB_ONFAULT(%ecx)
833 movl $EFAULT,%eax
834 ret
/* Cache object: 5749bd58e7c0de5a4598aa4b23be23c7 (source cross-reference scraper residue) */