1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/9.2/sys/i386/i386/support.s 249532 2013-04-16 06:20:35Z kib $
30 */
31
32 #include "opt_npx.h"
33
34 #include <machine/asmacros.h>
35 #include <machine/cputypes.h>
36 #include <machine/intr_machdep.h>
37 #include <machine/pmap.h>
38 #include <machine/specialreg.h>
39
40 #include "assym.s"
41
42 #define IDXSHIFT 10
43
44 .text
45
46 /*
47 * bcopy family
48 * void bzero(void *buf, u_int len)
49 */
/*
 * void bzero(void *buf, u_int len)
 * Zero 'len' bytes at 'buf': bulk-fill with 32-bit stores, then the
 * 0-3 trailing bytes. Clobbers %eax, %ecx; %edi preserved via push/pop.
 */
50 ENTRY(bzero)
51 	pushl	%edi
52 	movl	8(%esp),%edi		/* %edi = buf (8 = ret addr + saved %edi) */
53 	movl	12(%esp),%ecx		/* %ecx = len */
54 	xorl	%eax,%eax		/* fill pattern = 0 */
55 	shrl	$2,%ecx			/* dword count = len / 4 */
56 	cld				/* copy forwards */
57 	rep
58 	stosl
59 	movl	12(%esp),%ecx		/* reload len */
60 	andl	$3,%ecx			/* remaining 0-3 tail bytes */
61 	rep
62 	stosb
63 	popl	%edi
64 	ret
65 END(bzero)
66
/*
 * void sse2_pagezero(void *page)
 * Zero one 4096-byte page using MOVNTI non-temporal stores so the
 * zeroed page does not displace useful data from the caches.  The
 * trailing SFENCE orders the weakly-ordered non-temporal stores with
 * respect to subsequent stores, as required after MOVNTI.
 */
67 ENTRY(sse2_pagezero)
68 	pushl	%ebx
69 	movl	8(%esp),%ecx		/* %ecx = cursor, starts at page base */
70 	movl	%ecx,%eax
71 	addl	$4096,%eax		/* %eax = end address (base + page size) */
72 	xor	%ebx,%ebx		/* %ebx = 0, the value stored */
73 1:
74 	movnti	%ebx,(%ecx)		/* non-temporal dword store, bypasses cache */
75 	addl	$4,%ecx
76 	cmpl	%ecx,%eax
77 	jne	1b			/* loop until cursor reaches end */
78 	sfence				/* make NT stores globally visible/ordered */
79 	popl	%ebx
80 	ret
81 END(sse2_pagezero)
82
/*
 * void i686_pagezero(void *page)
 * Zero one page (1024 dwords), but first scan with REPE SCASL and only
 * write runs that are not already zero — avoiding dirtying cache lines
 * that already hold zeros.  Loop invariants: %edi = current scan/store
 * position, %ecx = dwords left to scan, %edx = dwords left overall.
 */
83 ENTRY(i686_pagezero)
84 	pushl	%edi
85 	pushl	%ebx
86
87 	movl	12(%esp),%edi		/* %edi = page base */
88 	movl	$1024,%ecx		/* 1024 dwords = 4096 bytes */
89 	cld				/* scan/store forwards */
90
91 	ALIGN_TEXT
92 1:
93 	xorl	%eax,%eax		/* compare (and later store) against 0 */
94 	repe
95 	scasl				/* skip over dwords that are already zero */
96 	jnz	2f			/* found a non-zero dword */
97
98 	popl	%ebx			/* whole remainder was zero: done */
99 	popl	%edi
100 	ret
101
102 	ALIGN_TEXT
103
104 2:
105 	incl	%ecx			/* un-consume the non-zero dword */
106 	subl	$4,%edi			/* back %edi up to point at it */
107
108 	movl	%ecx,%edx		/* %edx = dwords remaining in page */
109 	cmpl	$16,%ecx
110
111 	jge	3f			/* >= 16 left: just fill 16 dwords */
112
113 	/* near end of page: fill only up to the next 64-byte boundary */
114 	movl	%edi,%ebx
115 	andl	$0x3f,%ebx		/* offset within 64-byte line */
116 	shrl	%ebx
117 	shrl	%ebx			/* convert byte offset to dword offset */
118 	movl	$16,%ecx
119 	subl	%ebx,%ecx		/* dwords left in this 64-byte line */
120
121 3:
122 	subl	%ecx,%edx		/* account for the dwords we will store */
123 	rep
124 	stosl				/* zero %ecx dwords */
125
126 	movl	%edx,%ecx		/* resume scanning the remainder */
127 	testl	%edx,%edx
128 	jnz	1b
129
130 	popl	%ebx
131 	popl	%edi
132 	ret
133
134 /* fillw(pat, base, cnt) */
/*
 * void fillw(int pat, void *base, size_t cnt)
 * Store 'cnt' copies of the 16-bit pattern 'pat' at 'base'.
 * Clobbers %eax, %ecx; %edi preserved.
 */
135 ENTRY(fillw)
136 	pushl	%edi
137 	movl	8(%esp),%eax		/* %ax = pattern (low 16 bits used) */
138 	movl	12(%esp),%edi		/* %edi = base */
139 	movl	16(%esp),%ecx		/* %ecx = word count */
140 	cld				/* store forwards */
141 	rep
142 	stosw
143 	popl	%edi
144 	ret
145 END(fillw)
146
/*
 * void bcopyb(const void *src, void *dst, size_t len)
 * Byte-by-byte copy that handles overlapping regions: copies forwards
 * unless dst lies within [src, src+len), in which case it copies
 * backwards with DF set (and restores DF=0 before returning, as the
 * kernel expects the direction flag clear).
 */
147 ENTRY(bcopyb)
148 	pushl	%esi
149 	pushl	%edi
150 	movl	12(%esp),%esi		/* %esi = src */
151 	movl	16(%esp),%edi		/* %edi = dst */
152 	movl	20(%esp),%ecx		/* %ecx = len */
153 	movl	%edi,%eax
154 	subl	%esi,%eax		/* %eax = dst - src (unsigned) */
155 	cmpl	%ecx,%eax		/* overlapping && src < dst? */
156 	jb	1f
157 	cld				/* nope, copy forwards */
158 	rep
159 	movsb
160 	popl	%edi
161 	popl	%esi
162 	ret
163
164 	ALIGN_TEXT
165 1:
166 	addl	%ecx,%edi		/* copy backwards. */
167 	addl	%ecx,%esi
168 	decl	%edi			/* start at the last byte of each buffer */
169 	decl	%esi
170 	std				/* direction flag: decrement pointers */
171 	rep
172 	movsb
173 	popl	%edi
174 	popl	%esi
175 	cld				/* restore the expected DF=0 state */
176 	ret
177 END(bcopyb)
178
179 /*
180 * bcopy(src, dst, cnt)
181 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
182 */
/*
 * void bcopy(const void *src, void *dst, size_t len)
 * Overlap-safe copy done 32 bits at a time.  Forwards: dwords then the
 * 0-3 leftover bytes.  Backwards (dst inside [src, src+len)): the 0-3
 * unaligned tail bytes first, then dwords, with DF restored to 0 on
 * exit.
 */
183 ENTRY(bcopy)
184 	pushl	%esi
185 	pushl	%edi
186 	movl	12(%esp),%esi		/* %esi = src */
187 	movl	16(%esp),%edi		/* %edi = dst */
188 	movl	20(%esp),%ecx		/* %ecx = len */
189
190 	movl	%edi,%eax
191 	subl	%esi,%eax		/* %eax = dst - src (unsigned) */
192 	cmpl	%ecx,%eax		/* overlapping && src < dst? */
193 	jb	1f
194
195 	shrl	$2,%ecx			/* copy by 32-bit words */
196 	cld				/* nope, copy forwards */
197 	rep
198 	movsl
199 	movl	20(%esp),%ecx		/* reload len */
200 	andl	$3,%ecx			/* any bytes left? */
201 	rep
202 	movsb
203 	popl	%edi
204 	popl	%esi
205 	ret
206
207 	ALIGN_TEXT
208 1:
209 	addl	%ecx,%edi		/* copy backwards */
210 	addl	%ecx,%esi
211 	decl	%edi			/* point at last byte of each buffer */
212 	decl	%esi
213 	andl	$3,%ecx			/* any fractional bytes? */
214 	std				/* decrementing string ops */
215 	rep
216 	movsb
217 	movl	20(%esp),%ecx		/* copy remainder by 32-bit words */
218 	shrl	$2,%ecx
219 	subl	$3,%esi			/* step back from last byte to last dword */
220 	subl	$3,%edi
221 	rep
222 	movsl
223 	popl	%edi
224 	popl	%esi
225 	cld				/* restore the expected DF=0 state */
226 	ret
227 END(bcopy)
228
229 /*
230 * Note: memcpy does not support overlapping copies
231 */
/*
 * void *memcpy(void *dst, const void *src, size_t len)
 * Forward-only copy (overlap NOT supported, per the comment above).
 * Returns dst in %eax, as the C memcpy contract requires.
 */
232 ENTRY(memcpy)
233 	pushl	%edi
234 	pushl	%esi
235 	movl	12(%esp),%edi		/* %edi = dst */
236 	movl	16(%esp),%esi		/* %esi = src */
237 	movl	20(%esp),%ecx		/* %ecx = len */
238 	movl	%edi,%eax		/* return value = dst */
239 	shrl	$2,%ecx			/* copy by 32-bit words */
240 	cld				/* nope, copy forwards */
241 	rep
242 	movsl
243 	movl	20(%esp),%ecx		/* reload len */
244 	andl	$3,%ecx			/* any bytes left? */
245 	rep
246 	movsb
247 	popl	%esi
248 	popl	%edi
249 	ret
250 END(memcpy)
251
252 /*****************************************************************************/
253 /* copyout and fubyte family */
254 /*****************************************************************************/
255 /*
256 * Access user memory from inside the kernel. These routines and possibly
257 * the math- and DOS emulators should be the only places that do this.
258 *
259 * We have to access the memory with user's permissions, so use a segment
260 * selector with RPL 3. For writes to user space we have to additionally
261 * check the PTE for write permission, because the 386 does not check
262 * write permissions when we are executing with EPL 0. The 486 does check
263 * this if the WP bit is set in CR0, so we can use a simpler version here.
264 *
265 * These routines set curpcb->pcb_onfault for the time they execute. When a
266 * protection violation occurs inside the functions, the trap handler
267 * returns to *curpcb->pcb_onfault instead of the function.
268 */
269
270 /*
271 * copyout(from_kernel, to_user, len) - MP SAFE
272 */
/*
 * int copyout(const void *kaddr, void *uaddr, size_t len)
 * Copy len bytes from kernel space to user space.  Returns 0 on
 * success, EFAULT on a bad user address.  Faults during the copy are
 * recovered via curpcb->pcb_onfault -> copyout_fault (the trap handler
 * resumes there instead of re-executing the faulting instruction).
 */
273 ENTRY(copyout)
274 	movl	PCPU(CURPCB),%eax
275 	movl	$copyout_fault,PCB_ONFAULT(%eax)	/* arm fault recovery */
276 	pushl	%esi
277 	pushl	%edi
278 	pushl	%ebx
279 	movl	16(%esp),%esi		/* %esi = kaddr */
280 	movl	20(%esp),%edi		/* %edi = uaddr */
281 	movl	24(%esp),%ebx		/* %ebx = len */
282 	testl	%ebx,%ebx		/* anything to do? */
283 	jz	done_copyout
284
285 	/*
286 	 * Check explicitly for non-user addresses.  If 486 write protection
287 	 * is being used, this check is essential because we are in kernel
288 	 * mode so the h/w does not provide any protection against writing
289 	 * kernel addresses.
290 	 */
291
292 	/*
293 	 * First, prevent address wrapping.
294 	 */
295 	movl	%edi,%eax
296 	addl	%ebx,%eax		/* %eax = end of destination range */
297 	jc	copyout_fault		/* wrapped past 4GB */
298 /*
299  * XXX STOP USING VM_MAXUSER_ADDRESS.
300  * It is an end address, not a max, so every time it is used correctly it
301  * looks like there is an off by one error, and of course it caused an off
302  * by one error in several places.
303  */
304 	cmpl	$VM_MAXUSER_ADDRESS,%eax
305 	ja	copyout_fault		/* range extends into kernel space */
306
307 	/* bcopy(%esi, %edi, %ebx) */
308 	movl	%ebx,%ecx
309
310 	shrl	$2,%ecx			/* dwords first */
311 	cld
312 	rep
313 	movsl
314 	movb	%bl,%cl
315 	andb	$3,%cl			/* then 0-3 tail bytes */
316 	rep
317 	movsb
318
319 done_copyout:
320 	popl	%ebx
321 	popl	%edi
322 	popl	%esi
323 	xorl	%eax,%eax		/* return 0 */
324 	movl	PCPU(CURPCB),%edx
325 	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
326 	ret
327 END(copyout)
328
	ALIGN_TEXT
/*
 * Fault-recovery target for copyout: the trap handler jumps here with
 * the stack as it was inside copyout (three registers pushed).  Unwind,
 * disarm pcb_onfault, and return EFAULT.
 */
330 copyout_fault:
331 	popl	%ebx
332 	popl	%edi
333 	popl	%esi
334 	movl	PCPU(CURPCB),%edx
335 	movl	$0,PCB_ONFAULT(%edx)	/* disarm fault recovery */
336 	movl	$EFAULT,%eax
337 	ret
338
339 /*
340 * copyin(from_user, to_kernel, len) - MP SAFE
341 */
/*
 * int copyin(const void *uaddr, void *kaddr, size_t len)
 * Copy len bytes from user space to kernel space.  Returns 0 on
 * success, EFAULT on a bad user address.  Faults are recovered via
 * pcb_onfault -> copyin_fault.
 */
342 ENTRY(copyin)
343 	movl	PCPU(CURPCB),%eax
344 	movl	$copyin_fault,PCB_ONFAULT(%eax)		/* arm fault recovery */
345 	pushl	%esi
346 	pushl	%edi
347 	movl	12(%esp),%esi		/* caddr_t from */
348 	movl	16(%esp),%edi		/* caddr_t to */
349 	movl	20(%esp),%ecx		/* size_t len */
350
351 	/*
352 	 * make sure address is valid
353 	 */
354 	movl	%esi,%edx
355 	addl	%ecx,%edx		/* %edx = end of source range */
356 	jc	copyin_fault		/* wrapped past 4GB */
357 	cmpl	$VM_MAXUSER_ADDRESS,%edx
358 	ja	copyin_fault		/* range extends into kernel space */
359
360 	movb	%cl,%al			/* stash low bits of len */
361 	shrl	$2,%ecx			/* copy longword-wise */
362 	cld
363 	rep
364 	movsl
365 	movb	%al,%cl
366 	andb	$3,%cl			/* copy remaining bytes */
367 	rep
368 	movsb
369
370 	popl	%edi
371 	popl	%esi
372 	xorl	%eax,%eax		/* return 0 */
373 	movl	PCPU(CURPCB),%edx
374 	movl	%eax,PCB_ONFAULT(%edx)	/* disarm fault recovery */
375 	ret
376 END(copyin)
377
	ALIGN_TEXT
/*
 * Fault-recovery target for copyin: unwind the two pushed registers,
 * disarm pcb_onfault, and return EFAULT.
 */
379 copyin_fault:
380 	popl	%edi
381 	popl	%esi
382 	movl	PCPU(CURPCB),%edx
383 	movl	$0,PCB_ONFAULT(%edx)	/* disarm fault recovery */
384 	movl	$EFAULT,%eax
385 	ret
386
387 /*
388 * casuword. Compare and set user word. Returns -1 or the current value.
389 */
390
/*
 * u_long casuword(volatile u_long *dst, u_long old, u_long new)
 * Atomic compare-and-set of a word in user memory.  Returns the value
 * previously at *dst (== old on success); returns -1 on fault via
 * fusufault.  casuword32 is an alias (word == 32 bits on i386).
 */
391 ALTENTRY(casuword32)
392 ENTRY(casuword)
393 	movl	PCPU(CURPCB),%ecx
394 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
395 	movl	4(%esp),%edx			/* dst */
396 	movl	8(%esp),%eax			/* old */
397 	movl	12(%esp),%ecx			/* new */
398
399 	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
400 	ja	fusufault
401
402 #ifdef SMP
403 	lock					/* make CAS atomic across CPUs */
404 #endif
405 	cmpxchgl %ecx,(%edx)			/* Compare and set. */
406
407 	/*
408 	 * The old value is in %eax.  If the store succeeded it will be the
409 	 * value we expected (old) from before the store, otherwise it will
410 	 * be the current value.
411 	 */
412
413 	movl	PCPU(CURPCB),%ecx
414 	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
415 	ret
416 END(casuword32)
417 END(casuword)
418
419 /*
420 * Fetch (load) a 32-bit word, a 16-bit word, or an 8-bit byte from user
421 * memory. All these functions are MPSAFE.
422 */
423
/*
 * long fuword(const void *addr)
 * Fetch a 32-bit word from user memory.  Returns the word, or -1 on
 * fault via fusufault.  fuword32 is an alias on i386.
 */
424 ALTENTRY(fuword32)
425 ENTRY(fuword)
426 	movl	PCPU(CURPCB),%ecx
427 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
428 	movl	4(%esp),%edx			/* from */
429
430 	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address is valid */
431 	ja	fusufault
432
433 	movl	(%edx),%eax			/* may fault -> fusufault */
434 	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
435 	ret
436 END(fuword32)
437 END(fuword)
438
439 /*
440 * fuswintr() and suswintr() are specialized variants of fuword16() and
441 * suword16(), respectively. They are called from the profiling code,
442 * potentially at interrupt time. If they fail, that's okay; good things
443 * will happen later. They always fail for now, until the trap code is
444 * able to deal with this.
445 */
/*
 * Interrupt-time fetch/store of a 16-bit user word.  Deliberately
 * stubbed to always fail (-1) — see the comment above: callers treat
 * failure as a benign "try again later".
 */
446 ALTENTRY(suswintr)
447 ENTRY(fuswintr)
448 	movl	$-1,%eax
449 	ret
450 END(suswintr)
451 END(fuswintr)
452
/*
 * int fuword16(const void *addr)
 * Fetch a 16-bit word from user memory, zero-extended to 32 bits.
 * Returns -1 on fault via fusufault.
 */
453 ENTRY(fuword16)
454 	movl	PCPU(CURPCB),%ecx
455 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
456 	movl	4(%esp),%edx
457
458 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* last valid start for 2 bytes */
459 	ja	fusufault
460
461 	movzwl	(%edx),%eax			/* may fault -> fusufault */
462 	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
463 	ret
464 END(fuword16)
465
/*
 * int fubyte(const void *addr)
 * Fetch a byte from user memory, zero-extended to 32 bits.
 * Returns -1 on fault via fusufault.
 */
466 ENTRY(fubyte)
467 	movl	PCPU(CURPCB),%ecx
468 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
469 	movl	4(%esp),%edx
470
471 	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* last valid byte address */
472 	ja	fusufault
473
474 	movzbl	(%edx),%eax			/* may fault -> fusufault */
475 	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
476 	ret
477 END(fubyte)
478
	ALIGN_TEXT
/*
 * Shared fault-recovery target for the fu*/su* family: disarm
 * pcb_onfault and return -1 (xor to 0, then decrement).
 */
480 fusufault:
481 	movl	PCPU(CURPCB),%ecx
482 	xorl	%eax,%eax
483 	movl	%eax,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
484 	decl	%eax			/* %eax = -1 */
485 	ret
486
487 /*
488 * Store a 32-bit word, a 16-bit word, or an 8-bit byte to user memory.
489 * All these functions are MPSAFE.
490 */
491
/*
 * int suword(void *addr, long word)
 * Store a 32-bit word to user memory.  Returns 0 on success, -1 on
 * fault via fusufault.  suword32 is an alias on i386.
 */
492 ALTENTRY(suword32)
493 ENTRY(suword)
494 	movl	PCPU(CURPCB),%ecx
495 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
496 	movl	4(%esp),%edx
497
498 	cmpl	$VM_MAXUSER_ADDRESS-4,%edx	/* verify address validity */
499 	ja	fusufault
500
501 	movl	8(%esp),%eax
502 	movl	%eax,(%edx)			/* may fault -> fusufault */
503 	xorl	%eax,%eax			/* return 0 */
504 	movl	PCPU(CURPCB),%ecx
505 	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
506 	ret
507 END(suword32)
508 END(suword)
509
/*
 * int suword16(void *addr, int word)
 * Store a 16-bit word to user memory.  Returns 0 on success, -1 on
 * fault via fusufault.
 */
510 ENTRY(suword16)
511 	movl	PCPU(CURPCB),%ecx
512 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
513 	movl	4(%esp),%edx
514
515 	cmpl	$VM_MAXUSER_ADDRESS-2,%edx	/* verify address validity */
516 	ja	fusufault
517
518 	movw	8(%esp),%ax
519 	movw	%ax,(%edx)			/* may fault -> fusufault */
520 	xorl	%eax,%eax			/* return 0 */
521 	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
522 	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
523 	ret
524 END(suword16)
525
/*
 * int subyte(void *addr, int byte)
 * Store a byte to user memory.  Returns 0 on success, -1 on fault via
 * fusufault.
 */
526 ENTRY(subyte)
527 	movl	PCPU(CURPCB),%ecx
528 	movl	$fusufault,PCB_ONFAULT(%ecx)	/* arm fault recovery */
529 	movl	4(%esp),%edx
530
531 	cmpl	$VM_MAXUSER_ADDRESS-1,%edx	/* verify address validity */
532 	ja	fusufault
533
534 	movb	8(%esp),%al
535 	movb	%al,(%edx)			/* may fault -> fusufault */
536 	xorl	%eax,%eax			/* return 0 */
537 	movl	PCPU(CURPCB),%ecx		/* restore trashed register */
538 	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
539 	ret
540 END(subyte)
541
542 /*
543 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
544 *
545 * copy a string from from to to, stop when a 0 character is reached.
546 * return ENAMETOOLONG if string is longer than maxlen, and
547 * EFAULT on protection violations. If lencopied is non-zero,
548 * return the actual length in *lencopied.
549 */
/*
 * int copyinstr(const void *uaddr, void *kaddr, size_t maxlen,
 *               size_t *lencopied)
 * Copy a NUL-terminated string from user to kernel space, at most
 * maxlen bytes including the NUL.  Returns 0 on success, ENAMETOOLONG
 * if no NUL within maxlen, EFAULT on a bad user address.  If lencopied
 * is non-NULL it receives the number of bytes copied (incl. NUL).
 * Faults recover through pcb_onfault -> cpystrflt.
 * Loop registers: %esi = user src, %edi = kernel dst, %edx = bytes
 * remaining + 1.
 */
550 ENTRY(copyinstr)
551 	pushl	%esi
552 	pushl	%edi
553 	movl	PCPU(CURPCB),%ecx
554 	movl	$cpystrflt,PCB_ONFAULT(%ecx)	/* arm fault recovery */
555
556 	movl	12(%esp),%esi			/* %esi = from */
557 	movl	16(%esp),%edi			/* %edi = to */
558 	movl	20(%esp),%edx			/* %edx = maxlen */
559
560 	movl	$VM_MAXUSER_ADDRESS,%eax
561
562 	/* make sure 'from' is within bounds */
563 	subl	%esi,%eax			/* %eax = bytes until end of user VA */
564 	jbe	cpystrflt
565
566 	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
567 	cmpl	%edx,%eax
568 	jae	1f
569 	movl	%eax,%edx
570 	movl	%eax,20(%esp)			/* clamp saved maxlen too */
571 1:
572 	incl	%edx				/* pre-bias for the leading decl */
573 	cld
574
575 2:
576 	decl	%edx
577 	jz	3f				/* budget exhausted */
578
579 	lodsb					/* may fault -> cpystrflt */
580 	stosb
581 	orb	%al,%al				/* NUL terminator? */
582 	jnz	2b
583
584 	/* Success -- 0 byte reached */
585 	decl	%edx				/* match copied-length arithmetic below */
586 	xorl	%eax,%eax			/* return 0 */
587 	jmp	cpystrflt_x
588 3:
589 	/* edx is zero - return ENAMETOOLONG or EFAULT */
590 	cmpl	$VM_MAXUSER_ADDRESS,%esi	/* stopped at the user VA limit? */
591 	jae	cpystrflt
592 4:
593 	movl	$ENAMETOOLONG,%eax
594 	jmp	cpystrflt_x
595
596 cpystrflt:
597 	movl	$EFAULT,%eax
598
599 cpystrflt_x:
600 	/* set *lencopied and return %eax */
601 	movl	PCPU(CURPCB),%ecx
602 	movl	$0,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
603 	movl	20(%esp),%ecx
604 	subl	%edx,%ecx			/* copied = maxlen - remaining */
605 	movl	24(%esp),%edx			/* %edx = lencopied pointer */
606 	testl	%edx,%edx
607 	jz	1f				/* caller doesn't want the count */
608 	movl	%ecx,(%edx)
609 1:
610 	popl	%edi
611 	popl	%esi
612 	ret
613 END(copyinstr)
614
615 /*
616 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
617 */
/*
 * int copystr(const void *from, void *to, size_t maxlen,
 *             size_t *lencopied)
 * Kernel-to-kernel variant of copyinstr: copy a NUL-terminated string
 * of at most maxlen bytes (incl. NUL).  Returns 0 or ENAMETOOLONG; no
 * fault handling since both buffers are kernel addresses.
 */
618 ENTRY(copystr)
619 	pushl	%esi
620 	pushl	%edi
621
622 	movl	12(%esp),%esi			/* %esi = from */
623 	movl	16(%esp),%edi			/* %edi = to */
624 	movl	20(%esp),%edx			/* %edx = maxlen */
625 	incl	%edx				/* pre-bias for the leading decl */
626 	cld
627 1:
628 	decl	%edx
629 	jz	4f				/* budget exhausted */
630 	lodsb
631 	stosb
632 	orb	%al,%al				/* NUL terminator? */
633 	jnz	1b
634
635 	/* Success -- 0 byte reached */
636 	decl	%edx				/* match copied-length arithmetic below */
637 	xorl	%eax,%eax			/* return 0 */
638 	jmp	6f
639 4:
640 	/* edx is zero -- return ENAMETOOLONG */
641 	movl	$ENAMETOOLONG,%eax
642
643 6:
644 	/* set *lencopied and return %eax */
645 	movl	20(%esp),%ecx
646 	subl	%edx,%ecx			/* copied = maxlen - remaining */
647 	movl	24(%esp),%edx			/* %edx = lencopied pointer */
648 	testl	%edx,%edx
649 	jz	7f				/* caller doesn't want the count */
650 	movl	%ecx,(%edx)
651 7:
652 	popl	%edi
653 	popl	%esi
654 	ret
655 END(copystr)
656
/*
 * int bcmp(const void *b1, const void *b2, size_t len)
 * Compare two buffers; returns 0 if equal, 1 if they differ (bcmp's
 * contract is only zero/non-zero).  Compares dwords first, then the
 * 0-3 tail bytes; ZF from the last compare selects the result.
 */
657 ENTRY(bcmp)
658 	pushl	%edi
659 	pushl	%esi
660 	movl	12(%esp),%edi		/* %edi = b1 */
661 	movl	16(%esp),%esi		/* %esi = b2 */
662 	movl	20(%esp),%edx		/* %edx = len */
663
664 	movl	%edx,%ecx
665 	shrl	$2,%ecx			/* dword count */
666 	cld				/* compare forwards */
667 	repe
668 	cmpsl
669 	jne	1f			/* mismatch in the dword pass */
670
671 	movl	%edx,%ecx
672 	andl	$3,%ecx			/* 0-3 tail bytes */
673 	repe
674 	cmpsb
675 1:
676 	setne	%al			/* 1 if last compare differed, else 0 */
677 	movsbl	%al,%eax
678 	popl	%esi
679 	popl	%edi
680 	ret
681 END(bcmp)
682
683 /*
684 * Handling of special 386 registers and descriptor tables etc
685 */
686 /* void lgdt(struct region_descriptor *rdp); */
/*
 * void lgdt(struct region_descriptor *rdp)
 * Load a new GDT and refresh every segment register so no stale
 * descriptor remains cached.  %cs cannot be written directly, so it is
 * reloaded by converting the near return into a far (intersegment)
 * return.  Under XEN the hypervisor manages the GDT, hence the #ifndef.
 */
687 ENTRY(lgdt)
688 #ifndef XEN
689 	/* reload the descriptor table */
690 	movl	4(%esp),%eax
691 	lgdt	(%eax)
692 #endif
693
694 	/* flush the prefetch q */
695 	jmp	1f
696 	nop
697 1:
698 	/* reload "stale" selectors */
699 	movl	$KDSEL,%eax		/* kernel data selector */
700 	movl	%eax,%ds
701 	movl	%eax,%es
702 	movl	%eax,%gs
703 	movl	%eax,%ss
704 	movl	$KPSEL,%eax		/* per-CPU data selector */
705 	movl	%eax,%fs
706
707 	/* reload code selector by turning return into intersegmental return */
708 	movl	(%esp),%eax		/* saved return EIP */
709 	pushl	%eax
710 	movl	$KCSEL,4(%esp)		/* kernel code selector above it */
711 	MEXITCOUNT
712 	lret				/* far return reloads %cs */
713 END(lgdt)
714
715 /* ssdtosd(*ssdp,*sdp) */
/*
 * void ssdtosd(struct soft_segment_descriptor *ssdp,
 *              struct segment_descriptor *sdp)
 * Convert a software (unpacked) segment descriptor into the hardware
 * x86 descriptor layout, which scatters the base and limit fields
 * across both descriptor dwords.  The shifts/rotates below shuffle the
 * fields into their architectural positions; written for compactness,
 * not clarity — NOTE(review): verify any change against the i386
 * descriptor format in the Intel SDM.
 */
716 ENTRY(ssdtosd)
717 	pushl	%ebx
718 	movl	8(%esp),%ecx		/* %ecx = ssdp */
719 	movl	8(%ecx),%ebx
720 	shll	$16,%ebx
721 	movl	(%ecx),%edx
722 	roll	$16,%edx
723 	movb	%dh,%bl
724 	movb	%dl,%bh
725 	rorl	$8,%ebx
726 	movl	4(%ecx),%eax
727 	movw	%ax,%dx
728 	andl	$0xf0000,%eax
729 	orl	%eax,%ebx
730 	movl	12(%esp),%ecx		/* %ecx = sdp */
731 	movl	%edx,(%ecx)		/* low dword of hardware descriptor */
732 	movl	%ebx,4(%ecx)		/* high dword of hardware descriptor */
733 	popl	%ebx
734 	ret
735 END(ssdtosd)
736
737 /* void reset_dbregs() */
/*
 * void reset_dbregs(void)
 * Clear the hardware debug registers: %dr7 first (so all breakpoints
 * are disabled before the address registers change), then the four
 * address registers and the status register %dr6.
 */
738 ENTRY(reset_dbregs)
739 	movl	$0,%eax
740 	movl	%eax,%dr7		/* disable all breakpoints first */
741 	movl	%eax,%dr0
742 	movl	%eax,%dr1
743 	movl	%eax,%dr2
744 	movl	%eax,%dr3
745 	movl	%eax,%dr6		/* clear debug status */
746 	ret
747 END(reset_dbregs)
748
749 /*****************************************************************************/
750 /* setjump, longjump */
751 /*****************************************************************************/
752
/*
 * int setjmp(jmp_buf env)
 * Kernel setjmp: save callee-saved registers, %esp, and the return
 * address into env (6 slots: ebx, esp, ebp, esi, edi, eip) and return
 * 0.  A later longjmp on the same env returns 1 from this call site.
 */
753 ENTRY(setjmp)
754 	movl	4(%esp),%eax		/* %eax = env */
755 	movl	%ebx,(%eax)		/* save ebx */
756 	movl	%esp,4(%eax)		/* save esp */
757 	movl	%ebp,8(%eax)		/* save ebp */
758 	movl	%esi,12(%eax)		/* save esi */
759 	movl	%edi,16(%eax)		/* save edi */
760 	movl	(%esp),%edx		/* get rta */
761 	movl	%edx,20(%eax)		/* save eip */
762 	xorl	%eax,%eax		/* return(0); */
763 	ret
764 END(setjmp)
765
/*
 * void longjmp(jmp_buf env)
 * Kernel longjmp: restore the registers and stack saved by setjmp,
 * patch the saved eip into the (restored) return frame, and return 1
 * as if from the original setjmp call.
 */
766 ENTRY(longjmp)
767 	movl	4(%esp),%eax		/* %eax = env */
768 	movl	(%eax),%ebx		/* restore ebx */
769 	movl	4(%eax),%esp		/* restore esp */
770 	movl	8(%eax),%ebp		/* restore ebp */
771 	movl	12(%eax),%esi		/* restore esi */
772 	movl	16(%eax),%edi		/* restore edi */
773 	movl	20(%eax),%edx		/* get rta */
774 	movl	%edx,(%esp)		/* put in return frame */
775 	xorl	%eax,%eax		/* return(1); */
776 	incl	%eax
777 	ret
778 END(longjmp)
779
780 /*
781 * Support for reading MSRs in the safe manner.
782 */
783 ENTRY(rdmsr_safe)
784 /* int rdmsr_safe(u_int msr, uint64_t *data) */
785 	movl	PCPU(CURPCB),%ecx
786 	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* recover from #GP on bad MSR */
787
788 	movl	4(%esp),%ecx			/* %ecx = MSR number */
789 	rdmsr					/* result in %edx:%eax; may fault */
790 	movl	8(%esp),%ecx			/* %ecx = data pointer */
791 	movl	%eax,(%ecx)			/* store low 32 bits */
792 	movl	%edx,4(%ecx)			/* store high 32 bits */
793 	xorl	%eax,%eax			/* return 0 */
794
795 	movl	PCPU(CURPCB),%ecx
796 	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
797
798 	ret
799
800 /*
801 * Support for writing MSRs in the safe manner.
802 */
803 ENTRY(wrmsr_safe)
804 /* int wrmsr_safe(u_int msr, uint64_t data) */
805 	movl	PCPU(CURPCB),%ecx
806 	movl	$msr_onfault,PCB_ONFAULT(%ecx)	/* recover from #GP on bad MSR */
807
808 	movl	4(%esp),%ecx			/* %ecx = MSR number */
809 	movl	8(%esp),%eax			/* low 32 bits of value */
810 	movl	12(%esp),%edx			/* high 32 bits of value */
811 	wrmsr					/* may fault -> msr_onfault */
812 	xorl	%eax,%eax			/* return 0 */
813
814 	movl	PCPU(CURPCB),%ecx
815 	movl	%eax,PCB_ONFAULT(%ecx)		/* disarm fault recovery */
816
817 	ret
818
819 /*
820 * MSR operations fault handler
821 */
	ALIGN_TEXT
/*
 * Shared fault-recovery target for rdmsr_safe/wrmsr_safe: disarm
 * pcb_onfault and return EFAULT to the caller.
 */
823 msr_onfault:
824 	movl	PCPU(CURPCB),%ecx
825 	movl	$0,PCB_ONFAULT(%ecx)	/* disarm fault recovery */
826 	movl	$EFAULT,%eax
827 	ret
Cache object: b884b8ad9c832a9058d6b78650467009
|