1 /*-
2 * Copyright (c) 1993 The Regents of the University of California.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: releng/10.3/sys/i386/include/cpufunc.h 290189 2015-10-30 10:02:57Z kib $
30 */
31
32 /*
33 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
35 * used in preference to this.
36 */
37
38 #ifndef _MACHINE_CPUFUNC_H_
39 #define _MACHINE_CPUFUNC_H_
40
41 #ifndef _SYS_CDEFS_H_
42 #error this file needs sys/cdefs.h as a prerequisite
43 #endif
44
#ifdef XEN
/*
 * Under Xen, privileged operations (interrupt masking, CR2/CR3 access,
 * TLB maintenance, eflags manipulation) must go through the hypervisor;
 * these wrappers are defined elsewhere and substituted for the direct
 * instructions in the inlines below.
 */
extern void xen_cli(void);
extern void xen_sti(void);
extern u_int xen_rcr2(void);
extern void xen_load_cr3(u_int data);
extern void xen_tlb_flush(void);
extern void xen_invlpg(u_int addr);
extern void write_eflags(u_int eflags);
extern u_int read_eflags(void);
#endif

struct region_descriptor;

/* Width-explicit volatile accessors for memory-mapped device registers. */
#define readb(va) (*(volatile uint8_t *) (va))
#define readw(va) (*(volatile uint16_t *) (va))
#define readl(va) (*(volatile uint32_t *) (va))

#define writeb(va, d) (*(volatile uint8_t *) (va) = (d))
#define writew(va, d) (*(volatile uint16_t *) (va) = (d))
#define writel(va, d) (*(volatile uint32_t *) (va) = (d))
65
66 #if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
67
/* Enter the debugger: execute the one-byte breakpoint instruction (INT3). */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

/*
 * Bit-scan forward: return the index (0-31) of the least significant set
 * bit in mask.  The hardware result is undefined for mask == 0; callers
 * such as ffs() below must handle that case themselves.
 */
static __inline u_int
bsfl(u_int mask)
{
	u_int result;

	__asm("bsfl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

/*
 * Bit-scan reverse: return the index (0-31) of the most significant set
 * bit in mask.  Undefined for mask == 0, as with bsfl().
 */
static __inline u_int
bsrl(u_int mask)
{
	u_int result;

	__asm("bsrl %1,%0" : "=r" (result) : "rm" (mask) : "cc");
	return (result);
}

/* Flush the cache line containing the byte at virtual address addr. */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/*
 * CLFLUSHOPT: weakly-ordered cache-line flush.  Hand-encoded as a 0x66
 * prefix on clflush so assemblers that do not know the mnemonic can
 * still build this file.
 */
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile(".byte 0x66;clflush %0" : : "m" (*(char *)addr));
}

/* Clear the task-switched flag (CR0.TS), re-enabling FPU access. */
static __inline void
clts(void)
{

	__asm __volatile("clts");
}
112
/*
 * Disable maskable interrupts on this CPU.  The "memory" clobber keeps
 * the compiler from reordering memory accesses across the cli.  Under
 * Xen this is a hypervisor call instead of a privileged instruction.
 */
static __inline void
disable_intr(void)
{
#ifdef XEN
	xen_cli();
#else
	__asm __volatile("cli" : : : "memory");
#endif
}
122
123 static __inline void
124 do_cpuid(u_int ax, u_int *p)
125 {
126 __asm __volatile("cpuid"
127 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
128 : "" (ax));
129 }
130
131 static __inline void
132 cpuid_count(u_int ax, u_int cx, u_int *p)
133 {
134 __asm __volatile("cpuid"
135 : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
136 : "" (ax), "c" (cx));
137 }
138
/* Enable maskable interrupts on this CPU (hypervisor call under Xen). */
static __inline void
enable_intr(void)
{
#ifdef XEN
	xen_sti();
#else
	__asm __volatile("sti");
#endif
}

/*
 * Arm address monitoring for a following mwait: addr selects the line
 * to watch; extensions and hints are passed in %ecx and %edx.
 */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/*
 * Wait (optionally in a low-power state selected by hints in %eax)
 * until the monitored line is written or an interrupt occurs.
 */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}

/* Load fence: serialize all prior loads; also a compiler barrier. */
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

/* Full memory fence: serialize prior loads and stores; compiler barrier. */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}
177
178 #ifdef _KERNEL
179
180 #define HAVE_INLINE_FFS
181
182 static __inline int
183 ffs(int mask)
184 {
185 /*
186 * Note that gcc-2's builtin ffs would be used if we didn't declare
187 * this inline or turn off the builtin. The builtin is faster but
188 * broken in gcc-2.4.5 and slower but working in gcc-2.5 and later
189 * versions.
190 */
191 return (mask == 0 ? mask : (int)bsfl((u_int)mask) + 1);
192 }
193
194 #define HAVE_INLINE_FFSL
195
/* Find first set bit in a long; on i386 long and int are both 32 bits. */
static __inline int
ffsl(long mask)
{
	int bit;

	bit = ffs((int)mask);
	return (bit);
}
201
202 #define HAVE_INLINE_FLS
203
204 static __inline int
205 fls(int mask)
206 {
207 return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
208 }
209
210 #define HAVE_INLINE_FLSL
211
/* Find last set bit in a long; on i386 long and int are both 32 bits. */
static __inline int
flsl(long mask)
{
	int bit;

	bit = fls((int)mask);
	return (bit);
}
217
218 #endif /* _KERNEL */
219
/* Halt the CPU until the next interrupt arrives. */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}

/* Read one byte from I/O port.  "Nd" allows an immediate port < 256. */
static __inline u_char
inb(u_int port)
{
	u_char data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read a 32-bit word from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/*
 * String input: read count bytes from port into addr.  cld forces the
 * direction flag forward; the "memory" clobber covers the stores into
 * the destination buffer.
 */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input: read count 16-bit words from port into addr. */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* String input: read count 32-bit words from port into addr. */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* Invalidate all caches WITHOUT writing dirty lines back (unlike wbinvd). */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read a 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write a 32-bit word to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/* String output: write count bytes from addr to port. */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output: write count 16-bit words from addr to port. */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* String output: write count 32-bit words from addr to port. */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* Write a 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}

/* Spin-wait hint (PAUSE): reduces power and pipeline flushes in loops. */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}
333
/*
 * Read the EFLAGS register.  Under XEN the direct version is renamed
 * _read_eflags(); the hypervisor-aware read_eflags() is the extern
 * declared at the top of this file.
 */
static __inline u_int
#ifdef XEN
_read_eflags(void)
#else
read_eflags(void)
#endif
{
	u_int ef;

	__asm __volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

/* Read a model-specific register; "=A" binds the result to %edx:%eax. */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint64_t rv;

	__asm __volatile("rdmsr" : "=A" (rv) : "c" (msr));
	return (rv);
}

/* Read a performance-monitoring counter selected by pmc (in %ecx). */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint64_t rv;

	__asm __volatile("rdpmc" : "=A" (rv) : "c" (pmc));
	return (rv);
}

/* Read the 64-bit time-stamp counter. */
static __inline uint64_t
rdtsc(void)
{
	uint64_t rv;

	__asm __volatile("rdtsc" : "=A" (rv));
	return (rv);
}

/* Read only the low 32 bits of the TSC; %edx is clobbered and discarded. */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

/* Write back all dirty cache lines, then invalidate the caches. */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/*
 * Write the EFLAGS register.  Renamed _write_eflags() under XEN for the
 * same reason as _read_eflags() above.
 */
static __inline void
#ifdef XEN
_write_eflags(u_int ef)
#else
write_eflags(u_int ef)
#endif
{
	__asm __volatile("pushl %0; popfl" : : "r" (ef));
}

/* Write a model-specific register; "A" supplies newval in %edx:%eax. */
static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	__asm __volatile("wrmsr" : : "A" (newval), "c" (msr));
}
404
/* Load the CR0 control register. */
static __inline void
load_cr0(u_int data)
{

	__asm __volatile("movl %0,%%cr0" : : "r" (data));
}

/* Read the CR0 control register. */
static __inline u_int
rcr0(void)
{
	u_int data;

	__asm __volatile("movl %%cr0,%0" : "=r" (data));
	return (data);
}

/*
 * Read CR2 (page-fault linear address).  Under Xen CR2 is not directly
 * readable, so the hypervisor wrapper is used and the local variable is
 * never reached.
 */
static __inline u_int
rcr2(void)
{
	u_int data;

#ifdef XEN
	return (xen_rcr2());
#endif
	__asm __volatile("movl %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load CR3 (page-directory base), which also flushes non-global TLB
 * entries.  The "memory" clobber keeps page-table stores from being
 * moved past the flush.
 */
static __inline void
load_cr3(u_int data)
{
#ifdef XEN
	xen_load_cr3(data);
#else
	__asm __volatile("movl %0,%%cr3" : : "r" (data) : "memory");
#endif
}

/* Read CR3 (page-directory base). */
static __inline u_int
rcr3(void)
{
	u_int data;

	__asm __volatile("movl %%cr3,%0" : "=r" (data));
	return (data);
}

/* Load the CR4 control register. */
static __inline void
load_cr4(u_int data)
{
	__asm __volatile("movl %0,%%cr4" : : "r" (data));
}

/* Read the CR4 control register. */
static __inline u_int
rcr4(void)
{
	u_int data;

	__asm __volatile("movl %%cr4,%0" : "=r" (data));
	return (data);
}

/* Read extended control register reg via XGETBV (%edx:%eax composed). */
static __inline uint64_t
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

/* Write extended control register reg via XSETBV (val split into %edx:%eax). */
static __inline void
load_xcr(u_int reg, uint64_t val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}
485
/*
 * Global TLB flush (except for those for pages marked PG_G).
 * Implemented by reloading CR3 with its current value; under Xen this
 * is a hypervisor call instead.
 */
static __inline void
invltlb(void)
{
#ifdef XEN
	xen_tlb_flush();
#else
	load_cr3(rcr3());
#endif
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_int addr)
{

#ifdef XEN
	xen_invlpg(addr);
#else
	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
#endif
}
513
/* Read the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/*
 * Store the GDT register.  NOTE(review): sgdt writes only 6 bytes
 * (16-bit limit + 32-bit base); the top 16 bits of the returned
 * uint64_t are not written by the instruction — confirm callers mask
 * or ignore them.
 */
static __inline uint64_t
rgdt(void)
{
	uint64_t gdtr;
	__asm __volatile("sgdt %0" : "=m" (gdtr));
	return (gdtr);
}

/* Read the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Store the IDT register; same 6-byte caveat as rgdt() above. */
static __inline uint64_t
ridt(void)
{
	uint64_t idtr;
	__asm __volatile("sidt %0" : "=m" (idtr));
	return (idtr);
}

/* Store the LDT selector. */
static __inline u_short
rldt(void)
{
	u_short ldtr;
	__asm __volatile("sldt %0" : "=g" (ldtr));
	return (ldtr);
}

/* Read the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

/* Store the task register (TSS selector). */
static __inline u_short
rtr(void)
{
	u_short tr;
	__asm __volatile("str %0" : "=g" (tr));
	return (tr);
}

/* Load the %fs segment selector. */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

/* Load the %gs segment selector. */
static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}

/* Load the IDT register from the descriptor that addr points to. */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Load the LDT selector. */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Load the task register with the given TSS selector. */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}
599
/*
 * Debug register accessors (DR0-DR7).  DR0-DR3 hold breakpoint linear
 * addresses, DR6 is status and DR7 control.  NOTE(review): DR4/DR5 are
 * typically aliases of DR6/DR7 unless CR4.DE is set — confirm intended
 * use before relying on them as distinct registers.
 */
static __inline u_int
rdr0(void)
{
	u_int data;
	__asm __volatile("movl %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(u_int dr0)
{
	__asm __volatile("movl %0,%%dr0" : : "r" (dr0));
}

static __inline u_int
rdr1(void)
{
	u_int data;
	__asm __volatile("movl %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(u_int dr1)
{
	__asm __volatile("movl %0,%%dr1" : : "r" (dr1));
}

static __inline u_int
rdr2(void)
{
	u_int data;
	__asm __volatile("movl %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(u_int dr2)
{
	__asm __volatile("movl %0,%%dr2" : : "r" (dr2));
}

static __inline u_int
rdr3(void)
{
	u_int data;
	__asm __volatile("movl %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(u_int dr3)
{
	__asm __volatile("movl %0,%%dr3" : : "r" (dr3));
}

static __inline u_int
rdr4(void)
{
	u_int data;
	__asm __volatile("movl %%dr4,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr4(u_int dr4)
{
	__asm __volatile("movl %0,%%dr4" : : "r" (dr4));
}

static __inline u_int
rdr5(void)
{
	u_int data;
	__asm __volatile("movl %%dr5,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr5(u_int dr5)
{
	__asm __volatile("movl %0,%%dr5" : : "r" (dr5));
}

static __inline u_int
rdr6(void)
{
	u_int data;
	__asm __volatile("movl %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(u_int dr6)
{
	__asm __volatile("movl %0,%%dr6" : : "r" (dr6));
}

static __inline u_int
rdr7(void)
{
	u_int data;
	__asm __volatile("movl %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(u_int dr7)
{
	__asm __volatile("movl %0,%%dr7" : : "r" (dr7));
}
711
/*
 * Read a Cyrix configuration register: write the register index to the
 * index port (0x22), then read the value from the data port (0x23).
 * The two accesses must occur back to back.
 */
static __inline u_char
read_cyrix_reg(u_char reg)
{
	outb(0x22, reg);
	return inb(0x23);
}

/* Write a Cyrix configuration register via the 0x22/0x23 index/data pair. */
static __inline void
write_cyrix_reg(u_char reg, u_char data)
{
	outb(0x22, reg);
	outb(0x23, data);
}

/*
 * Disable interrupts, returning the previous EFLAGS so the caller can
 * restore the prior interrupt state with intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t eflags;

	eflags = read_eflags();
	disable_intr();
	return (eflags);
}

/* Restore the interrupt state saved by intr_disable(). */
static __inline void
intr_restore(register_t eflags)
{
	write_eflags(eflags);
}
741
742 #else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
743
/*
 * Out-of-line prototypes for the functions above, used when the
 * compiler does not support GNU-style inline assembly; the
 * implementations must then be provided elsewhere.
 */
int breakpoint(void);
u_int bsfl(u_int mask);
u_int bsrl(u_int mask);
void clflush(u_long addr);
void clts(void);
void cpuid_count(u_int ax, u_int cx, u_int *p);
void disable_intr(void);
void do_cpuid(u_int ax, u_int *p);
void enable_intr(void);
void halt(void);
void ia32_pause(void);
u_char inb(u_int port);
u_int inl(u_int port);
void insb(u_int port, void *addr, size_t count);
void insl(u_int port, void *addr, size_t count);
void insw(u_int port, void *addr, size_t count);
register_t intr_disable(void);
void intr_restore(register_t ef);
void invd(void);
void invlpg(u_int addr);
void invltlb(void);
u_short inw(u_int port);
void lidt(struct region_descriptor *addr);
void lldt(u_short sel);
void load_cr0(u_int cr0);
void load_cr3(u_int cr3);
void load_cr4(u_int cr4);
void load_dr0(u_int dr0);
void load_dr1(u_int dr1);
void load_dr2(u_int dr2);
void load_dr3(u_int dr3);
void load_dr4(u_int dr4);
void load_dr5(u_int dr5);
void load_dr6(u_int dr6);
void load_dr7(u_int dr7);
void load_fs(u_short sel);
void load_gs(u_short sel);
void ltr(u_short sel);
void outb(u_int port, u_char data);
void outl(u_int port, u_int data);
void outsb(u_int port, const void *addr, size_t count);
void outsl(u_int port, const void *addr, size_t count);
void outsw(u_int port, const void *addr, size_t count);
void outw(u_int port, u_short data);
u_int rcr0(void);
u_int rcr2(void);
u_int rcr3(void);
u_int rcr4(void);
uint64_t rdmsr(u_int msr);
uint64_t rdpmc(u_int pmc);
u_int rdr0(void);
u_int rdr1(void);
u_int rdr2(void);
u_int rdr3(void);
u_int rdr4(void);
u_int rdr5(void);
u_int rdr6(void);
u_int rdr7(void);
uint64_t rdtsc(void);
u_char read_cyrix_reg(u_char reg);
u_int read_eflags(void);
u_int rfs(void);
uint64_t rgdt(void);
u_int rgs(void);
uint64_t ridt(void);
u_short rldt(void);
u_short rtr(void);
void wbinvd(void);
void write_cyrix_reg(u_char reg, u_char data);
void write_eflags(u_int ef);
void wrmsr(u_int msr, uint64_t newval);
815
816 #endif /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
817
/* Reset all debug registers to a clean state; implemented out of line. */
void reset_dbregs(void);

#ifdef _KERNEL
/* Fault-tolerant MSR access: return non-zero instead of trapping on #GP. */
int rdmsr_safe(u_int msr, uint64_t *val);
int wrmsr_safe(u_int msr, uint64_t newval);
#endif
824
825 #endif /* !_MACHINE_CPUFUNC_H_ */