The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/amd64/include/cpufunc.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 2003 Peter Wemm.
    3  * Copyright (c) 1993 The Regents of the University of California.
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 4. Neither the name of the University nor the names of its contributors
   15  *    may be used to endorse or promote products derived from this software
   16  *    without specific prior written permission.
   17  *
   18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
   19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
   22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   28  * SUCH DAMAGE.
   29  *
   30  * $FreeBSD: releng/9.0/sys/amd64/include/cpufunc.h 223796 2011-07-05 18:42:10Z jkim $
   31  */
   32 
   33 /*
   34  * Functions to provide access to special i386 instructions.
    35  * This is included in sys/systm.h, and that file should be
   36  * used in preference to this.
   37  */
   38 
#ifndef _MACHINE_CPUFUNC_H_
#define _MACHINE_CPUFUNC_H_

/* This header uses definitions from <sys/cdefs.h>; require it up front. */
#ifndef _SYS_CDEFS_H_
#error this file needs sys/cdefs.h as a prerequisite
#endif

/* Opaque; only passed by pointer (to lidt() below). */
struct region_descriptor;

/* Single volatile loads of 1/2/4/8 bytes from a (device) virtual address. */
#define readb(va)	(*(volatile uint8_t *) (va))
#define readw(va)	(*(volatile uint16_t *) (va))
#define readl(va)	(*(volatile uint32_t *) (va))
#define readq(va)	(*(volatile uint64_t *) (va))

/* Single volatile stores of 1/2/4/8 bytes to a (device) virtual address. */
#define writeb(va, d)	(*(volatile uint8_t *) (va) = (d))
#define writew(va, d)	(*(volatile uint16_t *) (va) = (d))
#define writel(va, d)	(*(volatile uint32_t *) (va) = (d))
#define writeq(va, d)	(*(volatile uint64_t *) (va) = (d))
   58 #if defined(__GNUCLIKE_ASM) && defined(__CC_SUPPORTS___INLINE)
   59 
/*
 * Raise a breakpoint trap (INT 3, #BP) to enter the debugger.
 */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}
   65 
   66 static __inline u_int
   67 bsfl(u_int mask)
   68 {
   69         u_int   result;
   70 
   71         __asm __volatile("bsfl %1,%0" : "=r" (result) : "rm" (mask));
   72         return (result);
   73 }
   74 
   75 static __inline u_long
   76 bsfq(u_long mask)
   77 {
   78         u_long  result;
   79 
   80         __asm __volatile("bsfq %1,%0" : "=r" (result) : "rm" (mask));
   81         return (result);
   82 }
   83 
   84 static __inline u_int
   85 bsrl(u_int mask)
   86 {
   87         u_int   result;
   88 
   89         __asm __volatile("bsrl %1,%0" : "=r" (result) : "rm" (mask));
   90         return (result);
   91 }
   92 
   93 static __inline u_long
   94 bsrq(u_long mask)
   95 {
   96         u_long  result;
   97 
   98         __asm __volatile("bsrq %1,%0" : "=r" (result) : "rm" (mask));
   99         return (result);
  100 }
  101 
/*
 * Flush the cache line containing linear address addr from the entire
 * cache hierarchy.  The "m" operand on *(char *)addr tells the compiler
 * which memory object the instruction touches.
 */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}
  108 
/*
 * Disable maskable interrupts (CLI).  The "memory" clobber also makes
 * this a compiler barrier, so memory accesses are not moved across it.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}
  114 
  115 static __inline void
  116 do_cpuid(u_int ax, u_int *p)
  117 {
  118         __asm __volatile("cpuid"
  119                          : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
  120                          :  "" (ax));
  121 }
  122 
  123 static __inline void
  124 cpuid_count(u_int ax, u_int cx, u_int *p)
  125 {
  126         __asm __volatile("cpuid"
  127                          : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
  128                          :  "" (ax), "c" (cx));
  129 }
  130 
/*
 * Enable maskable interrupts (STI).
 * NOTE(review): unlike disable_intr() this carries no "memory" clobber,
 * so it is not a compiler barrier — confirm no caller relies on that.
 */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}
  136 
#ifdef _KERNEL

/* The kernel supplies inline ffs/fls; tell libkern not to provide them. */
#define HAVE_INLINE_FFS
#define	ffs(x)	__builtin_ffs(x)

#define HAVE_INLINE_FFSL

/*
 * Find first set bit in a long: 1-based index of the least significant
 * set bit, or 0 if mask is 0.  The explicit zero check is required
 * because bsfq() is undefined for a zero mask.
 */
static __inline int
ffsl(long mask)
{
	return (mask == 0 ? mask : (int)bsfq((u_long)mask) + 1);
}

#define HAVE_INLINE_FLS

/*
 * Find last set bit in an int: 1-based index of the most significant
 * set bit, or 0 if mask is 0 (bsrl() is undefined for a zero mask).
 */
static __inline int
fls(int mask)
{
	return (mask == 0 ? mask : (int)bsrl((u_int)mask) + 1);
}

#define HAVE_INLINE_FLSL

/*
 * Find last set bit in a long: 1-based index of the most significant
 * set bit, or 0 if mask is 0 (bsrq() is undefined for a zero mask).
 */
static __inline int
flsl(long mask)
{
	return (mask == 0 ? mask : (int)bsrq((u_long)mask) + 1);
}

#endif /* _KERNEL */
  167 
/*
 * Halt the CPU until the next interrupt arrives (HLT).
 */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}
  173 
/*
 * Read one byte from I/O port "port".  The "Nd" constraint lets the
 * port be an immediate 0-255 or the %dx register.
 */
static __inline u_char
inb(u_int port)
{
	u_char	data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
  182 
/*
 * Read a 32-bit word from I/O port "port".
 */
static __inline u_int
inl(u_int port)
{
	u_int	data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
  191 
/*
 * String input: read "count" bytes from I/O port "port" into the buffer
 * at addr.  CLD first so REP INSB increments %rdi; the "memory" clobber
 * tells the compiler the destination buffer is written.
 */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insb"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* As insb(), but "count" 16-bit words. */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insw"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}

/* As insb(), but "count" 32-bit words. */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("cld; rep; insl"
			 : "+D" (addr), "+c" (count)
			 : "d" (port)
			 : "memory");
}
  218 
/*
 * Invalidate all caches WITHOUT writing dirty lines back — data loss
 * if any line is dirty.  See wbinvd() for the write-back variant.
 */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}
  224 
/*
 * Read a 16-bit word from I/O port "port".
 */
static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}
  233 
/*
 * Write one byte to I/O port "port".  The "Nd" constraint lets the
 * port be an immediate 0-255 or the %dx register.
 */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write a 32-bit word to I/O port "port". */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/*
 * String output: write "count" bytes from the buffer at addr to I/O
 * port "port".  CLD first so REP OUTSB increments %rsi.
 * NOTE(review): no "memory" operand describes the buffer read — appears
 * to rely on the caller's data already being visible; confirm.
 */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsb"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* As outsb(), but "count" 16-bit words. */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsw"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* As outsb(), but "count" 32-bit words. */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("cld; rep; outsl"
			 : "+S" (addr), "+c" (count)
			 : "d" (port));
}

/* Write a 16-bit word to I/O port "port". */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
  275 
/*
 * Full memory fence (MFENCE); the "memory" clobber additionally makes
 * it a compiler barrier.
 */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/*
 * PAUSE: spin-wait-loop hint; reduces power and improves performance of
 * busy-wait loops on hyperthreaded CPUs.
 */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}
  288 
/*
 * Return the current %rflags register, read via push/pop since there is
 * no direct move from %rflags.
 */
static __inline u_long
read_rflags(void)
{
	u_long	rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}
  297 
/*
 * Read the 64-bit model-specific register "msr".  RDMSR takes the MSR
 * number in %ecx and returns the value in %edx:%eax; reassemble it.
 * Privileged; faults (#GP) on an invalid MSR — see rdmsr_safe() below.
 */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

/*
 * Read performance-monitoring counter "pmc" (RDPMC), counter number in
 * %ecx, result in %edx:%eax.
 */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}
  315 
/*
 * Read the 64-bit time-stamp counter (RDTSC, result in %edx:%eax).
 */
static __inline uint64_t
rdtsc(void)
{
	uint32_t low, high;

	__asm __volatile("rdtsc" : "=a" (low), "=d" (high));
	return (low | ((uint64_t)high << 32));
}

/*
 * Read only the low 32 bits of the time-stamp counter; cheaper when a
 * short interval is all that matters.  %edx is clobbered by RDTSC and
 * must be declared so since it is not consumed as an output.
 */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}
  333 
/*
 * Write back all dirty cache lines and invalidate the caches (WBINVD).
 */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/*
 * Load %rflags from rf via push/pop; counterpart of read_rflags().
 */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0;  popfq" : : "r" (rf));
}
  345 
  346 static __inline void
  347 wrmsr(u_int msr, uint64_t newval)
  348 {
  349         uint32_t low, high;
  350 
  351         low = newval;
  352         high = newval >> 32;
  353         __asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
  354 }
  355 
/* Load %cr0 (CPU mode/feature control register). */
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

/* Read %cr0. */
static __inline u_long
rcr0(void)
{
	u_long	data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* Read %cr2: the faulting linear address of the last page fault. */
static __inline u_long
rcr2(void)
{
	u_long	data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load %cr3 (page-table base).  This changes address translation, so
 * the "memory" clobber keeps the compiler from caching memory accesses
 * across the switch.
 */
static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

/* Read %cr3 (page-table base). */
static __inline u_long
rcr3(void)
{
	u_long	data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

/* Load %cr4 (architecture feature-enable register). */
static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

/* Read %cr4. */
static __inline u_long
rcr4(void)
{
	u_long	data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}
  411 
/*
 * Global TLB flush (except for those pages marked PG_G), done by
 * reloading %cr3 with its current value.
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * (The original comment's "only works on 486+" caveat is an i386
 * leftover; INVLPG is always available on amd64.)
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
  432 
/* Read the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Read the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Read the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}
  456 
/* Load the %ds segment selector. */
static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

/* Load the %es segment selector. */
static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}
  468 
/*
 * MONITOR: arm address-range monitoring on the line containing addr,
 * for a subsequent cpu_mwait().  extensions in %rcx, hints in %edx.
 */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/*
 * MWAIT: wait (possibly in a low-power state selected by hints) until
 * the address range armed by cpu_monitor() is written.
 */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
  483 
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define MSR_FSBASE	0xc0000100
#endif
/*
 * Kernel variant: load the %fs selector while keeping the FSBASE MSR
 * intact (the selector load would otherwise trash the hidden base).
 * rdmsr/wrmsr use %eax/%edx, hence those clobbers.
 */
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define MSR_GSBASE	0xc0000101
#endif
/*
 * Kernel variant: load the %gs selector while keeping the GSBASE MSR
 * intact.
 */
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland: plain selector loads, no MSR preservation. */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif
  525 
/* Load the interrupt descriptor table register from *addr. */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Load the local descriptor table register with selector sel. */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Load the task register with TSS selector sel. */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}
  543 
/*
 * Accessors for the hardware debug registers %dr0-%dr7.
 * %dr0-%dr3 hold breakpoint addresses, %dr6 is status, %dr7 is control.
 * NOTE(review): %dr4/%dr5 are aliases for %dr6/%dr7 — presumably only
 * legal while CR4.DE is clear; confirm callers' expectations.
 */

/* Read %dr0. */
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

/* Load %dr0. */
static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

/* Read %dr1. */
static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

/* Load %dr1. */
static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

/* Read %dr2. */
static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

/* Load %dr2. */
static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

/* Read %dr3. */
static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

/* Load %dr3. */
static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

/* Read %dr4 (alias of %dr6). */
static __inline uint64_t
rdr4(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr4,%0" : "=r" (data));
	return (data);
}

/* Load %dr4 (alias of %dr6). */
static __inline void
load_dr4(uint64_t dr4)
{
	__asm __volatile("movq %0,%%dr4" : : "r" (dr4));
}

/* Read %dr5 (alias of %dr7). */
static __inline uint64_t
rdr5(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr5,%0" : "=r" (data));
	return (data);
}

/* Load %dr5 (alias of %dr7). */
static __inline void
load_dr5(uint64_t dr5)
{
	__asm __volatile("movq %0,%%dr5" : : "r" (dr5));
}

/* Read %dr6 (debug status). */
static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

/* Load %dr6 (debug status). */
static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

/* Read %dr7 (debug control). */
static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

/* Load %dr7 (debug control). */
static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
  655 
/*
 * Disable interrupts, returning the previous %rflags so the caller can
 * later restore the prior interrupt state with intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

/*
 * Restore the interrupt state saved by intr_disable().
 */
static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}
  671 
  672 #else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
  673 
  674 int     breakpoint(void);
  675 u_int   bsfl(u_int mask);
  676 u_int   bsrl(u_int mask);
  677 void    disable_intr(void);
  678 void    do_cpuid(u_int ax, u_int *p);
  679 void    enable_intr(void);
  680 void    halt(void);
  681 void    ia32_pause(void);
  682 u_char  inb(u_int port);
  683 u_int   inl(u_int port);
  684 void    insb(u_int port, void *addr, size_t count);
  685 void    insl(u_int port, void *addr, size_t count);
  686 void    insw(u_int port, void *addr, size_t count);
  687 register_t      intr_disable(void);
  688 void    intr_restore(register_t rf);
  689 void    invd(void);
  690 void    invlpg(u_int addr);
  691 void    invltlb(void);
  692 u_short inw(u_int port);
  693 void    lidt(struct region_descriptor *addr);
  694 void    lldt(u_short sel);
  695 void    load_cr0(u_long cr0);
  696 void    load_cr3(u_long cr3);
  697 void    load_cr4(u_long cr4);
  698 void    load_dr0(uint64_t dr0);
  699 void    load_dr1(uint64_t dr1);
  700 void    load_dr2(uint64_t dr2);
  701 void    load_dr3(uint64_t dr3);
  702 void    load_dr4(uint64_t dr4);
  703 void    load_dr5(uint64_t dr5);
  704 void    load_dr6(uint64_t dr6);
  705 void    load_dr7(uint64_t dr7);
  706 void    load_fs(u_short sel);
  707 void    load_gs(u_short sel);
  708 void    ltr(u_short sel);
  709 void    outb(u_int port, u_char data);
  710 void    outl(u_int port, u_int data);
  711 void    outsb(u_int port, const void *addr, size_t count);
  712 void    outsl(u_int port, const void *addr, size_t count);
  713 void    outsw(u_int port, const void *addr, size_t count);
  714 void    outw(u_int port, u_short data);
  715 u_long  rcr0(void);
  716 u_long  rcr2(void);
  717 u_long  rcr3(void);
  718 u_long  rcr4(void);
  719 uint64_t rdmsr(u_int msr);
  720 uint64_t rdpmc(u_int pmc);
  721 uint64_t rdr0(void);
  722 uint64_t rdr1(void);
  723 uint64_t rdr2(void);
  724 uint64_t rdr3(void);
  725 uint64_t rdr4(void);
  726 uint64_t rdr5(void);
  727 uint64_t rdr6(void);
  728 uint64_t rdr7(void);
  729 uint64_t rdtsc(void);
  730 u_int   read_rflags(void);
  731 u_int   rfs(void);
  732 u_int   rgs(void);
  733 void    wbinvd(void);
  734 void    write_rflags(u_int rf);
  735 void    wrmsr(u_int msr, uint64_t newval);
  736 
  737 #endif  /* __GNUCLIKE_ASM && __CC_SUPPORTS___INLINE */
  738 
/* Clear/reset the hardware debug registers (implemented elsewhere). */
void	reset_dbregs(void);

#ifdef _KERNEL
/*
 * Fault-tolerant MSR access, implemented elsewhere; presumably they
 * return non-zero instead of faulting on an invalid MSR — confirm at
 * the definition site.
 */
int	rdmsr_safe(u_int msr, uint64_t *val);
int	wrmsr_safe(u_int msr, uint64_t newval);
#endif

#endif /* !_MACHINE_CPUFUNC_H_ */

Cache object: 28081ffde16060fd7fceb1ed5ce06d3b


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.