FreeBSD/Linux Kernel Cross Reference
sys/compat/x86bios/x86bios.c

/*-
 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.0/sys/compat/x86bios/x86bios.c 219430 2011-03-09 16:16:38Z jkim $");

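/*
 * x86bios provides a kernel interface for executing real-mode x86 BIOS
 * code (e.g. video BIOS services used by console and VESA drivers; the
 * list of consumers is an editorial note, not stated in this file).  On
 * i386 (X86BIOS_NATIVE_VM86 below) calls run natively through the vm86
 * layer; on every other configuration, including amd64
 * (X86BIOS_NATIVE_ARCH), they are interpreted by the x86emu emulator.
 */
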
#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifdef __amd64__
#define X86BIOS_NATIVE_ARCH
#endif
#ifdef __i386__
#define X86BIOS_NATIVE_VM86
#endif

#define X86BIOS_MEM_SIZE        0x00100000      /* 1M */

#define X86BIOS_TRACE(h, n, r)  do {                                    \
        printf(__STRING(h)                                              \
            " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
            (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,            \
            (r)->R_ES, (r)->R_DI);                                      \
} while (0)

static struct mtx x86bios_lock;

SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL, "x86bios debugging");
static int x86bios_trace_call;
TUNABLE_INT("debug.x86bios.call", &x86bios_trace_call);
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RW, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
TUNABLE_INT("debug.x86bios.int", &x86bios_trace_int);
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RW, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

#ifdef X86BIOS_NATIVE_VM86

#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <machine/pc/bios.h>

struct vm86context x86bios_vmc;

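/*
 * Helpers to copy register state between x86emu's register layout and the
 * vm86 trapframe used by vm86_datacall().  The callers below cast
 * struct x86regs directly to struct x86emu_regs, so the layouts match.
 */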
static void
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
{

        vmf->vmf_ds = regs->R_DS;
        vmf->vmf_es = regs->R_ES;
        vmf->vmf_ax = regs->R_AX;
        vmf->vmf_bx = regs->R_BX;
        vmf->vmf_cx = regs->R_CX;
        vmf->vmf_dx = regs->R_DX;
        vmf->vmf_bp = regs->R_BP;
        vmf->vmf_si = regs->R_SI;
        vmf->vmf_di = regs->R_DI;
}

static void
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
{

        regs->R_DS = vmf->vmf_ds;
        regs->R_ES = vmf->vmf_es;
        regs->R_FLG = vmf->vmf_flags;
        regs->R_AX = vmf->vmf_ax;
        regs->R_BX = vmf->vmf_bx;
        regs->R_CX = vmf->vmf_cx;
        regs->R_DX = vmf->vmf_dx;
        regs->R_BP = vmf->vmf_bp;
        regs->R_SI = vmf->vmf_si;
        regs->R_DI = vmf->vmf_di;
}

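/*
 * Allocate wired, physically contiguous memory below 1MB and register each
 * page with the shared vm86 context so the real-mode code can address it.
 * The caller gets back the kernel virtual address; *offset receives the
 * physical (real-mode visible) address.
 */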
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
        void *vaddr;
        int i;

        if (offset == NULL || size == 0)
                return (NULL);
        vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
            PAGE_SIZE, 0);
        if (vaddr != NULL) {
                *offset = vtophys(vaddr);
                mtx_lock(&x86bios_lock);
                for (i = 0; i < atop(round_page(size)); i++)
                        vm86_addpage(&x86bios_vmc, atop(*offset) + i,
                            (vm_offset_t)vaddr + ptoa(i));
                mtx_unlock(&x86bios_lock);
        }

        return (vaddr);
}

void
x86bios_free(void *addr, size_t size)
{
        vm_paddr_t paddr;
        int i, nfree;

        if (addr == NULL || size == 0)
                return;
        paddr = vtophys(addr);
        if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
                return;
        mtx_lock(&x86bios_lock);
        for (i = 0; i < x86bios_vmc.npages; i++)
                if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
                        break;
        if (i >= x86bios_vmc.npages) {
                mtx_unlock(&x86bios_lock);
                return;
        }
        nfree = atop(round_page(size));
        bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
        if (i + nfree == x86bios_vmc.npages) {
                x86bios_vmc.npages -= nfree;
                while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
                        x86bios_vmc.npages--;
        }
        mtx_unlock(&x86bios_lock);
        contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

        bzero(regs, sizeof(*regs));
}

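/*
 * Execute a far call to seg:off (and, further below, a software interrupt)
 * in vm86 mode.  Register state is marshalled through a vm86frame, and the
 * shared vm86 context is protected by x86bios_lock for the duration of the
 * call.
 */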
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
        struct vm86frame vmf;

        if (x86bios_trace_call)
                X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

        bzero(&vmf, sizeof(vmf));
        x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
        vmf.vmf_cs = seg;
        vmf.vmf_ip = off;
        mtx_lock(&x86bios_lock);
        vm86_datacall(-1, &vmf, &x86bios_vmc);
        mtx_unlock(&x86bios_lock);
        x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

        if (x86bios_trace_call)
                X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}

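/*
 * Read or write a real-mode interrupt vector.  Each IVT entry is a 4-byte
 * segment:offset pair starting at physical address 0, accessed here through
 * the kernel's permanent low-memory mapping (BIOS_PADDRTOVADDR).
 */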
uint32_t
x86bios_get_intr(int intno)
{

        return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

        writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{
        struct vm86frame vmf;

        if (x86bios_trace_int)
                X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

        bzero(&vmf, sizeof(vmf));
        x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
        mtx_lock(&x86bios_lock);
        vm86_datacall(intno, &vmf, &x86bios_vmc);
        mtx_unlock(&x86bios_lock);
        x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

        if (x86bios_trace_int)
                X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
}

void *
x86bios_offset(uint32_t offset)
{
        vm_offset_t addr;

        addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
            X86BIOS_PHYSTOOFF(offset));
        if (addr == 0)
                addr = BIOS_PADDRTOVADDR(offset);

        return ((void *)addr);
}

static int
x86bios_init(void)
{

        mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
        bzero(&x86bios_vmc, sizeof(x86bios_vmc));

        return (0);
}

static int
x86bios_uninit(void)
{

        mtx_destroy(&x86bios_lock);

        return (0);
}

#else

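/*
 * Emulated (x86emu) back end.  A sparse page map, x86bios_map, translates
 * the emulator's 20-bit "physical" addresses to kernel virtual addresses:
 * the IVT/BDA lives at 0x00000, scratch RAM allocations start at 0x01000,
 * and the system BIOS/option ROM area (plus the EBDA, when found) is
 * mapped from x86bios_rom_phys up to 1MB.
 */
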
#include <machine/iodev.h>

#define X86BIOS_PAGE_SIZE       0x00001000      /* 4K */

#define X86BIOS_IVT_SIZE        0x00000500      /* 1K + 256 (BDA) */

#define X86BIOS_IVT_BASE        0x00000000
#define X86BIOS_RAM_BASE        0x00001000
#define X86BIOS_ROM_BASE        0x000a0000

#define X86BIOS_ROM_SIZE        (X86BIOS_MEM_SIZE - x86bios_rom_phys)
#define X86BIOS_SEG_SIZE        X86BIOS_PAGE_SIZE

#define X86BIOS_PAGES           (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

#define X86BIOS_R_SS            _pad2
#define X86BIOS_R_SP            _pad3.I16_reg.x_reg

static struct x86emu x86bios_emu;

static void *x86bios_ivt;
static void *x86bios_rom;
static void *x86bios_seg;

static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

        x86bios_fault = 1;
        x86bios_fault_addr = addr;
        x86bios_fault_cs = emu->x86.R_CS;
        x86bios_fault_ip = emu->x86.R_IP;
        x86emu_halt_sys(emu);
}

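/*
 * Translate an emulated physical address to a kernel virtual address via
 * x86bios_map, returning NULL for unmapped pages.  Accesses slightly above
 * 1MB are wrapped back to the start of the IVT, mimicking real-mode
 * address wrap-around.  x86bios_set_pages() fills in the map for a
 * contiguous physical range.
 */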
static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
        vm_offset_t addr;

        if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
                return (NULL);

        if (offset >= X86BIOS_MEM_SIZE)
                offset -= X86BIOS_MEM_SIZE;
        addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
        if (addr != 0)
                addr += offset % X86BIOS_PAGE_SIZE;

        return ((void *)addr);
}

static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
        int i, j;

        for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
            j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
                x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

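/*
 * Memory access callbacks installed into the x86emu instance.  An access to
 * an unmapped page records the fault state and aborts emulation through
 * x86bios_set_fault().  Values are converted from/to little-endian, and on
 * strict-alignment machines misaligned word/long accesses are done
 * bytewise.
 */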
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
        uint8_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

        return (*va);
}

static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
        uint16_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 1) != 0)
                return (le16dec(va));
        else
#endif
        return (le16toh(*va));
}

static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
        uint32_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 3) != 0)
                return (le32dec(va));
        else
#endif
        return (le32toh(*va));
}

static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
        uint8_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

        *va = val;
}

static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
        uint16_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 1) != 0)
                le16enc(va, val);
        else
#endif
        *va = htole16(val);
}

static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
        uint32_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 3) != 0)
                le32enc(va, val);
        else
#endif
        *va = htole32(val);
}

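/*
 * I/O port callbacks installed into the x86emu instance, implemented with
 * the iodev_*() primitives.  On non-native architectures, accesses to the
 * POST status ports (0x80-0x87) and the APM scratch register (0xb2) are
 * not passed to real hardware (reads return 0), and misaligned word/long
 * transfers are split into smaller ones.
 */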
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port == 0xb2) /* APM scratch register */
                return (0);
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);
#endif

        return (iodev_read_1(port));
}

static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
        uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);

        if ((port & 1) != 0) {
                val = iodev_read_1(port);
                val |= iodev_read_1(port + 1) << 8;
        } else
#endif
        val = iodev_read_2(port);

        return (val);
}

static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
        uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);

        if ((port & 1) != 0) {
                val = iodev_read_1(port);
                val |= iodev_read_2(port + 1) << 8;
                val |= iodev_read_1(port + 3) << 24;
        } else if ((port & 2) != 0) {
                val = iodev_read_2(port);
                val |= iodev_read_2(port + 2) << 16;
        } else
#endif
        val = iodev_read_4(port);

        return (val);
}

static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port == 0xb2) /* APM scratch register */
                return;
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;
#endif

        iodev_write_1(port, val);
}

static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;

        if ((port & 1) != 0) {
                iodev_write_1(port, val);
                iodev_write_1(port + 1, val >> 8);
        } else
#endif
        iodev_write_2(port, val);
}

static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;

        if ((port & 1) != 0) {
                iodev_write_1(port, val);
                iodev_write_2(port + 1, val >> 8);
                iodev_write_1(port + 3, val >> 24);
        } else if ((port & 2) != 0) {
                iodev_write_2(port, val);
                iodev_write_2(port + 2, val >> 16);
        } else
#endif
        iodev_write_4(port, val);
}

void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
        void *vaddr;

        if (offset == NULL || size == 0)
                return (NULL);
        vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
            x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
        if (vaddr != NULL) {
                *offset = vtophys(vaddr);
                mtx_lock(&x86bios_lock);
                x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
                mtx_unlock(&x86bios_lock);
        }

        return (vaddr);
}

void
x86bios_free(void *addr, size_t size)
{
        vm_paddr_t paddr;

        if (addr == NULL || size == 0)
                return;
        paddr = vtophys(addr);
        if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
            paddr % X86BIOS_PAGE_SIZE != 0)
                return;
        mtx_lock(&x86bios_lock);
        bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
            sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
        mtx_unlock(&x86bios_lock);
        contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

        bzero(regs, sizeof(*regs));
        regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
        regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}

void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

        if (x86bios_trace_call)
                X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

        mtx_lock(&x86bios_lock);
        memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
        x86bios_fault = 0;
        spinlock_enter();
        x86emu_exec_call(&x86bios_emu, seg, off);
        spinlock_exit();
        memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
        mtx_unlock(&x86bios_lock);

        if (x86bios_trace_call) {
                X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
                if (x86bios_fault)
                        printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
                            x86bios_fault_addr, x86bios_fault_cs,
                            x86bios_fault_ip);
        }
}

uint32_t
x86bios_get_intr(int intno)
{

        return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

        *((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{

        if (intno < 0 || intno > 255)
                return;

        if (x86bios_trace_int)
                X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

        mtx_lock(&x86bios_lock);
        memcpy(&x86bios_emu.x86, regs, sizeof(*regs));
        x86bios_fault = 0;
        spinlock_enter();
        x86emu_exec_intr(&x86bios_emu, intno);
        spinlock_exit();
        memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
        mtx_unlock(&x86bios_lock);

        if (x86bios_trace_int) {
                X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
                if (x86bios_fault)
                        printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
                            x86bios_fault_addr, x86bios_fault_cs,
                            x86bios_fault_ip);
        }
}

void *
x86bios_offset(uint32_t offset)
{

        return (x86bios_get_pages(offset, 1));
}

static __inline void
x86bios_unmap_mem(void)
{

        free(x86bios_map, M_DEVBUF);
        if (x86bios_ivt != NULL)
#ifdef X86BIOS_NATIVE_ARCH
                pmap_unmapbios((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
#else
                free(x86bios_ivt, M_DEVBUF);
#endif
        if (x86bios_rom != NULL)
                pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
        if (x86bios_seg != NULL)
                contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
}

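/*
 * Build the emulated address space.  On X86BIOS_NATIVE_ARCH the real
 * IVT/BDA is mapped and the EBDA location is probed from the BDA word at
 * offset 0x40e; otherwise a zeroed IVT is allocated.  The EBDA/ROM region
 * up to 1MB is then mapped with pmap_mapdev(), one page of low RAM is
 * allocated as the stack/scratch segment, and everything is entered into
 * x86bios_map.
 */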
static __inline int
x86bios_map_mem(void)
{

        x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
            M_WAITOK | M_ZERO);

#ifdef X86BIOS_NATIVE_ARCH
        x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

        /* Probe EBDA via BDA. */
        x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
        x86bios_rom_phys = x86bios_rom_phys << 4;
        if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
            X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
                x86bios_rom_phys =
                    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
        else
#else
        x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
#endif

        x86bios_rom_phys = X86BIOS_ROM_BASE;
        x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
        if (x86bios_rom == NULL)
                goto fail;
#ifdef X86BIOS_NATIVE_ARCH
        /* Change attribute for EBDA. */
        if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
            pmap_change_attr((vm_offset_t)x86bios_rom,
            X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
                goto fail;
#endif

        x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
            X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
        x86bios_seg_phys = vtophys(x86bios_seg);

        x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
            X86BIOS_IVT_SIZE);
        x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
            X86BIOS_ROM_SIZE);
        x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
            X86BIOS_SEG_SIZE);

        if (bootverbose) {
                printf("x86bios:  IVT 0x%06jx-0x%06jx at %p\n",
                    (vm_paddr_t)X86BIOS_IVT_BASE,
                    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
                    x86bios_ivt);
                printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
                    x86bios_seg_phys,
                    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
                    x86bios_seg);
                if (x86bios_rom_phys < X86BIOS_ROM_BASE)
                        printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
                            x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
                            x86bios_rom);
                printf("x86bios:  ROM 0x%06jx-0x%06jx at %p\n",
                    (vm_paddr_t)X86BIOS_ROM_BASE,
                    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
                    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
        }

        return (0);

fail:
        x86bios_unmap_mem();

        return (1);
}

static int
x86bios_init(void)
{

        mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);

        if (x86bios_map_mem() != 0)
                return (ENOMEM);

        bzero(&x86bios_emu, sizeof(x86bios_emu));

        x86bios_emu.emu_rdb = x86bios_emu_rdb;
        x86bios_emu.emu_rdw = x86bios_emu_rdw;
        x86bios_emu.emu_rdl = x86bios_emu_rdl;
        x86bios_emu.emu_wrb = x86bios_emu_wrb;
        x86bios_emu.emu_wrw = x86bios_emu_wrw;
        x86bios_emu.emu_wrl = x86bios_emu_wrl;

        x86bios_emu.emu_inb = x86bios_emu_inb;
        x86bios_emu.emu_inw = x86bios_emu_inw;
        x86bios_emu.emu_inl = x86bios_emu_inl;
        x86bios_emu.emu_outb = x86bios_emu_outb;
        x86bios_emu.emu_outw = x86bios_emu_outw;
        x86bios_emu.emu_outl = x86bios_emu_outl;

        return (0);
}

static int
x86bios_uninit(void)
{

        x86bios_unmap_mem();
        mtx_destroy(&x86bios_lock);

        return (0);
}

#endif

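/*
 * Return a pointer to the option ROM image shadowed at the given physical
 * offset, or NULL if there is none: a valid ROM starts with the 0x55 0xaa
 * signature and has a near jump opcode (0xe9 or 0xeb) at offset 3.
 */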
void *
x86bios_get_orm(uint32_t offset)
{
        uint8_t *p;

        /* Does the shadow ROM contain BIOS POST code for x86? */
        p = x86bios_offset(offset);
        if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
            (p[3] != 0xe9 && p[3] != 0xeb))
                return (NULL);

        return (p);
}

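/*
 * Check whether the option ROM at 'offset' belongs to the given PCI device:
 * follow the pointer at ROM offset 0x18 to the PCI data structure ("PCIR"),
 * sanity-check its length and code type (x86), and compare its vendor,
 * device and class code fields against the device's configuration space.
 */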
int
x86bios_match_device(uint32_t offset, device_t dev)
{
        uint8_t *p;
        uint16_t device, vendor;
        uint8_t class, progif, subclass;

        /* Does the shadow ROM contain BIOS POST code for x86? */
        p = x86bios_get_orm(offset);
        if (p == NULL)
                return (0);

        /* Does it contain PCI data structure? */
        p += le16toh(*(uint16_t *)(p + 0x18));
        if (bcmp(p, "PCIR", 4) != 0 ||
            le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
                return (0);

        /* Does it match the vendor, device, and classcode? */
        vendor = le16toh(*(uint16_t *)(p + 0x04));
        device = le16toh(*(uint16_t *)(p + 0x06));
        progif = *(p + 0x0d);
        subclass = *(p + 0x0e);
        class = *(p + 0x0f);
        if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
            class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
            progif != pci_get_progif(dev))
                return (0);

        return (1);
}

static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

        switch (type) {
        case MOD_LOAD:
                return (x86bios_init());
        case MOD_UNLOAD:
                return (x86bios_uninit());
        default:
                return (ENOTSUP);
        }
}

static moduledata_t x86bios_mod = {
        "x86bios",
        x86bios_modevent,
        NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);
