FreeBSD/Linux Kernel Cross Reference
sys/compat/x86bios/x86bios.c


/*-
 * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
 * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

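/*
 * x86bios lets kernel drivers execute real-mode x86 BIOS code (typically a
 * video option ROM) from the kernel.  On i386 the calls run natively in
 * vm86 mode; on other platforms they run inside the x86emu software
 * emulator with port I/O passed through to the hardware.
 *
 * Illustrative sketch of how a caller might drive this interface; the VBE
 * function number and buffer size below are examples, not taken from this
 * file:
 *
 *      x86regs_t regs;
 *      uint32_t offs;
 *      void *buf;
 *
 *      buf = x86bios_alloc(&offs, 512, M_WAITOK);
 *      x86bios_init_regs(&regs);
 *      regs.R_AX = 0x4f00;                     (VBE "get controller info")
 *      regs.R_ES = X86BIOS_PHYSTOSEG(offs);
 *      regs.R_DI = X86BIOS_PHYSTOOFF(offs);
 *      x86bios_intr(&regs, 0x10);              (INT 10h, the video BIOS)
 *      ...
 *      x86bios_free(buf, 512);
 */
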
#include "opt_x86bios.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <contrib/x86emu/x86emu.h>
#include <contrib/x86emu/x86emu_regs.h>
#include <compat/x86bios/x86bios.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

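/*
 * Back-end selection: on i386, BIOS calls run natively in vm86 mode
 * (X86BIOS_NATIVE_VM86).  On amd64 the x86emu interpreter is used, but the
 * machine still has a real BIOS, so the IVT and ROM are mapped from
 * physical memory and port I/O is passed through unfiltered
 * (X86BIOS_NATIVE_ARCH).  On other platforms x86emu is used with a
 * synthetic IVT and filtered port I/O.
 */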
#ifdef __amd64__
#define X86BIOS_NATIVE_ARCH
#endif
#ifdef __i386__
#define X86BIOS_NATIVE_VM86
#endif

#define X86BIOS_MEM_SIZE        0x00100000      /* 1M */

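/*
 * Dump the caller-visible register file before and after a traced call or
 * software interrupt; enabled with the debug.x86bios.call and
 * debug.x86bios.int sysctls below.
 */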
#define X86BIOS_TRACE(h, n, r)  do {                                    \
        printf(__STRING(h)                                              \
            " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
            (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,            \
            (r)->R_ES, (r)->R_DI);                                      \
} while (0)

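/* Serializes callers' access to the vm86 context / emulator state. */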
static struct mtx x86bios_lock;

static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "x86bios debugging");
static int x86bios_trace_call;
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RWTUN, &x86bios_trace_call, 0,
    "Trace far function calls");
static int x86bios_trace_int;
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RWTUN, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");

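/*
 * i386: run BIOS code natively in vm86 mode.  Low physical memory is made
 * addressable to the BIOS through a vm86 context, and vm86_datacall() does
 * the actual mode switch.
 */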
#ifdef X86BIOS_NATIVE_VM86

#include <machine/vm86.h>
#include <machine/vmparam.h>
#include <machine/pc/bios.h>

struct vm86context x86bios_vmc;

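/*
 * Shuttle register values between the x86emu-style register file used by
 * the public x86bios API and the vm86 trapframe consumed by vm86_datacall().
 */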
static void
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
{

        vmf->vmf_ds = regs->R_DS;
        vmf->vmf_es = regs->R_ES;
        vmf->vmf_ax = regs->R_AX;
        vmf->vmf_bx = regs->R_BX;
        vmf->vmf_cx = regs->R_CX;
        vmf->vmf_dx = regs->R_DX;
        vmf->vmf_bp = regs->R_BP;
        vmf->vmf_si = regs->R_SI;
        vmf->vmf_di = regs->R_DI;
}

static void
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
{

        regs->R_DS = vmf->vmf_ds;
        regs->R_ES = vmf->vmf_es;
        regs->R_FLG = vmf->vmf_flags;
        regs->R_AX = vmf->vmf_ax;
        regs->R_BX = vmf->vmf_bx;
        regs->R_CX = vmf->vmf_cx;
        regs->R_DX = vmf->vmf_dx;
        regs->R_BP = vmf->vmf_bp;
        regs->R_SI = vmf->vmf_si;
        regs->R_DI = vmf->vmf_di;
}

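/*
 * Allocate wired, contiguous memory below 1MB and register each page with
 * the vm86 context so that real-mode BIOS code can address it.
 */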
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
        void *vaddr;
        u_int i;

        if (offset == NULL || size == 0)
                return (NULL);
        vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
            PAGE_SIZE, 0);
        if (vaddr != NULL) {
                *offset = vtophys(vaddr);
                mtx_lock(&x86bios_lock);
                for (i = 0; i < atop(round_page(size)); i++)
                        vm86_addpage(&x86bios_vmc, atop(*offset) + i,
                            (vm_offset_t)vaddr + ptoa(i));
                mtx_unlock(&x86bios_lock);
        }

        return (vaddr);
}

void
x86bios_free(void *addr, size_t size)
{
        vm_paddr_t paddr;
        int i, nfree;

        if (addr == NULL || size == 0)
                return;
        paddr = vtophys(addr);
        if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
                return;
        mtx_lock(&x86bios_lock);
        for (i = 0; i < x86bios_vmc.npages; i++)
                if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
                        break;
        if (i >= x86bios_vmc.npages) {
                mtx_unlock(&x86bios_lock);
                return;
        }
        nfree = atop(round_page(size));
        bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
        if (i + nfree == x86bios_vmc.npages) {
                x86bios_vmc.npages -= nfree;
                while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
                        x86bios_vmc.npages--;
        }
        mtx_unlock(&x86bios_lock);
        contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

        bzero(regs, sizeof(*regs));
}

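/* Execute a far call to the real-mode entry point at seg:off. */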
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
        struct vm86frame vmf;

        if (x86bios_trace_call)
                X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

        bzero(&vmf, sizeof(vmf));
        x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
        vmf.vmf_cs = seg;
        vmf.vmf_ip = off;
        mtx_lock(&x86bios_lock);
        vm86_datacall(-1, &vmf, &x86bios_vmc);
        mtx_unlock(&x86bios_lock);
        x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

        if (x86bios_trace_call)
                X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}

uint32_t
x86bios_get_intr(int intno)
{

        return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

        writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{
        struct vm86frame vmf;

        if (x86bios_trace_int)
                X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

        bzero(&vmf, sizeof(vmf));
        x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
        mtx_lock(&x86bios_lock);
        vm86_datacall(intno, &vmf, &x86bios_vmc);
        mtx_unlock(&x86bios_lock);
        x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

        if (x86bios_trace_int)
                X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
}

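/*
 * Translate a physical offset below 1MB into a kernel virtual address:
 * prefer pages registered with the vm86 context, otherwise fall back to
 * the statically mapped BIOS range.
 */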
void *
x86bios_offset(uint32_t offset)
{
        vm_offset_t addr;

        addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
            X86BIOS_PHYSTOOFF(offset));
        if (addr == 0)
                addr = BIOS_PADDRTOVADDR(offset);

        return ((void *)addr);
}

static int
x86bios_init(void)
{

        mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
        bzero(&x86bios_vmc, sizeof(x86bios_vmc));

        return (0);
}

static int
x86bios_uninit(void)
{

        mtx_destroy(&x86bios_lock);

        return (0);
}

#else

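/*
 * Emulated back-end: BIOS code is interpreted by x86emu.  The emulator sees
 * a flat 1MB real-mode address space translated page by page through
 * x86bios_map: the IVT and BIOS data area at physical 0, driver scratch
 * memory from X86BIOS_RAM_BASE up, and the (E)BIOS/option ROM window from
 * x86bios_rom_phys to 1MB.  One page is reserved as the emulated stack
 * segment.  Port I/O is forwarded to the hardware through machine/iodev.h.
 */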
#include <machine/iodev.h>

#define X86BIOS_PAGE_SIZE       0x00001000      /* 4K */

#define X86BIOS_IVT_SIZE        0x00000500      /* 1K + 256 (BDA) */

#define X86BIOS_IVT_BASE        0x00000000
#define X86BIOS_RAM_BASE        0x00001000
#define X86BIOS_ROM_BASE        0x000a0000

#define X86BIOS_ROM_SIZE        (X86BIOS_MEM_SIZE - x86bios_rom_phys)
#define X86BIOS_SEG_SIZE        X86BIOS_PAGE_SIZE

#define X86BIOS_PAGES           (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

#define X86BIOS_R_SS            _pad2
#define X86BIOS_R_SP            _pad3.I16_reg.x_reg

static struct x86emu x86bios_emu;

static void *x86bios_ivt;
static void *x86bios_rom;
static void *x86bios_seg;

static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;

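/*
 * Record an access to an unmapped guest address and halt the emulator; the
 * fault is reported later if call/interrupt tracing is enabled.
 */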
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

        x86bios_fault = 1;
        x86bios_fault_addr = addr;
        x86bios_fault_cs = emu->x86.R_CS;
        x86bios_fault_ip = emu->x86.R_IP;
        x86emu_halt_sys(emu);
}

static void *
x86bios_get_pages(uint32_t offset, size_t size)
{
        vm_offset_t addr;

        if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
                return (NULL);

        if (offset >= X86BIOS_MEM_SIZE)
                offset -= X86BIOS_MEM_SIZE;
        addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
        if (addr != 0)
                addr += offset % X86BIOS_PAGE_SIZE;

        return ((void *)addr);
}

static void
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
{
        int i, j;

        for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
            j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
                x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
}

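/*
 * Memory access hooks for x86emu.  Guest addresses are translated through
 * x86bios_map; an unmapped address halts the emulator via
 * x86bios_set_fault().  Multi-byte accesses honor little-endian layout and
 * fall back to byte-wise decoding on strict-alignment machines.
 */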
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
        uint8_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

        return (*va);
}

static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
        uint16_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 1) != 0)
                return (le16dec(va));
        else
#endif
        return (le16toh(*va));
}

static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
        uint32_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 3) != 0)
                return (le32dec(va));
        else
#endif
        return (le32toh(*va));
}

static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
        uint8_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

        *va = val;
}

static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
        uint16_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 1) != 0)
                le16enc(va, val);
        else
#endif
        *va = htole16(val);
}

static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
        uint32_t *va;

        va = x86bios_get_pages(addr, sizeof(*va));
        if (va == NULL)
                x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
        if ((addr & 3) != 0)
                le32enc(va, val);
        else
#endif
        *va = htole32(val);
}

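/*
 * Port I/O hooks for x86emu, passed through to real hardware via iodev.
 * On non-native platforms the APM scratch and POST status ports are
 * ignored, and unaligned word/dword accesses are split into smaller ones.
 */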
static uint8_t
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port == 0xb2) /* APM scratch register */
                return (0);
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);
#endif

        return (iodev_read_1(port));
}

static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
        uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);

        if ((port & 1) != 0) {
                val = iodev_read_1(port);
                val |= iodev_read_1(port + 1) << 8;
        } else
#endif
        val = iodev_read_2(port);

        return (val);
}

static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
        uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return (0);

        if ((port & 1) != 0) {
                val = iodev_read_1(port);
                val |= iodev_read_2(port + 1) << 8;
                val |= iodev_read_1(port + 3) << 24;
        } else if ((port & 2) != 0) {
                val = iodev_read_2(port);
                val |= iodev_read_2(port + 2) << 16;
        } else
#endif
        val = iodev_read_4(port);

        return (val);
}

static void
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port == 0xb2) /* APM scratch register */
                return;
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;
#endif

        iodev_write_1(port, val);
}

static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;

        if ((port & 1) != 0) {
                iodev_write_1(port, val);
                iodev_write_1(port + 1, val >> 8);
        } else
#endif
        iodev_write_2(port, val);
}

static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
        if (port >= 0x80 && port < 0x88) /* POST status register */
                return;

        if ((port & 1) != 0) {
                iodev_write_1(port, val);
                iodev_write_2(port + 1, val >> 8);
                iodev_write_1(port + 3, val >> 24);
        } else if ((port & 2) != 0) {
                iodev_write_2(port, val);
                iodev_write_2(port + 2, val >> 16);
        } else
#endif
        iodev_write_4(port, val);
}

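/*
 * Allocate contiguous scratch memory between X86BIOS_RAM_BASE and the ROM
 * window and enter it into the emulator's page map so BIOS code can see it.
 */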
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
        void *vaddr;

        if (offset == NULL || size == 0)
                return (NULL);
        vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
            x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
        if (vaddr != NULL) {
                *offset = vtophys(vaddr);
                mtx_lock(&x86bios_lock);
                x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
                mtx_unlock(&x86bios_lock);
        }

        return (vaddr);
}

void
x86bios_free(void *addr, size_t size)
{
        vm_paddr_t paddr;

        if (addr == NULL || size == 0)
                return;
        paddr = vtophys(addr);
        if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
            paddr % X86BIOS_PAGE_SIZE != 0)
                return;
        mtx_lock(&x86bios_lock);
        bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
            sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
        mtx_unlock(&x86bios_lock);
        contigfree(addr, size, M_DEVBUF);
}

void
x86bios_init_regs(struct x86regs *regs)
{

        bzero(regs, sizeof(*regs));
        regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
        regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}

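/*
 * Run a far call at seg:off inside the emulator.  The register file is
 * copied in and out around the run, and interrupts are disabled with
 * spinlock_enter() while the emulated code may be touching hardware.
 */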
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

        if (x86bios_trace_call)
                X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

        mtx_lock(&x86bios_lock);
        memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
        x86bios_fault = 0;
        spinlock_enter();
        x86emu_exec_call(&x86bios_emu, seg, off);
        spinlock_exit();
        memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
        mtx_unlock(&x86bios_lock);

        if (x86bios_trace_call) {
                X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
                if (x86bios_fault)
                        printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
                            x86bios_fault_addr, x86bios_fault_cs,
                            x86bios_fault_ip);
        }
}

uint32_t
x86bios_get_intr(int intno)
{

        return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
}

void
x86bios_set_intr(int intno, uint32_t saddr)
{

        *((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
}

void
x86bios_intr(struct x86regs *regs, int intno)
{

        if (intno < 0 || intno > 255)
                return;

        if (x86bios_trace_int)
                X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

        mtx_lock(&x86bios_lock);
        memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
        x86bios_fault = 0;
        spinlock_enter();
        x86emu_exec_intr(&x86bios_emu, intno);
        spinlock_exit();
        memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
        mtx_unlock(&x86bios_lock);

        if (x86bios_trace_int) {
                X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
                if (x86bios_fault)
                        printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
                            x86bios_fault_addr, x86bios_fault_cs,
                            x86bios_fault_ip);
        }
}

void *
x86bios_offset(uint32_t offset)
{

        return (x86bios_get_pages(offset, 1));
}

static __inline void
x86bios_unmap_mem(void)
{

        if (x86bios_map != NULL) {
                free(x86bios_map, M_DEVBUF);
                x86bios_map = NULL;
        }
        if (x86bios_ivt != NULL) {
#ifdef X86BIOS_NATIVE_ARCH
                pmap_unmapbios(x86bios_ivt, X86BIOS_IVT_SIZE);
#else
                free(x86bios_ivt, M_DEVBUF);
                x86bios_ivt = NULL;
#endif
        }
        if (x86bios_rom != NULL)
                pmap_unmapdev(x86bios_rom, X86BIOS_ROM_SIZE);
        if (x86bios_seg != NULL) {
                contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
                x86bios_seg = NULL;
        }
}

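/*
 * Build the emulated address space: map (or fake) the IVT, probe the EBDA
 * location from the BIOS data area at 0x40e, map the EBDA/ROM window, and
 * allocate the stack segment page.
 */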
static __inline int
x86bios_map_mem(void)
{

        x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
            M_NOWAIT | M_ZERO);
        if (x86bios_map == NULL)
                goto fail;

#ifdef X86BIOS_NATIVE_ARCH
        x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

        /* Probe EBDA via BDA. */
        x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
        x86bios_rom_phys = x86bios_rom_phys << 4;
        if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
            X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
                x86bios_rom_phys =
                    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
        else
#else
        x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
        if (x86bios_ivt == NULL)
                goto fail;
#endif

        x86bios_rom_phys = X86BIOS_ROM_BASE;
        x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
        if (x86bios_rom == NULL)
                goto fail;
#ifdef X86BIOS_NATIVE_ARCH
        /* Change attribute for EBDA. */
        if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
            pmap_change_attr((vm_offset_t)x86bios_rom,
            X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
                goto fail;
#endif

        x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_NOWAIT,
            X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
        if (x86bios_seg == NULL)
                goto fail;
        x86bios_seg_phys = vtophys(x86bios_seg);

        x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
            X86BIOS_IVT_SIZE);
        x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
            X86BIOS_ROM_SIZE);
        x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
            X86BIOS_SEG_SIZE);

        if (bootverbose) {
                printf("x86bios:  IVT 0x%06jx-0x%06jx at %p\n",
                    (vm_paddr_t)X86BIOS_IVT_BASE,
                    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
                    x86bios_ivt);
                printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
                    x86bios_seg_phys,
                    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
                    x86bios_seg);
                if (x86bios_rom_phys < X86BIOS_ROM_BASE)
                        printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
                            x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
                            x86bios_rom);
                printf("x86bios:  ROM 0x%06jx-0x%06jx at %p\n",
                    (vm_paddr_t)X86BIOS_ROM_BASE,
                    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
                    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
        }

        return (0);

fail:
        x86bios_unmap_mem();

        return (1);
}

static int
x86bios_init(void)
{

        mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);

        if (x86bios_map_mem() != 0)
                return (ENOMEM);

        bzero(&x86bios_emu, sizeof(x86bios_emu));

        x86bios_emu.emu_rdb = x86bios_emu_rdb;
        x86bios_emu.emu_rdw = x86bios_emu_rdw;
        x86bios_emu.emu_rdl = x86bios_emu_rdl;
        x86bios_emu.emu_wrb = x86bios_emu_wrb;
        x86bios_emu.emu_wrw = x86bios_emu_wrw;
        x86bios_emu.emu_wrl = x86bios_emu_wrl;

        x86bios_emu.emu_inb = x86bios_emu_inb;
        x86bios_emu.emu_inw = x86bios_emu_inw;
        x86bios_emu.emu_inl = x86bios_emu_inl;
        x86bios_emu.emu_outb = x86bios_emu_outb;
        x86bios_emu.emu_outw = x86bios_emu_outw;
        x86bios_emu.emu_outl = x86bios_emu_outl;

        return (0);
}

static int
x86bios_uninit(void)
{

        x86bios_unmap_mem();
        mtx_destroy(&x86bios_lock);

        return (0);
}

#endif

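/*
 * Return a pointer to the option ROM image at the given offset, or NULL if
 * there is no valid ROM header (0x55 0xaa signature followed by a jump
 * instruction at the entry point).
 */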
void *
x86bios_get_orm(uint32_t offset)
{
        uint8_t *p;

        /* Does the shadow ROM contain BIOS POST code for x86? */
        p = x86bios_offset(offset);
        if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
            (p[3] != 0xe9 && p[3] != 0xeb))
                return (NULL);

        return (p);
}

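/*
 * Check whether the option ROM at the given offset belongs to the PCI
 * device: locate the PCI data structure via the pointer at ROM offset
 * 0x18, verify its "PCIR" signature, and compare vendor, device and class
 * codes against the device.
 */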
int
x86bios_match_device(uint32_t offset, device_t dev)
{
        uint8_t *p;
        uint16_t device, vendor;
        uint8_t class, progif, subclass;

        /* Does the shadow ROM contain BIOS POST code for x86? */
        p = x86bios_get_orm(offset);
        if (p == NULL)
                return (0);

        /* Does it contain PCI data structure? */
        p += le16toh(*(uint16_t *)(p + 0x18));
        if (bcmp(p, "PCIR", 4) != 0 ||
            le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
                return (0);

        /* Does it match the vendor, device, and classcode? */
        vendor = le16toh(*(uint16_t *)(p + 0x04));
        device = le16toh(*(uint16_t *)(p + 0x06));
        progif = *(p + 0x0d);
        subclass = *(p + 0x0e);
        class = *(p + 0x0f);
        if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
            class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
            progif != pci_get_progif(dev))
                return (0);

        return (1);
}

static int
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
{

        switch (type) {
        case MOD_LOAD:
                return (x86bios_init());
        case MOD_UNLOAD:
                return (x86bios_uninit());
        default:
                return (ENOTSUP);
        }
}

static moduledata_t x86bios_mod = {
        "x86bios",
        x86bios_modevent,
        NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);





This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.