FreeBSD/Linux Kernel Cross Reference
sys/compat/x86bios/x86bios.c


    1 /*-
    2  * Copyright (c) 2009 Alex Keda <admin@lissyara.su>
    3  * Copyright (c) 2009-2010 Jung-uk Kim <jkim@FreeBSD.org>
    4  * All rights reserved.
    5  *
    6  * Redistribution and use in source and binary forms, with or without
    7  * modification, are permitted provided that the following conditions
    8  * are met:
    9  * 1. Redistributions of source code must retain the above copyright
   10  *    notice, this list of conditions and the following disclaimer.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  *
   15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   25  * SUCH DAMAGE.
   26  */
   27 
   28 #include <sys/cdefs.h>
   29 __FBSDID("$FreeBSD$");
   30 
   31 #include "opt_x86bios.h"
   32 
   33 #include <sys/param.h>
   34 #include <sys/bus.h>
   35 #include <sys/kernel.h>
   36 #include <sys/lock.h>
   37 #include <sys/malloc.h>
   38 #include <sys/module.h>
   39 #include <sys/mutex.h>
   40 #include <sys/sysctl.h>
   41 
   42 #include <contrib/x86emu/x86emu.h>
   43 #include <contrib/x86emu/x86emu_regs.h>
   44 #include <compat/x86bios/x86bios.h>
   45 
   46 #include <dev/pci/pcireg.h>
   47 #include <dev/pci/pcivar.h>
   48 
   49 #include <vm/vm.h>
   50 #include <vm/pmap.h>
   51 
   52 #ifdef __amd64__
   53 #define X86BIOS_NATIVE_ARCH
   54 #endif
   55 #ifdef __i386__
   56 #define X86BIOS_NATIVE_VM86
   57 #endif
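
/*
 * Two back ends are selected above: with X86BIOS_NATIVE_VM86 (i386) BIOS
 * calls run natively in the CPU's vm86 mode via vm86_datacall(); otherwise
 * (the "#else" half of this file) they are interpreted by the x86emu
 * software emulator, and X86BIOS_NATIVE_ARCH (amd64) additionally lets the
 * emulator map the real IVT/BDA and pass port I/O through unfiltered.
 */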
   58 
   59 #define X86BIOS_MEM_SIZE        0x00100000      /* 1M */
   60 
   61 #define X86BIOS_TRACE(h, n, r)  do {                                    \
   62         printf(__STRING(h)                                              \
   63             " (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
   64             (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX,            \
   65             (r)->R_ES, (r)->R_DI);                                      \
   66 } while (0)
   67 
   68 static struct mtx x86bios_lock;
   69 
   70 static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD, NULL,
   71     "x86bios debugging");
   72 static int x86bios_trace_call;
   73 SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RWTUN, &x86bios_trace_call, 0,
   74     "Trace far function calls");
   75 static int x86bios_trace_int;
   76 SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RWTUN, &x86bios_trace_int, 0,
   77     "Trace software interrupt handlers");
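
/*
 * Both knobs live under debug.x86bios and are CTLFLAG_RWTUN, so they can
 * be set as loader tunables or changed at run time, e.g.:
 *
 *	sysctl debug.x86bios.call=1	# trace x86bios_call()
 *	sysctl debug.x86bios.int=1	# trace x86bios_intr()
 */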
   78 
   79 #ifdef X86BIOS_NATIVE_VM86
   80 
   81 #include <machine/vm86.h>
   82 #include <machine/vmparam.h>
   83 #include <machine/pc/bios.h>
   84 
   85 struct vm86context x86bios_vmc;
   86 
   87 static void
   88 x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
   89 {
   90 
   91         vmf->vmf_ds = regs->R_DS;
   92         vmf->vmf_es = regs->R_ES;
   93         vmf->vmf_ax = regs->R_AX;
   94         vmf->vmf_bx = regs->R_BX;
   95         vmf->vmf_cx = regs->R_CX;
   96         vmf->vmf_dx = regs->R_DX;
   97         vmf->vmf_bp = regs->R_BP;
   98         vmf->vmf_si = regs->R_SI;
   99         vmf->vmf_di = regs->R_DI;
  100 }
  101 
  102 static void
  103 x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
  104 {
  105 
  106         regs->R_DS = vmf->vmf_ds;
  107         regs->R_ES = vmf->vmf_es;
  108         regs->R_FLG = vmf->vmf_flags;
  109         regs->R_AX = vmf->vmf_ax;
  110         regs->R_BX = vmf->vmf_bx;
  111         regs->R_CX = vmf->vmf_cx;
  112         regs->R_DX = vmf->vmf_dx;
  113         regs->R_BP = vmf->vmf_bp;
  114         regs->R_SI = vmf->vmf_si;
  115         regs->R_DI = vmf->vmf_di;
  116 }
  117 
  118 void *
  119 x86bios_alloc(uint32_t *offset, size_t size, int flags)
  120 {
  121         void *vaddr;
  122         u_int i;
  123 
  124         if (offset == NULL || size == 0)
  125                 return (NULL);
  126         vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
  127             PAGE_SIZE, 0);
  128         if (vaddr != NULL) {
  129                 *offset = vtophys(vaddr);
  130                 mtx_lock(&x86bios_lock);
  131                 for (i = 0; i < atop(round_page(size)); i++)
  132                         vm86_addpage(&x86bios_vmc, atop(*offset) + i,
  133                             (vm_offset_t)vaddr + ptoa(i));
  134                 mtx_unlock(&x86bios_lock);
  135         }
  136 
  137         return (vaddr);
  138 }
  139 
  140 void
  141 x86bios_free(void *addr, size_t size)
  142 {
  143         vm_paddr_t paddr;
  144         int i, nfree;
  145 
  146         if (addr == NULL || size == 0)
  147                 return;
  148         paddr = vtophys(addr);
  149         if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
  150                 return;
  151         mtx_lock(&x86bios_lock);
  152         for (i = 0; i < x86bios_vmc.npages; i++)
  153                 if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
  154                         break;
  155         if (i >= x86bios_vmc.npages) {
  156                 mtx_unlock(&x86bios_lock);
  157                 return;
  158         }
  159         nfree = atop(round_page(size));
  160         bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
  161         if (i + nfree == x86bios_vmc.npages) {
  162                 x86bios_vmc.npages -= nfree;
  163                 while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
  164                         x86bios_vmc.npages--;
  165         }
  166         mtx_unlock(&x86bios_lock);
  167         contigfree(addr, size, M_DEVBUF);
  168 }
  169 
  170 void
  171 x86bios_init_regs(struct x86regs *regs)
  172 {
  173 
  174         bzero(regs, sizeof(*regs));
  175 }
  176 
  177 void
  178 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
  179 {
  180         struct vm86frame vmf;
  181 
  182         if (x86bios_trace_call)
  183                 X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);
  184 
  185         bzero(&vmf, sizeof(vmf));
  186         x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
  187         vmf.vmf_cs = seg;
  188         vmf.vmf_ip = off;
  189         mtx_lock(&x86bios_lock);
  190         vm86_datacall(-1, &vmf, &x86bios_vmc);
  191         mtx_unlock(&x86bios_lock);
  192         x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
  193 
  194         if (x86bios_trace_call)
  195                 X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
  196 }
  197 
  198 uint32_t
  199 x86bios_get_intr(int intno)
  200 {
  201 
  202         return (readl(BIOS_PADDRTOVADDR(intno * 4)));
  203 }
  204 
  205 void
  206 x86bios_set_intr(int intno, uint32_t saddr)
  207 {
  208 
  209         writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
  210 }
  211 
  212 void
  213 x86bios_intr(struct x86regs *regs, int intno)
  214 {
  215         struct vm86frame vmf;
  216 
  217         if (x86bios_trace_int)
  218                 X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
  219 
  220         bzero(&vmf, sizeof(vmf));
  221         x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
  222         mtx_lock(&x86bios_lock);
  223         vm86_datacall(intno, &vmf, &x86bios_vmc);
  224         mtx_unlock(&x86bios_lock);
  225         x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
  226 
  227         if (x86bios_trace_int)
  228                 X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
  229 }
  230 
  231 void *
  232 x86bios_offset(uint32_t offset)
  233 {
  234         vm_offset_t addr;
  235 
  236         addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
  237             X86BIOS_PHYSTOOFF(offset));
  238         if (addr == 0)
  239                 addr = BIOS_PADDRTOVADDR(offset);
  240 
  241         return ((void *)addr);
  242 }
  243 
  244 static int
  245 x86bios_init(void)
  246 {
  247 
  248         mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
  249         bzero(&x86bios_vmc, sizeof(x86bios_vmc));
  250 
  251         return (0);
  252 }
  253 
  254 static int
  255 x86bios_uninit(void)
  256 {
  257 
  258         mtx_destroy(&x86bios_lock);
  259 
  260         return (0);
  261 }
  262 
  263 #else
  264 
  265 #include <machine/iodev.h>
  266 
  267 #define X86BIOS_PAGE_SIZE       0x00001000      /* 4K */
  268 
  269 #define X86BIOS_IVT_SIZE        0x00000500      /* 1K + 256 (BDA) */
  270 
  271 #define X86BIOS_IVT_BASE        0x00000000
  272 #define X86BIOS_RAM_BASE        0x00001000
  273 #define X86BIOS_ROM_BASE        0x000a0000
  274 
  275 #define X86BIOS_ROM_SIZE        (X86BIOS_MEM_SIZE - x86bios_rom_phys)
  276 #define X86BIOS_SEG_SIZE        X86BIOS_PAGE_SIZE
  277 
  278 #define X86BIOS_PAGES           (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)
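
/*
 * Emulated real-mode address space (set up in x86bios_map_mem() below):
 * the IVT and BIOS data area cover 0x00000-0x004ff, scratch RAM for
 * x86bios_alloc() lies between 0x01000 and the ROM window, one 4K page of
 * that RAM serves as the emulator's stack segment, and the (E)BIOS ROM is
 * mapped from x86bios_rom_phys (normally 0xa0000) up to 1MB.
 */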
  279 
  280 #define X86BIOS_R_SS            _pad2
  281 #define X86BIOS_R_SP            _pad3.I16_reg.x_reg
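
/*
 * struct x86regs does not expose a stack segment/pointer of its own, so
 * x86bios_init_regs() stores them in these padding fields; they point at
 * the scratch segment allocated in x86bios_map_mem().
 */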
  282 
  283 static struct x86emu x86bios_emu;
  284 
  285 static void *x86bios_ivt;
  286 static void *x86bios_rom;
  287 static void *x86bios_seg;
  288 
  289 static vm_offset_t *x86bios_map;
  290 
  291 static vm_paddr_t x86bios_rom_phys;
  292 static vm_paddr_t x86bios_seg_phys;
  293 
  294 static int x86bios_fault;
  295 static uint32_t x86bios_fault_addr;
  296 static uint16_t x86bios_fault_cs;
  297 static uint16_t x86bios_fault_ip;
  298 
  299 static void
  300 x86bios_set_fault(struct x86emu *emu, uint32_t addr)
  301 {
  302 
  303         x86bios_fault = 1;
  304         x86bios_fault_addr = addr;
  305         x86bios_fault_cs = emu->x86.R_CS;
  306         x86bios_fault_ip = emu->x86.R_IP;
  307         x86emu_halt_sys(emu);
  308 }
  309 
  310 static void *
  311 x86bios_get_pages(uint32_t offset, size_t size)
  312 {
  313         vm_offset_t addr;
  314 
  315         if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
  316                 return (NULL);
  317 
  318         if (offset >= X86BIOS_MEM_SIZE)
  319                 offset -= X86BIOS_MEM_SIZE;
  320         addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
  321         if (addr != 0)
  322                 addr += offset % X86BIOS_PAGE_SIZE;
  323 
  324         return ((void *)addr);
  325 }
  326 
  327 static void
  328 x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
  329 {
  330         int i, j;
  331 
  332         for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
  333             j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
  334                 x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
  335 }
  336 
  337 static uint8_t
  338 x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
  339 {
  340         uint8_t *va;
  341 
  342         va = x86bios_get_pages(addr, sizeof(*va));
  343         if (va == NULL)
  344                 x86bios_set_fault(emu, addr);
  345 
  346         return (*va);
  347 }
  348 
  349 static uint16_t
  350 x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
  351 {
  352         uint16_t *va;
  353 
  354         va = x86bios_get_pages(addr, sizeof(*va));
  355         if (va == NULL)
  356                 x86bios_set_fault(emu, addr);
  357 
  358 #ifndef __NO_STRICT_ALIGNMENT
  359         if ((addr & 1) != 0)
  360                 return (le16dec(va));
  361         else
  362 #endif
  363         return (le16toh(*va));
  364 }
  365 
  366 static uint32_t
  367 x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
  368 {
  369         uint32_t *va;
  370 
  371         va = x86bios_get_pages(addr, sizeof(*va));
  372         if (va == NULL)
  373                 x86bios_set_fault(emu, addr);
  374 
  375 #ifndef __NO_STRICT_ALIGNMENT
  376         if ((addr & 3) != 0)
  377                 return (le32dec(va));
  378         else
  379 #endif
  380         return (le32toh(*va));
  381 }
  382 
  383 static void
  384 x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
  385 {
  386         uint8_t *va;
  387 
  388         va = x86bios_get_pages(addr, sizeof(*va));
  389         if (va == NULL)
  390                 x86bios_set_fault(emu, addr);
  391 
  392         *va = val;
  393 }
  394 
  395 static void
  396 x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
  397 {
  398         uint16_t *va;
  399 
  400         va = x86bios_get_pages(addr, sizeof(*va));
  401         if (va == NULL)
  402                 x86bios_set_fault(emu, addr);
  403 
  404 #ifndef __NO_STRICT_ALIGNMENT
  405         if ((addr & 1) != 0)
  406                 le16enc(va, val);
  407         else
  408 #endif
  409         *va = htole16(val);
  410 }
  411 
  412 static void
  413 x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
  414 {
  415         uint32_t *va;
  416 
  417         va = x86bios_get_pages(addr, sizeof(*va));
  418         if (va == NULL)
  419                 x86bios_set_fault(emu, addr);
  420 
  421 #ifndef __NO_STRICT_ALIGNMENT
  422         if ((addr & 3) != 0)
  423                 le32enc(va, val);
  424         else
  425 #endif
  426         *va = htole32(val);
  427 }
  428 
  429 static uint8_t
  430 x86bios_emu_inb(struct x86emu *emu, uint16_t port)
  431 {
  432 
  433 #ifndef X86BIOS_NATIVE_ARCH
  434         if (port == 0xb2) /* APM scratch register */
  435                 return (0);
  436         if (port >= 0x80 && port < 0x88) /* POST status register */
  437                 return (0);
  438 #endif
  439 
  440         return (iodev_read_1(port));
  441 }
  442 
  443 static uint16_t
  444 x86bios_emu_inw(struct x86emu *emu, uint16_t port)
  445 {
  446         uint16_t val;
  447 
  448 #ifndef X86BIOS_NATIVE_ARCH
  449         if (port >= 0x80 && port < 0x88) /* POST status register */
  450                 return (0);
  451 
  452         if ((port & 1) != 0) {
  453                 val = iodev_read_1(port);
  454                 val |= iodev_read_1(port + 1) << 8;
  455         } else
  456 #endif
  457         val = iodev_read_2(port);
  458 
  459         return (val);
  460 }
  461 
  462 static uint32_t
  463 x86bios_emu_inl(struct x86emu *emu, uint16_t port)
  464 {
  465         uint32_t val;
  466 
  467 #ifndef X86BIOS_NATIVE_ARCH
  468         if (port >= 0x80 && port < 0x88) /* POST status register */
  469                 return (0);
  470 
  471         if ((port & 1) != 0) {
  472                 val = iodev_read_1(port);
  473                 val |= iodev_read_2(port + 1) << 8;
  474                 val |= iodev_read_1(port + 3) << 24;
  475         } else if ((port & 2) != 0) {
  476                 val = iodev_read_2(port);
  477                 val |= iodev_read_2(port + 2) << 16;
  478         } else
  479 #endif
  480         val = iodev_read_4(port);
  481 
  482         return (val);
  483 }
  484 
  485 static void
  486 x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
  487 {
  488 
  489 #ifndef X86BIOS_NATIVE_ARCH
  490         if (port == 0xb2) /* APM scratch register */
  491                 return;
  492         if (port >= 0x80 && port < 0x88) /* POST status register */
  493                 return;
  494 #endif
  495 
  496         iodev_write_1(port, val);
  497 }
  498 
  499 static void
  500 x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
  501 {
  502 
  503 #ifndef X86BIOS_NATIVE_ARCH
  504         if (port >= 0x80 && port < 0x88) /* POST status register */
  505                 return;
  506 
  507         if ((port & 1) != 0) {
  508                 iodev_write_1(port, val);
  509                 iodev_write_1(port + 1, val >> 8);
  510         } else
  511 #endif
  512         iodev_write_2(port, val);
  513 }
  514 
  515 static void
  516 x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
  517 {
  518 
  519 #ifndef X86BIOS_NATIVE_ARCH
  520         if (port >= 0x80 && port < 0x88) /* POST status register */
  521                 return;
  522 
  523         if ((port & 1) != 0) {
  524                 iodev_write_1(port, val);
  525                 iodev_write_2(port + 1, val >> 8);
  526                 iodev_write_1(port + 3, val >> 24);
  527         } else if ((port & 2) != 0) {
  528                 iodev_write_2(port, val);
  529                 iodev_write_2(port + 2, val >> 16);
  530         } else
  531 #endif
  532         iodev_write_4(port, val);
  533 }
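
/*
 * The handlers above forward the guest's port I/O to iodev_*().  When
 * X86BIOS_NATIVE_ARCH is not defined they also ignore the POST diagnostic
 * ports (0x80-0x87) and the APM scratch register (0xb2), and split
 * misaligned word/dword accesses into smaller transfers.
 */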
  534 
  535 void *
  536 x86bios_alloc(uint32_t *offset, size_t size, int flags)
  537 {
  538         void *vaddr;
  539 
  540         if (offset == NULL || size == 0)
  541                 return (NULL);
  542         vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
  543             x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
  544         if (vaddr != NULL) {
  545                 *offset = vtophys(vaddr);
  546                 mtx_lock(&x86bios_lock);
  547                 x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
  548                 mtx_unlock(&x86bios_lock);
  549         }
  550 
  551         return (vaddr);
  552 }
  553 
  554 void
  555 x86bios_free(void *addr, size_t size)
  556 {
  557         vm_paddr_t paddr;
  558 
  559         if (addr == NULL || size == 0)
  560                 return;
  561         paddr = vtophys(addr);
  562         if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
  563             paddr % X86BIOS_PAGE_SIZE != 0)
  564                 return;
  565         mtx_lock(&x86bios_lock);
  566         bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
  567             sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
  568         mtx_unlock(&x86bios_lock);
  569         contigfree(addr, size, M_DEVBUF);
  570 }
  571 
  572 void
  573 x86bios_init_regs(struct x86regs *regs)
  574 {
  575 
  576         bzero(regs, sizeof(*regs));
  577         regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
  578         regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
  579 }
  580 
  581 void
  582 x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
  583 {
  584 
  585         if (x86bios_trace_call)
  586                 X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);
  587 
  588         mtx_lock(&x86bios_lock);
  589         memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
  590         x86bios_fault = 0;
  591         spinlock_enter();
  592         x86emu_exec_call(&x86bios_emu, seg, off);
  593         spinlock_exit();
  594         memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
  595         mtx_unlock(&x86bios_lock);
  596 
  597         if (x86bios_trace_call) {
  598                 X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
  599                 if (x86bios_fault)
  600                         printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
  601                             x86bios_fault_addr, x86bios_fault_cs,
  602                             x86bios_fault_ip);
  603         }
  604 }
  605 
  606 uint32_t
  607 x86bios_get_intr(int intno)
  608 {
  609 
  610         return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
  611 }
  612 
  613 void
  614 x86bios_set_intr(int intno, uint32_t saddr)
  615 {
  616 
  617         *((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
  618 }
  619 
  620 void
  621 x86bios_intr(struct x86regs *regs, int intno)
  622 {
  623 
  624         if (intno < 0 || intno > 255)
  625                 return;
  626 
  627         if (x86bios_trace_int)
  628                 X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
  629 
  630         mtx_lock(&x86bios_lock);
  631         memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
  632         x86bios_fault = 0;
  633         spinlock_enter();
  634         x86emu_exec_intr(&x86bios_emu, intno);
  635         spinlock_exit();
  636         memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
  637         mtx_unlock(&x86bios_lock);
  638 
  639         if (x86bios_trace_int) {
  640                 X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
  641                 if (x86bios_fault)
  642                         printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
  643                             x86bios_fault_addr, x86bios_fault_cs,
  644                             x86bios_fault_ip);
  645         }
  646 }
  647 
  648 void *
  649 x86bios_offset(uint32_t offset)
  650 {
  651 
  652         return (x86bios_get_pages(offset, 1));
  653 }
  654 
  655 static __inline void
  656 x86bios_unmap_mem(void)
  657 {
  658 
  659         free(x86bios_map, M_DEVBUF);
  660         if (x86bios_ivt != NULL)
  661 #ifdef X86BIOS_NATIVE_ARCH
  662                 pmap_unmapbios((vm_offset_t)x86bios_ivt, X86BIOS_IVT_SIZE);
  663 #else
  664                 free(x86bios_ivt, M_DEVBUF);
  665 #endif
  666         if (x86bios_rom != NULL)
  667                 pmap_unmapdev((vm_offset_t)x86bios_rom, X86BIOS_ROM_SIZE);
  668         if (x86bios_seg != NULL)
  669                 contigfree(x86bios_seg, X86BIOS_SEG_SIZE, M_DEVBUF);
  670 }
  671 
  672 static __inline int
  673 x86bios_map_mem(void)
  674 {
  675 
  676         x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
  677             M_WAITOK | M_ZERO);
  678 
  679 #ifdef X86BIOS_NATIVE_ARCH
  680         x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);
  681 
  682         /* Probe EBDA via BDA. */
  683         x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
  684         x86bios_rom_phys = x86bios_rom_phys << 4;
  685         if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
  686             X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
  687                 x86bios_rom_phys =
  688                     rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
  689         else
  690 #else
  691         x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_ZERO | M_WAITOK);
  692 #endif
  693 
  694         x86bios_rom_phys = X86BIOS_ROM_BASE;
  695         x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
  696         if (x86bios_rom == NULL)
  697                 goto fail;
  698 #ifdef X86BIOS_NATIVE_ARCH
  699         /* Change attribute for EBDA. */
  700         if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
  701             pmap_change_attr((vm_offset_t)x86bios_rom,
  702             X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
  703                 goto fail;
  704 #endif
  705 
  706         x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_WAITOK,
  707             X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
  708         x86bios_seg_phys = vtophys(x86bios_seg);
  709 
  710         x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
  711             X86BIOS_IVT_SIZE);
  712         x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
  713             X86BIOS_ROM_SIZE);
  714         x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
  715             X86BIOS_SEG_SIZE);
  716 
  717         if (bootverbose) {
  718                 printf("x86bios:  IVT 0x%06jx-0x%06jx at %p\n",
  719                     (vm_paddr_t)X86BIOS_IVT_BASE,
  720                     (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
  721                     x86bios_ivt);
  722                 printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
  723                     x86bios_seg_phys,
  724                     (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
  725                     x86bios_seg);
  726                 if (x86bios_rom_phys < X86BIOS_ROM_BASE)
  727                         printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
  728                             x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
  729                             x86bios_rom);
  730                 printf("x86bios:  ROM 0x%06jx-0x%06jx at %p\n",
  731                     (vm_paddr_t)X86BIOS_ROM_BASE,
  732                     (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
  733                     (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
  734         }
  735 
  736         return (0);
  737 
  738 fail:
  739         x86bios_unmap_mem();
  740 
  741         return (1);
  742 }
  743 
  744 static int
  745 x86bios_init(void)
  746 {
  747 
  748         mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
  749 
  750         if (x86bios_map_mem() != 0)
  751                 return (ENOMEM);
  752 
  753         bzero(&x86bios_emu, sizeof(x86bios_emu));
  754 
  755         x86bios_emu.emu_rdb = x86bios_emu_rdb;
  756         x86bios_emu.emu_rdw = x86bios_emu_rdw;
  757         x86bios_emu.emu_rdl = x86bios_emu_rdl;
  758         x86bios_emu.emu_wrb = x86bios_emu_wrb;
  759         x86bios_emu.emu_wrw = x86bios_emu_wrw;
  760         x86bios_emu.emu_wrl = x86bios_emu_wrl;
  761 
  762         x86bios_emu.emu_inb = x86bios_emu_inb;
  763         x86bios_emu.emu_inw = x86bios_emu_inw;
  764         x86bios_emu.emu_inl = x86bios_emu_inl;
  765         x86bios_emu.emu_outb = x86bios_emu_outb;
  766         x86bios_emu.emu_outw = x86bios_emu_outw;
  767         x86bios_emu.emu_outl = x86bios_emu_outl;
  768 
  769         return (0);
  770 }
  771 
  772 static int
  773 x86bios_uninit(void)
  774 {
  775 
  776         x86bios_unmap_mem();
  777         mtx_destroy(&x86bios_lock);
  778 
  779         return (0);
  780 }
  781 
  782 #endif
  783 
  784 void *
  785 x86bios_get_orm(uint32_t offset)
  786 {
  787         uint8_t *p;
  788 
  789         /* Does the shadow ROM contain BIOS POST code for x86? */
  790         p = x86bios_offset(offset);
  791         if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
  792             (p[3] != 0xe9 && p[3] != 0xeb))
  793                 return (NULL);
  794 
  795         return (p);
  796 }
  797 
  798 int
  799 x86bios_match_device(uint32_t offset, device_t dev)
  800 {
  801         uint8_t *p;
  802         uint16_t device, vendor;
  803         uint8_t class, progif, subclass;
  804 
  805         /* Does the shadow ROM contain BIOS POST code for x86? */
  806         p = x86bios_get_orm(offset);
  807         if (p == NULL)
  808                 return (0);
  809 
  810         /* Does it contain PCI data structure? */
  811         p += le16toh(*(uint16_t *)(p + 0x18));
  812         if (bcmp(p, "PCIR", 4) != 0 ||
  813             le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
  814                 return (0);
  815 
  816         /* Does it match the vendor, device, and classcode? */
  817         vendor = le16toh(*(uint16_t *)(p + 0x04));
  818         device = le16toh(*(uint16_t *)(p + 0x06));
  819         progif = *(p + 0x0d);
  820         subclass = *(p + 0x0e);
  821         class = *(p + 0x0f);
  822         if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
  823             class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
  824             progif != pci_get_progif(dev))
  825                 return (0);
  826 
  827         return (1);
  828 }
  829 
  830 static int
  831 x86bios_modevent(module_t mod __unused, int type, void *data __unused)
  832 {
  833 
  834         switch (type) {
  835         case MOD_LOAD:
  836                 return (x86bios_init());
  837         case MOD_UNLOAD:
  838                 return (x86bios_uninit());
  839         default:
  840                 return (ENOTSUP);
  841         }
  842 }
  843 
  844 static moduledata_t x86bios_mod = {
  845         "x86bios",
  846         x86bios_modevent,
  847         NULL,
  848 };
  849 
  850 DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
  851 MODULE_VERSION(x86bios, 1);
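
Illustrative usage sketch (not part of the file above): this is roughly how an
in-tree consumer such as vesa(4) drives the x86bios interface to issue a
real-mode VESA BIOS call.  The function name, the 512-byte buffer size and the
specific VBE function number are assumptions made for the example; it relies
only on the public API declared in <compat/x86bios/x86bios.h>.

/* Hypothetical caller: VBE "return controller information" (INT 10h, AX=4F00h). */
static int
example_vbe_probe(void)
{
	x86regs_t regs;
	uint32_t offset;
	void *buf;

	/* Bounce buffer inside the low-memory window managed by x86bios. */
	buf = x86bios_alloc(&offset, 512, M_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	x86bios_init_regs(&regs);
	regs.R_AX = 0x4f00;			/* VBE: return controller info */
	regs.R_ES = X86BIOS_PHYSTOSEG(offset);	/* ES:DI -> bounce buffer */
	regs.R_DI = X86BIOS_PHYSTOOFF(offset);

	x86bios_intr(&regs, 0x10);		/* run the video BIOS handler */

	if (regs.R_AX != 0x004f) {		/* 0x004f means "supported, OK" */
		x86bios_free(buf, 512);
		return (ENXIO);
	}

	/* The VbeInfoBlock can now be parsed out of buf... */
	x86bios_free(buf, 512);
	return (0);
}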
