The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/arm/mv/mv_machdep.c

Version: -  FREEBSD  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-2  -  FREEBSD-11-1  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-4  -  FREEBSD-10-3  -  FREEBSD-10-2  -  FREEBSD-10-1  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-3  -  FREEBSD-9-2  -  FREEBSD-9-1  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-4  -  FREEBSD-8-3  -  FREEBSD-8-2  -  FREEBSD-8-1  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-4  -  FREEBSD-7-3  -  FREEBSD-7-2  -  FREEBSD-7-1  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-4  -  FREEBSD-6-3  -  FREEBSD-6-2  -  FREEBSD-6-1  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-5  -  FREEBSD-5-4  -  FREEBSD-5-3  -  FREEBSD-5-2  -  FREEBSD-5-1  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  linux-2.6  -  linux-2.4.22  -  MK83  -  MK84  -  PLAN9  -  DFBSD  -  NETBSD  -  NETBSD5  -  NETBSD4  -  NETBSD3  -  NETBSD20  -  OPENBSD  -  xnu-517  -  xnu-792  -  xnu-792.6.70  -  xnu-1228  -  xnu-1456.1.26  -  xnu-1699.24.8  -  xnu-2050.18.24  -  OPENSOLARIS  -  minix-3-1-1 
SearchContext: -  none  -  3  -  10 

    1 /*-
    2  * Copyright (c) 1994-1998 Mark Brinicombe.
    3  * Copyright (c) 1994 Brini.
    4  * All rights reserved.
    5  *
    6  * This code is derived from software written for Brini by Mark Brinicombe
    7  *
    8  * Redistribution and use in source and binary forms, with or without
    9  * modification, are permitted provided that the following conditions
   10  * are met:
   11  * 1. Redistributions of source code must retain the above copyright
   12  *    notice, this list of conditions and the following disclaimer.
   13  * 2. Redistributions in binary form must reproduce the above copyright
   14  *    notice, this list of conditions and the following disclaimer in the
   15  *    documentation and/or other materials provided with the distribution.
   16  * 3. All advertising materials mentioning features or use of this software
   17  *    must display the following acknowledgement:
   18  *      This product includes software developed by Brini.
   19  * 4. The name of the company nor the name of the author may be used to
   20  *    endorse or promote products derived from this software without specific
   21  *    prior written permission.
   22  *
   23  * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
   24  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
   25  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
   26  * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   33  * SUCH DAMAGE.
   34  *
   35  * from: FreeBSD: //depot/projects/arm/src/sys/arm/at91/kb920x_machdep.c, rev 45
   36  */
   37 
   38 #include "opt_ddb.h"
   39 #include "opt_platform.h"
   40 
   41 #include <sys/cdefs.h>
   42 __FBSDID("$FreeBSD: releng/9.1/sys/arm/mv/mv_machdep.c 224051 2011-07-15 02:29:10Z marcel $");
   43 
   44 #define _ARM32_BUS_DMA_PRIVATE
   45 #include <sys/param.h>
   46 #include <sys/systm.h>
   47 #include <sys/sysproto.h>
   48 #include <sys/signalvar.h>
   49 #include <sys/imgact.h>
   50 #include <sys/kernel.h>
   51 #include <sys/ktr.h>
   52 #include <sys/linker.h>
   53 #include <sys/lock.h>
   54 #include <sys/malloc.h>
   55 #include <sys/mutex.h>
   56 #include <sys/pcpu.h>
   57 #include <sys/proc.h>
   58 #include <sys/ptrace.h>
   59 #include <sys/cons.h>
   60 #include <sys/bio.h>
   61 #include <sys/bus.h>
   62 #include <sys/buf.h>
   63 #include <sys/exec.h>
   64 #include <sys/kdb.h>
   65 #include <sys/msgbuf.h>
   66 #include <machine/reg.h>
   67 #include <machine/cpu.h>
   68 #include <machine/fdt.h>
   69 
   70 #include <dev/fdt/fdt_common.h>
   71 #include <dev/ofw/openfirm.h>
   72 
   73 #include <vm/vm.h>
   74 #include <vm/pmap.h>
   75 #include <vm/vm_object.h>
   76 #include <vm/vm_page.h>
   77 #include <vm/vm_pager.h>
   78 #include <vm/vm_map.h>
   79 #include <machine/pte.h>
   80 #include <machine/pmap.h>
   81 #include <machine/vmparam.h>
   82 #include <machine/pcb.h>
   83 #include <machine/undefined.h>
   84 #include <machine/machdep.h>
   85 #include <machine/metadata.h>
   86 #include <machine/armreg.h>
   87 #include <machine/bus.h>
   88 #include <sys/reboot.h>
   89 
   90 #include <arm/mv/mvreg.h>       /* XXX */
   91 #include <arm/mv/mvvar.h>       /* XXX eventually this should be eliminated */
   92 #include <arm/mv/mvwin.h>
   93 
   94 #define DEBUG
   95 #undef DEBUG
   96 
   97 #ifdef  DEBUG
   98 #define debugf(fmt, args...) printf(fmt, ##args)
   99 #else
  100 #define debugf(fmt, args...)
  101 #endif
  102 
  103 /*
  104  * This is the number of L2 page tables required for covering max
  105  * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf,
  106  * stacks etc.), uprounded to be divisible by 4.
  107  */
  108 #define KERNEL_PT_MAX   78
  109 
  110 /* Define various stack sizes in pages */
  111 #define IRQ_STACK_SIZE  1
  112 #define ABT_STACK_SIZE  1
  113 #define UND_STACK_SIZE  1
  114 
  115 extern unsigned char kernbase[];
  116 extern unsigned char _etext[];
  117 extern unsigned char _edata[];
  118 extern unsigned char __bss_start[];
  119 extern unsigned char _end[];
  120 
  121 #ifdef DDB
  122 extern vm_offset_t ksym_start, ksym_end;
  123 #endif
  124 
  125 extern u_int data_abort_handler_address;
  126 extern u_int prefetch_abort_handler_address;
  127 extern u_int undefined_handler_address;
  128 
  129 extern vm_offset_t pmap_bootstrap_lastaddr;
  130 extern int *end;
  131 
  132 struct pv_addr kernel_pt_table[KERNEL_PT_MAX];
  133 struct pcpu __pcpu;
  134 struct pcpu *pcpup = &__pcpu;
  135 
  136 /* Physical and virtual addresses for some global pages */
  137 
  138 vm_paddr_t phys_avail[10];
  139 vm_paddr_t dump_avail[4];
  140 vm_offset_t physical_pages;
  141 vm_offset_t pmap_bootstrap_lastaddr;
  142 
  143 const struct pmap_devmap *pmap_devmap_bootstrap_table;
  144 struct pv_addr systempage;
  145 struct pv_addr msgbufpv;
  146 struct pv_addr irqstack;
  147 struct pv_addr undstack;
  148 struct pv_addr abtstack;
  149 struct pv_addr kernelstack;
  150 
  151 static struct trapframe proc0_tf;
  152 
  153 static struct mem_region availmem_regions[FDT_MEM_REGIONS];
  154 static int availmem_regions_sz;
  155 
  156 static void print_kenv(void);
  157 static void print_kernel_section_addr(void);
  158 
  159 static void physmap_init(void);
  160 static int platform_devmap_init(void);
  161 static int platform_mpp_init(void);
  162 
  163 static char *
  164 kenv_next(char *cp)
  165 {
  166 
  167         if (cp != NULL) {
  168                 while (*cp != 0)
  169                         cp++;
  170                 cp++;
  171                 if (*cp == 0)
  172                         cp = NULL;
  173         }
  174         return (cp);
  175 }
  176 
  177 static void
  178 print_kenv(void)
  179 {
  180         int len;
  181         char *cp;
  182 
  183         debugf("loader passed (static) kenv:\n");
  184         if (kern_envp == NULL) {
  185                 debugf(" no env, null ptr\n");
  186                 return;
  187         }
  188         debugf(" kern_envp = 0x%08x\n", (uint32_t)kern_envp);
  189 
  190         len = 0;
  191         for (cp = kern_envp; cp != NULL; cp = kenv_next(cp))
  192                 debugf(" %x %s\n", (uint32_t)cp, cp);
  193 }
  194 
/*
 * Print the linker-provided addresses of the kernel image sections
 * (kernbase, _etext, _edata, __bss_start, _end).  Output only appears
 * when DEBUG is defined; debugf() is a no-op otherwise.
 */
static void
print_kernel_section_addr(void)
{

	debugf("kernel image addresses:\n");
	debugf(" kernbase       = 0x%08x\n", (uint32_t)kernbase);
	debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext);
	debugf(" _edata         = 0x%08x\n", (uint32_t)_edata);
	debugf(" __bss_start    = 0x%08x\n", (uint32_t)__bss_start);
	debugf(" _end           = 0x%08x\n", (uint32_t)_end);
}
  206 
/*
 * Build the phys_avail[] table for the VM subsystem from the
 * availmem_regions[] list (filled from the device tree in initarm()):
 *  - carve the physical range occupied by the kernel image out of any
 *    region that overlaps it (a region fully containing the kernel is
 *    split, with the tail appended as a new region),
 *  - page-align every region and discard regions that become empty,
 *  - insertion-sort the surviving regions by ascending start address,
 *  - emit the result as {start, end} pairs, terminated by a zero pair.
 */
static void
physmap_init(void)
{
	int i, j, cnt;
	vm_offset_t phys_kernelend, kernload;
	uint32_t s, e, sz;
	struct mem_region *mp, *mp1;

	/* Physical span of the loaded kernel: [kernload, phys_kernelend). */
	phys_kernelend = KERNPHYSADDR + (virtual_avail - KERNVIRTADDR);
	kernload = KERNPHYSADDR;

	/*
	 * Remove kernel physical address range from avail
	 * regions list. Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/*
		 * Check whether this region holds all of the kernel.
		 * If so, append the part above the kernel as a fresh
		 * region and shrink this one to the part below it.
		 */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this regions starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload) {
				goto empty;
			}
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			/*
			 * NOTE(review): this prints unconditionally while the
			 * rest of the function uses debugf(); presumably it
			 * was meant to be debugf("skipping\n") -- confirm.
			 */
			printf("skipping\n");
			/* Squeeze the emptied slot out of the array. */
			bcopy(mp + 1, mp,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			/* Shift larger entries up and insert in place. */
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/* Fill in phys_avail table, based on availmem_regions */
	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start + availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
	}
	/* A zero pair terminates the phys_avail[] table. */
	phys_avail[j] = 0;
	phys_avail[j + 1] = 0;
}
  300 
/*
 * Machine-dependent early bootstrap for Marvell ARM SoCs.  Called from
 * locore with the MMU state set up by the secondary bootstrap.  Parses
 * loader metadata (or falls back to fake metadata), initializes the FDT
 * backend, builds the initial L1/L2 page tables, maps the SoC register
 * window, brings up the console, initializes thread0/proc0 and the pmap,
 * and returns the initial SVC-mode stack pointer for the caller.
 *
 * Fatal early failures spin in `while (1);` -- the console is not yet
 * available at those points, so there is nowhere to report the error.
 */
void *
initarm(void *mdp, void *unused __unused)
{
	struct pv_addr kernel_l1pt;
	struct pv_addr dpcpu;
	vm_offset_t dtbp, freemempos, l2_start, lastaddr;
	uint32_t memsize, l2size;
	void *kmdp;
	u_int l1pagetable;
	int i = 0, j = 0;

	kmdp = NULL;
	lastaddr = 0;
	memsize = 0;
	dtbp = (vm_offset_t)NULL;

	set_cpufuncs();

	/*
	 * Mask metadata pointer: it is supposed to be on page boundary. If
	 * the first argument (mdp) doesn't point to a valid address the
	 * bootloader must have passed us something else than the metadata
	 * ptr... In this case we want to fall back to some built-in settings.
	 */
	mdp = (void *)((uint32_t)mdp & ~PAGE_MASK);

	/* Parse metadata and fetch parameters */
	if (mdp != NULL) {
		preload_metadata = mdp;
		kmdp = preload_search_by_type("elf kernel");
		if (kmdp != NULL) {
			boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
			kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
			dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
			lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND,
			    vm_offset_t);
#ifdef DDB
			ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
			ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif
		}

		preload_addr_relocate = KERNVIRTADDR - KERNPHYSADDR;
	} else {
		/* Fall back to hardcoded metadata. */
		lastaddr = fake_preload_metadata();
	}

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (OF_install(OFW_FDT, 0) == FALSE)
		while (1);

	if (OF_init((void *)dtbp) != 0)
		while (1);

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz,
	    &memsize) != 0)
		while(1);

	if (fdt_immr_addr(MV_BASE) != 0)
		while (1);

	/* Platform-specific initialisation */
	pmap_bootstrap_lastaddr = fdt_immr_va - ARM_NOCACHE_KVA_SIZE;

	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	PCPU_SET(curthread, &thread0);

	/* Calculate number of L2 tables needed for mapping vm_page_array */
	l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
	l2size = (l2size >> L1_S_SHIFT) + 1;

	/*
	 * Add one table for end of kernel map, one for stacks, msgbuf and
	 * L1 and L2 tables map and one for vectors map.
	 */
	l2size += 3;

	/* Make it divisible by 4 */
	l2size = (l2size + 3) & ~3;

#define KERNEL_TEXT_BASE (KERNBASE)
	freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * Define a macro to simplify memory allocation.  valloc_pages()
	 * carves pages out of freemempos (bumping it) and records both the
	 * virtual and the corresponding physical address in a pv_addr.
	 */
#define valloc_pages(var, np)			\
	alloc_pages((var).pv_va, (np));		\
	(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);

#define alloc_pages(var, np)			\
	(var) = freemempos;		\
	freemempos += (np * PAGE_SIZE);		\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	/* Align freemempos so the L1 table is L1_TABLE_SIZE-aligned. */
	while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
		freemempos += PAGE_SIZE;
	valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);

	/*
	 * L2 tables are smaller than a page; allocate a page's worth at a
	 * time (index j remembers the slot owning the backing page) and
	 * derive the addresses of the tables packed inside it.
	 */
	for (i = 0; i < l2size; ++i) {
		if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
			valloc_pages(kernel_pt_table[i],
			    L2_TABLE_SIZE / PAGE_SIZE);
			j = i;
		} else {
			kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va +
			    L2_TABLE_SIZE_REAL * (i - j);
			kernel_pt_table[i].pv_pa =
			    kernel_pt_table[i].pv_va - KERNVIRTADDR +
			    KERNPHYSADDR;

		}
	}
	/*
	 * Allocate a page for the system page mapped to 0x00000000
	 * or 0xffff0000. This page will just contain the system vectors
	 * and can be shared by all processes.
	 */
	valloc_pages(systempage, 1);

	/* Allocate dynamic per-cpu area. */
	valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu.pv_va, 0);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, KSTACK_PAGES);

	init_param1();

	valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_va;

	/*
	 * Try to map as much as possible of kernel text and data using
	 * 1MB section mapping and for the rest of initial kernel address
	 * space use L2 coarse tables.
	 *
	 * Link L2 tables for mapping remainder of kernel (modulo 1MB)
	 * and kernel structures
	 */
	l2_start = lastaddr & ~(L1_S_OFFSET);
	for (i = 0 ; i < l2size - 1; i++)
		pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE,
		    &kernel_pt_table[i]);

	pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE;

	/* Map kernel code and data */
	pmap_map_chunk(l1pagetable, KERNVIRTADDR, KERNPHYSADDR,
	   (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);


	/* Map L1 directory and allocated L2 page tables */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va,
	    kernel_pt_table[0].pv_pa,
	    L2_TABLE_SIZE_REAL * l2size,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	/* Map allocated DPCPU, stacks and msgbuf */
	pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa,
	    freemempos - dpcpu.pv_va,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Link and map the vector page */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH,
	    &kernel_pt_table[l2size - 1]);
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map pmap_devmap[] entries */
	if (platform_devmap_init() != 0)
		while (1);
	pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);

	/* Switch to the freshly built L1 table. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));

	/*
	 * Only after the SOC registers block is mapped we can perform device
	 * tree fixups, as they may attempt to read parameters from hardware.
	 */
	OF_interpret("perform-fixup", 0);

	/*
	 * Re-initialise MPP. It is important to call this prior to using
	 * console as the physical connection can be routed via MPP.
	 */
	if (platform_mpp_init() != 0)
		while (1);

	cninit();

	physmem = memsize / PAGE_SIZE;

	debugf("initarm: console initialized\n");
	debugf(" arg1 mdp = 0x%08x\n", (uint32_t)mdp);
	debugf(" boothowto = 0x%08x\n", boothowto);
	/*
	 * NOTE(review): printf here vs. debugf for the neighbouring lines --
	 * presumably leftover debugging; confirm whether it should be debugf.
	 */
	printf(" dtbp = 0x%08x\n", (uint32_t)dtbp);
	print_kernel_section_addr();
	print_kenv();

	/*
	 * Re-initialise decode windows
	 */
	if (soc_decode_win() != 0)
		printf("WARNING: could not re-initialise decode windows! "
		    "Running with existing settings...\n");
	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE);
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * We must now clean the cache again....
	 * Cleaning may be done by reading new data to displace any
	 * dirty data in the cache. This will have happened in setttb()
	 * but since we are boot strapping the addresses used for the read
	 * may have just been remapped and thus the cache could be out
	 * of sync. A re-clean after the switch will cure this.
	 * After booting there are no gross relocations of the kernel thus
	 * this problem will not occur after initarm().
	 */
	cpu_idcache_wbinv_all();

	/* Set stack for exception handlers */
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;
	undefined_init();

	/* Wire up thread0/proc0 with the statically allocated resources. */
	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kernelstack.pv_va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;

	arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/* Single dump region [0, memsize), zero-terminated. */
	dump_avail[0] = 0;
	dump_avail[1] = memsize;
	dump_avail[2] = 0;
	dump_avail[3] = 0;

	pmap_bootstrap(freemempos, pmap_bootstrap_lastaddr, &kernel_l1pt);
	msgbufp = (void *)msgbufpv.pv_va;
	msgbufinit(msgbufp, msgbufsize);
	mutex_init();

	/*
	 * Prepare map of physical memory regions available to vm subsystem.
	 */
	physmap_init();

	/* Do basic tuning, hz etc */
	init_param2(physmem);
	kdb_init();
	/* Return the initial SVC stack pointer (below thread0's pcb). */
	return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP -
	    sizeof(struct pcb)));
}
  598 
  599 #define MPP_PIN_MAX             50
  600 #define MPP_PIN_CELLS           2
  601 #define MPP_PINS_PER_REG        8
  602 #define MPP_SEL(pin,func)       (((func) & 0xf) <<              \
  603     (((pin) % MPP_PINS_PER_REG) * 4))
  604 
  605 static int
  606 platform_mpp_init(void)
  607 {
  608         pcell_t pinmap[MPP_PIN_MAX * MPP_PIN_CELLS];
  609         int mpp[MPP_PIN_MAX];
  610         uint32_t ctrl_val, ctrl_offset;
  611         pcell_t reg[4];
  612         u_long start, size;
  613         phandle_t node;
  614         pcell_t pin_cells, *pinmap_ptr, pin_count;
  615         ssize_t len;
  616         int par_addr_cells, par_size_cells;
  617         int tuple_size, tuples, rv, pins, i, j;
  618         int mpp_pin, mpp_function;
  619 
  620         /*
  621          * Try to access the MPP node directly i.e. through /aliases/mpp.
  622          */
  623         if ((node = OF_finddevice("mpp")) != 0)
  624                 if (fdt_is_compatible(node, "mrvl,mpp"))
  625                         goto moveon;
  626         /*
  627          * Find the node the long way.
  628          */
  629         if ((node = OF_finddevice("/")) == 0)
  630                 return (ENXIO);
  631 
  632         if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0)
  633                 return (ENXIO);
  634 
  635         if ((node = fdt_find_compatible(node, "mrvl,mpp", 0)) == 0)
  636                 return (ENXIO);
  637 moveon:
  638         /*
  639          * Process 'reg' prop.
  640          */
  641         if ((rv = fdt_addrsize_cells(OF_parent(node), &par_addr_cells,
  642             &par_size_cells)) != 0)
  643                 return(ENXIO);
  644 
  645         tuple_size = sizeof(pcell_t) * (par_addr_cells + par_size_cells);
  646         len = OF_getprop(node, "reg", reg, sizeof(reg));
  647         tuples = len / tuple_size;
  648         if (tuple_size <= 0)
  649                 return (EINVAL);
  650 
  651         /*
  652          * Get address/size. XXX we assume only the first 'reg' tuple is used.
  653          */
  654         rv = fdt_data_to_res(reg, par_addr_cells, par_size_cells,
  655             &start, &size);
  656         if (rv != 0)
  657                 return (rv);
  658         start += fdt_immr_va;
  659 
  660         /*
  661          * Process 'pin-count' and 'pin-map' props.
  662          */
  663         if (OF_getprop(node, "pin-count", &pin_count, sizeof(pin_count)) <= 0)
  664                 return (ENXIO);
  665         pin_count = fdt32_to_cpu(pin_count);
  666         if (pin_count > MPP_PIN_MAX)
  667                 return (ERANGE);
  668 
  669         if (OF_getprop(node, "#pin-cells", &pin_cells, sizeof(pin_cells)) <= 0)
  670                 pin_cells = MPP_PIN_CELLS;
  671         pin_cells = fdt32_to_cpu(pin_cells);
  672         if (pin_cells > MPP_PIN_CELLS)
  673                 return (ERANGE);
  674         tuple_size = sizeof(pcell_t) * pin_cells;
  675 
  676         bzero(pinmap, sizeof(pinmap));
  677         len = OF_getprop(node, "pin-map", pinmap, sizeof(pinmap));
  678         if (len <= 0)
  679                 return (ERANGE);
  680         if (len % tuple_size)
  681                 return (ERANGE);
  682         pins = len / tuple_size;
  683         if (pins > pin_count)
  684                 return (ERANGE);
  685         /*
  686          * Fill out a "mpp[pin] => function" table. All pins unspecified in
  687          * the 'pin-map' property are defaulted to 0 function i.e. GPIO.
  688          */
  689         bzero(mpp, sizeof(mpp));
  690         pinmap_ptr = pinmap;
  691         for (i = 0; i < pins; i++) {
  692                 mpp_pin = fdt32_to_cpu(*pinmap_ptr);
  693                 mpp_function = fdt32_to_cpu(*(pinmap_ptr + 1));
  694                 mpp[mpp_pin] = mpp_function;
  695                 pinmap_ptr += pin_cells;
  696         }
  697 
  698         /*
  699          * Prepare and program MPP control register values.
  700          */
  701         ctrl_offset = 0;
  702         for (i = 0; i < pin_count;) {
  703                 ctrl_val = 0;
  704 
  705                 for (j = 0; j < MPP_PINS_PER_REG; j++) {
  706                         if (i + j == pin_count - 1)
  707                                 break;
  708                         ctrl_val |= MPP_SEL(i + j, mpp[i + j]);
  709                 }
  710                 i += MPP_PINS_PER_REG;
  711                 bus_space_write_4(fdtbus_bs_tag, start, ctrl_offset,
  712                     ctrl_val);
  713 
  714 #if defined(SOC_MV_ORION)
  715                 /*
  716                  * Third MPP reg on Orion SoC is placed
  717                  * non-linearly (with different offset).
  718                  */
  719                 if (i ==  (2 * MPP_PINS_PER_REG))
  720                         ctrl_offset = 0x50;
  721                 else
  722 #endif
  723                         ctrl_offset += 4;
  724         }
  725 
  726         return (0);
  727 }
  728 
/*
 * Static devmap table populated by platform_devmap_init(): one IMMR
 * entry, up to two PCI entries and one CESA SRAM entry; the extra slot
 * presumably stays zeroed to terminate the table -- confirm against
 * pmap_devmap_bootstrap().
 */
#define FDT_DEVMAP_MAX  (1 + 2 + 1 + 1)
static struct pmap_devmap fdt_devmap[FDT_DEVMAP_MAX] = {
	{ 0, 0, 0, 0, 0, }
};
  733 
  734 /*
  735  * Construct pmap_devmap[] with DT-derived config data.
  736  */
  737 static int
  738 platform_devmap_init(void)
  739 {
  740         phandle_t root, child;
  741         u_long base, size;
  742         int i;
  743 
  744         /*
  745          * IMMR range.
  746          */
  747         i = 0;
  748         fdt_devmap[i].pd_va = fdt_immr_va;
  749         fdt_devmap[i].pd_pa = fdt_immr_pa;
  750         fdt_devmap[i].pd_size = fdt_immr_size;
  751         fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
  752         fdt_devmap[i].pd_cache = PTE_NOCACHE;
  753         i++;
  754 
  755         /*
  756          * PCI range(s).
  757          */
  758         if ((root = OF_finddevice("/")) == 0)
  759                 return (ENXIO);
  760 
  761         for (child = OF_child(root); child != 0; child = OF_peer(child))
  762                 if (fdt_is_type(child, "pci")) {
  763                         /*
  764                          * Check space: each PCI node will consume 2 devmap
  765                          * entries.
  766                          */
  767                         if (i + 1 >= FDT_DEVMAP_MAX) {
  768                                 return (ENOMEM);
  769                                 break;
  770                         }
  771 
  772                         /*
  773                          * XXX this should account for PCI and multiple ranges
  774                          * of a given kind.
  775                          */
  776                         if (fdt_pci_devmap(child, &fdt_devmap[i],
  777                             MV_PCIE_IO_BASE, MV_PCIE_MEM_BASE) != 0)
  778                                 return (ENXIO);
  779                         i += 2;
  780                 }
  781 
  782         /*
  783          * CESA SRAM range.
  784          */
  785         if ((child = OF_finddevice("sram")) != 0)
  786                 if (fdt_is_compatible(child, "mrvl,cesa-sram"))
  787                         goto moveon;
  788 
  789         if ((child = fdt_find_compatible(root, "mrvl,cesa-sram", 0)) == 0)
  790                 /* No CESA SRAM node. */
  791                 goto out;
  792 moveon:
  793         if (i >= FDT_DEVMAP_MAX)
  794                 return (ENOMEM);
  795 
  796         if (fdt_regsize(child, &base, &size) != 0)
  797                 return (EINVAL);
  798 
  799         fdt_devmap[i].pd_va = MV_CESA_SRAM_BASE; /* XXX */
  800         fdt_devmap[i].pd_pa = base;
  801         fdt_devmap[i].pd_size = size;
  802         fdt_devmap[i].pd_prot = VM_PROT_READ | VM_PROT_WRITE;
  803         fdt_devmap[i].pd_cache = PTE_NOCACHE;
  804 
  805 out:
  806         pmap_devmap_bootstrap_table = &fdt_devmap[0];
  807         return (0);
  808 }
  809 
  810 struct arm32_dma_range *
  811 bus_dma_get_range(void)
  812 {
  813 
  814         return (NULL);
  815 }
  816 
  817 int
  818 bus_dma_get_range_nb(void)
  819 {
  820 
  821         return (0);
  822 }

Cache object: 3d8f836a9b0dcc85a0a4d65173e50969


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.