FreeBSD/Linux Kernel Cross Reference
sys/mips/mips/cache_mipsNN.c

/*      $NetBSD: cache_mipsNN.c,v 1.10 2005/12/24 20:07:19 perry Exp $  */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cputype.h"

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>

#include <machine/cache.h>
#include <machine/cache_r4k.h>
#include <machine/cpuinfo.h>

#define round_line16(x)         (((x) + 15) & ~15)
#define trunc_line16(x)         ((x) & ~15)

#define round_line32(x)         (((x) + 31) & ~31)
#define trunc_line32(x)         ((x) & ~31)
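
/*
 * Example of the rounding above, for a 16-byte line:
 *
 *      trunc_line16(0x1234) == 0x1230  (clears the low four bits)
 *      round_line16(0x1234) == 0x1240  (next boundary up)
 *      round_line16(0x1230) == 0x1230  (already aligned)
 *
 * The range functions below use the pair to widen a (va, size)
 * request outward so that partial lines at both ends are covered.
 */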

#if defined(CPU_NLM)
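/*
 * "jr.hb" (jump register with instruction hazard barrier) is a
 * MIPS32r2/MIPS64r2 instruction; it is emitted as a raw .word below
 * because older assemblers reject the mnemonic.  0x1000408 is the
 * encoding of "jr.hb $8" (rs = 8, hint bit 10 set, JR function code),
 * and the jump target is the very next instruction, so the sequence
 * is a no-op apart from the hazard barrier itself.
 */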
static __inline void
xlp_sync(void)
{
        __asm __volatile (
            ".set push              \n"
            ".set noreorder         \n"
            ".set mips64            \n"
            "dla    $8, 1f          \n"
            "/* jr.hb $8 */         \n"
            ".word 0x1000408        \n"
            "nop                    \n"
         "1: nop                    \n"
            ".set pop               \n"
            : : : "$8");
}
#endif

#if defined(SB1250_PASS1)
#define SYNC    __asm volatile("sync; sync")
#elif defined(CPU_NLM)
#define SYNC    xlp_sync()
#else
#define SYNC    __asm volatile("sync")
#endif

#if defined(CPU_CNMIPS)
#define SYNCI  mips_sync_icache();
#elif defined(CPU_NLM)
#define SYNCI   xlp_sync()
#else
#define SYNCI
#endif

/*
 * Exported variables for consumers like bus_dma code
 */
int mips_picache_linesize;
int mips_pdcache_linesize;

static int picache_size;
static int picache_stride;
static int picache_loopcount;
static int picache_way_mask;
static int pdcache_size;
static int pdcache_stride;
static int pdcache_loopcount;
static int pdcache_way_mask;

void
mipsNN_cache_init(struct mips_cpuinfo * cpuinfo)
{
        int flush_multiple_lines_per_way;

        flush_multiple_lines_per_way =
            cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize > PAGE_SIZE;
        if (cpuinfo->icache_virtual) {
                /*
                 * With a virtual Icache we don't need to flush
                 * multiples of the page size with index ops; we just
                 * need to flush one page's worth.
                 */
                flush_multiple_lines_per_way = 0;
        }


        if (flush_multiple_lines_per_way) {
                picache_stride = PAGE_SIZE;
                picache_loopcount = (cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize / PAGE_SIZE) *
                    cpuinfo->l1.ic_nways;
        } else {
                picache_stride = cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_linesize;
                picache_loopcount = cpuinfo->l1.ic_nways;
        }

        if (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize < PAGE_SIZE) {
                pdcache_stride = cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize;
                pdcache_loopcount = cpuinfo->l1.dc_nways;
        } else {
                pdcache_stride = PAGE_SIZE;
                pdcache_loopcount = (cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_linesize / PAGE_SIZE) *
                    cpuinfo->l1.dc_nways;
        }

        mips_picache_linesize = cpuinfo->l1.ic_linesize;
        mips_pdcache_linesize = cpuinfo->l1.dc_linesize;

        picache_size = cpuinfo->l1.ic_size;
        picache_way_mask = (picache_size / cpuinfo->l1.ic_nways) - 1;
        pdcache_size = cpuinfo->l1.dc_size;
        pdcache_way_mask = (pdcache_size / cpuinfo->l1.dc_nways) - 1;

#define CACHE_DEBUG
#ifdef CACHE_DEBUG
        printf("Cache info:\n");
        if (cpuinfo->icache_virtual)
                printf("  icache is virtual\n");
        printf("  picache_stride    = %d\n", picache_stride);
        printf("  picache_loopcount = %d\n", picache_loopcount);
        printf("  pdcache_stride    = %d\n", pdcache_stride);
        printf("  pdcache_loopcount = %d\n", pdcache_loopcount);
#endif
}
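
/*
 * A worked example of the stride/loopcount computation above, using
 * an illustrative 4-way I-cache with 256 sets and 32-byte lines on a
 * 4 KB page (numbers chosen for the example, not from any one CPU):
 *
 *      way size = ic_nsets * ic_linesize = 256 * 32 = 8192 bytes
 *
 * 8192 > PAGE_SIZE, so flush_multiple_lines_per_way is set and
 *
 *      picache_stride    = PAGE_SIZE = 4096
 *      picache_loopcount = (8192 / 4096) * 4 ways = 8
 *
 * The index-op loops below then walk one stride's worth of lines,
 * repeating each CACHE op at 'stride' intervals 'loopcount' times,
 * so every (page, way) slice of the cache is visited exactly once.
 */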

void
mipsNN_icache_sync_all_16(void)
{
        vm_offset_t va, eva;

        va = MIPS_PHYS_TO_KSEG0(0);
        eva = va + picache_size;

        /*
         * Since we're hitting the whole thing, we don't have to
         * worry about the N different "ways".
         */

        mips_intern_dcache_wbinv_all();

        while (va < eva) {
                cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += (32 * 16);
        }

        SYNC;
}

void
mipsNN_icache_sync_all_32(void)
{
        vm_offset_t va, eva;

        va = MIPS_PHYS_TO_KSEG0(0);
        eva = va + picache_size;

        /*
         * Since we're hitting the whole thing, we don't have to
         * worry about the N different "ways".
         */

        mips_intern_dcache_wbinv_all();

        while (va < eva) {
                cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += (32 * 32);
        }

        SYNC;
}

void
mipsNN_icache_sync_range_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line16(va + size);
        va = trunc_line16(va);

        mips_intern_dcache_wb_range(va, (eva - va));

        while ((eva - va) >= (32 * 16)) {
                cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
                va += (32 * 16);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
                va += 16;
        }

        SYNC;
}

void
mipsNN_icache_sync_range_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line32(va + size);
        va = trunc_line32(va);

        mips_intern_dcache_wb_range(va, (eva - va));

        while ((eva - va) >= (32 * 32)) {
                cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
                va += (32 * 32);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
                va += 32;
        }

        SYNC;
}
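
/*
 * A minimal caller-side sketch (hypothetical code; in-tree callers
 * normally reach these routines through the wrappers in
 * <machine/cache.h> rather than calling a sized variant directly).
 * After new instructions are stored through a cacheable mapping, the
 * dirty D-cache lines must be written back and the overlapping
 * I-cache lines invalidated before the new code is executed:
 */
#if 0
static void
patch_insn(uint32_t *va, uint32_t new_insn)
{
        *va = new_insn;         /* store lands in the D-cache */
        /* wb the D-cache line, then invalidate the stale I-cache line */
        mipsNN_icache_sync_range_32((vm_offset_t)va, sizeof(*va));
}
#endif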

void
mipsNN_icache_sync_range_index_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva, tmpva;
        int i, stride, loopcount;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

        eva = round_line16(va + size);
        va = trunc_line16(va);

        /*
         * GCC generates better code in the loops if we reference local
         * copies of these global variables.
         */
        stride = picache_stride;
        loopcount = picache_loopcount;

        mips_intern_dcache_wbinv_range_index(va, (eva - va));

        while ((eva - va) >= (8 * 16)) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_r4k_op_8lines_16(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += 8 * 16;
        }

        while (va < eva) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_op_r4k_line(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += 16;
        }
}

void
mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva, tmpva;
        int i, stride, loopcount;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

        eva = round_line32(va + size);
        va = trunc_line32(va);

        /*
         * GCC generates better code in the loops if we reference local
         * copies of these global variables.
         */
        stride = picache_stride;
        loopcount = picache_loopcount;

        mips_intern_dcache_wbinv_range_index(va, (eva - va));

        while ((eva - va) >= (8 * 32)) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_r4k_op_8lines_32(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += 8 * 32;
        }

        while (va < eva) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_op_r4k_line(tmpva,
                            CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
                va += 32;
        }
}
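
/*
 * Example of the index-address construction above (illustrative
 * numbers): with an 8 KB way (way mask 0x1fff) and va = 0x40323460,
 * va & 0x1fff = 0x1460, and MIPS_PHYS_TO_KSEG0(0x1460) = 0x80001460.
 * Index CACHE ops decode only the index bits of the address, so any
 * valid mapping with the right low bits will do; KSEG0 is used
 * because it is always mapped and cannot take a TLB miss.
 */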

void
mipsNN_pdcache_wbinv_all_16(void)
{
        vm_offset_t va, eva;

        va = MIPS_PHYS_TO_KSEG0(0);
        eva = va + pdcache_size;

        /*
         * Since we're hitting the whole thing, we don't have to
         * worry about the N different "ways".
         */

        while (va < eva) {
                cache_r4k_op_32lines_16(va,
                    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += (32 * 16);
        }

        SYNC;
}

void
mipsNN_pdcache_wbinv_all_32(void)
{
        vm_offset_t va, eva;

        va = MIPS_PHYS_TO_KSEG0(0);
        eva = va + pdcache_size;

        /*
         * Since we're hitting the whole thing, we don't have to
         * worry about the N different "ways".
         */

        while (va < eva) {
                cache_r4k_op_32lines_32(va,
                    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += (32 * 32);
        }

        SYNC;
}

void
mipsNN_pdcache_wbinv_range_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line16(va + size);
        va = trunc_line16(va);

        while ((eva - va) >= (32 * 16)) {
                cache_r4k_op_32lines_16(va,
                    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
                va += (32 * 16);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
                va += 16;
        }

        SYNC;
}

void
mipsNN_pdcache_wbinv_range_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line32(va + size);
        va = trunc_line32(va);

        while ((eva - va) >= (32 * 32)) {
                cache_r4k_op_32lines_32(va,
                    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
                va += (32 * 32);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
                va += 32;
        }

        SYNC;
}
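
/*
 * A minimal DMA-oriented sketch (hypothetical driver fragment; real
 * drivers go through bus_dmamap_sync(), which lands on routines like
 * these on non-snooping MIPS systems).  Before handing a buffer to a
 * device, dirty lines are pushed to memory and the range invalidated
 * so that the CPU's later reads observe the device's writes:
 */
#if 0
static void
dma_hand_off(void *buf, size_t len)
{
        mipsNN_pdcache_wbinv_range_32((vm_offset_t)buf, len);
        /* ... start the device transfer to/from buf ... */
}
#endif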

void
mipsNN_pdcache_wbinv_range_index_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva, tmpva;
        int i, stride, loopcount;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

        eva = round_line16(va + size);
        va = trunc_line16(va);

        /*
         * GCC generates better code in the loops if we reference local
         * copies of these global variables.
         */
        stride = pdcache_stride;
        loopcount = pdcache_loopcount;

        while ((eva - va) >= (8 * 16)) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_r4k_op_8lines_16(tmpva,
                            CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += 8 * 16;
        }

        while (va < eva) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_op_r4k_line(tmpva,
                            CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += 16;
        }
}

void
mipsNN_pdcache_wbinv_range_index_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva, tmpva;
        int i, stride, loopcount;

        /*
         * Since we're doing Index ops, we expect to not be able
         * to access the address we've been given.  So, get the
         * bits that determine the cache index, and make a KSEG0
         * address out of them.
         */
        va = MIPS_PHYS_TO_KSEG0(va & pdcache_way_mask);

        eva = round_line32(va + size);
        va = trunc_line32(va);

        /*
         * GCC generates better code in the loops if we reference local
         * copies of these global variables.
         */
        stride = pdcache_stride;
        loopcount = pdcache_loopcount;

        while ((eva - va) >= (8 * 32)) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_r4k_op_8lines_32(tmpva,
                            CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += 8 * 32;
        }

        while (va < eva) {
                tmpva = va;
                for (i = 0; i < loopcount; i++, tmpva += stride)
                        cache_op_r4k_line(tmpva,
                            CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
                va += 32;
        }
}

void
mipsNN_pdcache_inv_range_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line16(va + size);
        va = trunc_line16(va);

        while ((eva - va) >= (32 * 16)) {
                cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
                va += (32 * 16);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
                va += 16;
        }

        SYNC;
}

void
mipsNN_pdcache_inv_range_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line32(va + size);
        va = trunc_line32(va);

        while ((eva - va) >= (32 * 32)) {
                cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
                va += (32 * 32);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
                va += 32;
        }

        SYNC;
}

void
mipsNN_pdcache_wb_range_16(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line16(va + size);
        va = trunc_line16(va);

        while ((eva - va) >= (32 * 16)) {
                cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
                va += (32 * 16);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
                va += 16;
        }

        SYNC;
}

void
mipsNN_pdcache_wb_range_32(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva;

        eva = round_line32(va + size);
        va = trunc_line32(va);

        while ((eva - va) >= (32 * 32)) {
                cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
                va += (32 * 32);
        }

        while (va < eva) {
                cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
                va += 32;
        }

        SYNC;
}
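
/*
 * How the three range flavors above divide the work for DMA (the
 * usual convention, sketched; individual callers may differ):
 *
 *      wb    - CPU wrote the buffer and a device will read it:
 *              write dirty lines back, keep them valid (PREWRITE).
 *      inv   - a device wrote the buffer and the CPU will read it:
 *              discard the stale lines (POSTREAD).
 *      wbinv - both directions, or when a partially-dirty line must
 *              not be lost before it is invalidated.
 */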


#ifdef CPU_CNMIPS

void
mipsNN_icache_sync_all_128(void)
{
        SYNCI
}

void
mipsNN_icache_sync_range_128(vm_offset_t va, vm_size_t size)
{
        SYNC;
}

void
mipsNN_icache_sync_range_index_128(vm_offset_t va, vm_size_t size)
{
}


void
mipsNN_pdcache_wbinv_all_128(void)
{
}


void
mipsNN_pdcache_wbinv_range_128(vm_offset_t va, vm_size_t size)
{
        SYNC;
}

void
mipsNN_pdcache_wbinv_range_index_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_inv_range_128(vm_offset_t va, vm_size_t size)
{
}

void
mipsNN_pdcache_wb_range_128(vm_offset_t va, vm_size_t size)
{
        SYNC;
}

#endif
