FreeBSD/Linux Kernel Cross Reference
sys/include/asm-mips64/r4kcache.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 */
#ifndef __ASM_R4KCACHE_H
#define __ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>

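/*
 * cache_op() wraps a single MIPS CACHE instruction.  In the CACHE
 * opcode's 5-bit op field the low two bits select the target cache
 * (primary I, primary D, tertiary or secondary) and the upper three
 * bits select the operation; the constants from <asm/cacheops.h>
 * encode both.  The "m" constraint hands GCC a real memory operand so
 * a valid base+offset address is generated, and .set mips3 is needed
 * because CACHE only exists from the R4000 (MIPS III) onwards.
 */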
#define cache_op(op,addr)                                               \
        __asm__ __volatile__(                                           \
        "       .set    noreorder               \n"                     \
        "       .set    mips3                   \n"                     \
        "       cache   %0, %1                  \n"                     \
        "       .set    mips0                   \n"                     \
        "       .set    reorder"                                        \
        :                                                               \
        : "i" (op), "m" (*(unsigned char *)(addr)))

static inline void flush_icache_line_indexed(unsigned long addr)
{
        cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_I, addr);
}

static inline void flush_dcache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

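/*
 * A minimal illustrative sketch (not part of the original header) of
 * how the single-line hit operations above are typically combined to
 * write back an arbitrary virtual range.  The dc_lsize variable
 * standing in for the d-cache line size is hypothetical; real callers
 * would read it from cpu_data.
 */
#if 0   /* example only */
static inline void writeback_inv_dcache_range(unsigned long start,
                                              unsigned long end)
{
        unsigned long lsize = dc_lsize;         /* hypothetical */
        unsigned long addr = start & ~(lsize - 1);

        /* Round down to a line boundary, then hit every line that
           overlaps [start, end). */
        while (addr < end) {
                flush_dcache_line(addr);
                addr += lsize;
        }
}
#endif
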
/*
 * The next two are for badland addresses like signal trampolines.
 */
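/*
 * "Protected" means the access is covered by the kernel exception
 * table: label 1 marks the CACHE instruction, label 2 the instruction
 * after it.  If the operation faults, for instance because the page
 * was never mapped, the fault handler finds the 1b->2b fixup entry
 * and resumes at 2, so the cache op is skipped instead of killing
 * the kernel.
 */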
static inline void protected_flush_icache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                ".set mips3\n"
                "1:\tcache %0,(%1)\n"
                "2:\t.set mips0\n\t"
                ".set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                STR(PTR)"\t1b,2b\n\t"
                ".previous"
                :
                : "i" (Hit_Invalidate_I), "r" (addr));
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop, so we use Hit_Writeback_Inv_D, which is
 * supported by all R4000-style caches.  At worst one cacheline gets
 * invalidated unnecessarily, so the penalty is modest.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
                ".set noreorder\n\t"
                ".set mips3\n"
                "1:\tcache %0,(%1)\n"
                "2:\t.set mips0\n\t"
                ".set reorder\n\t"
                ".section\t__ex_table,\"a\"\n\t"
                STR(PTR)"\t1b,2b\n\t"
                ".previous"
                :
                : "i" (Hit_Writeback_Inv_D), "r" (addr));
}
  105 
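/*
 * Illustrative fragment (not from the original header): a typical use
 * of the protected pair after code has been written through the
 * d-cache at a user address, e.g. a signal trampoline; tramp is a
 * hypothetical user-space address.
 */
#if 0   /* example only */
        protected_writeback_dcache_line(tramp); /* push the data out */
        protected_flush_icache_line(tramp);     /* drop stale I-cache copy */
#endif
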
/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

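/*
 * Each cacheN_unroll32 macro below issues 32 CACHE instructions per
 * invocation, one line apart, so a single call covers 32 lines:
 * 0x200 bytes for 16-byte lines, 0x400 for 32-byte, 0x800 for 64-byte
 * and 0x1000 for 128-byte lines.  That is why the blast loops advance
 * their address by exactly those strides.  The unrolling avoids
 * per-line loop overhead when wiping a whole cache or page.
 */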
#define cache16_unroll32(base,op)                               \
        __asm__ __volatile__("                                  \
                .set noreorder;                                 \
                .set mips3;                                     \
                cache %1, 0x000(%0); cache %1, 0x010(%0);       \
                cache %1, 0x020(%0); cache %1, 0x030(%0);       \
                cache %1, 0x040(%0); cache %1, 0x050(%0);       \
                cache %1, 0x060(%0); cache %1, 0x070(%0);       \
                cache %1, 0x080(%0); cache %1, 0x090(%0);       \
                cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);       \
                cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);       \
                cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);       \
                cache %1, 0x100(%0); cache %1, 0x110(%0);       \
                cache %1, 0x120(%0); cache %1, 0x130(%0);       \
                cache %1, 0x140(%0); cache %1, 0x150(%0);       \
                cache %1, 0x160(%0); cache %1, 0x170(%0);       \
                cache %1, 0x180(%0); cache %1, 0x190(%0);       \
                cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);       \
                cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);       \
                cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);       \
                .set mips0;                                     \
                .set reorder"                                   \
                :                                               \
                : "r" (base),                                   \
                  "i" (op));

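/*
 * The blast_* functions walk every way of the cache: ws_inc is the
 * size of one way (1 << waybit), ws_end is ways * waysize, and OR-ing
 * ws into a KSEG0 address selects the way for the Index_* operations.
 * On a hypothetical 4-way cache with 8KB ways (waybit = 13), ws would
 * take the values 0x0000, 0x2000, 0x4000 and 0x6000.  The *_page
 * variants instead use Hit_* operations on virtual addresses, touching
 * only lines that actually hold the page's data, while the
 * *_page_indexed variants force out every way at the page's indexes.
 */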
static inline void blast_dcache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.dcache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_dcache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        while (start < end) {
                cache16_unroll32(start,Hit_Writeback_Inv_D);
                start += 0x200;
        }
}

static inline void blast_dcache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_icache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        while (start < end) {
                cache16_unroll32(start,Hit_Invalidate_I);
                start += 0x200;
        }
}

static inline void blast_icache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache16(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache16_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = page + PAGE_SIZE;

        while (start < end) {
                cache16_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x200;
        }
}

static inline void blast_scache16_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x200)
                        cache16_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

#define cache32_unroll32(base,op)                               \
        __asm__ __volatile__("                                  \
                .set noreorder;                                 \
                .set mips3;                                     \
                cache %1, 0x000(%0); cache %1, 0x020(%0);       \
                cache %1, 0x040(%0); cache %1, 0x060(%0);       \
                cache %1, 0x080(%0); cache %1, 0x0a0(%0);       \
                cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);       \
                cache %1, 0x100(%0); cache %1, 0x120(%0);       \
                cache %1, 0x140(%0); cache %1, 0x160(%0);       \
                cache %1, 0x180(%0); cache %1, 0x1a0(%0);       \
                cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);       \
                cache %1, 0x200(%0); cache %1, 0x220(%0);       \
                cache %1, 0x240(%0); cache %1, 0x260(%0);       \
                cache %1, 0x280(%0); cache %1, 0x2a0(%0);       \
                cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);       \
                cache %1, 0x300(%0); cache %1, 0x320(%0);       \
                cache %1, 0x340(%0); cache %1, 0x360(%0);       \
                cache %1, 0x380(%0); cache %1, 0x3a0(%0);       \
                cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);       \
                .set mips0;                                     \
                .set reorder"                                   \
                :                                               \
                : "r" (base),                                   \
                  "i" (op));

static inline void blast_dcache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.dcache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_dcache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        while (start < end) {
                cache32_unroll32(start,Hit_Writeback_Inv_D);
                start += 0x400;
        }
}

static inline void blast_dcache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
        unsigned long ws_end = current_cpu_data.dcache.ways <<
                               current_cpu_data.dcache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_D);
}

static inline void blast_icache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        while (start < end) {
                cache32_unroll32(start,Hit_Invalidate_I);
                start += 0x400;
        }
}

static inline void blast_icache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache32(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache32_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = page + PAGE_SIZE;

        while (start < end) {
                cache32_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x400;
        }
}

static inline void blast_scache32_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400)
                        cache32_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

#define cache64_unroll32(base,op)                               \
        __asm__ __volatile__("                                  \
                .set noreorder;                                 \
                .set mips3;                                     \
                cache %1, 0x000(%0); cache %1, 0x040(%0);       \
                cache %1, 0x080(%0); cache %1, 0x0c0(%0);       \
                cache %1, 0x100(%0); cache %1, 0x140(%0);       \
                cache %1, 0x180(%0); cache %1, 0x1c0(%0);       \
                cache %1, 0x200(%0); cache %1, 0x240(%0);       \
                cache %1, 0x280(%0); cache %1, 0x2c0(%0);       \
                cache %1, 0x300(%0); cache %1, 0x340(%0);       \
                cache %1, 0x380(%0); cache %1, 0x3c0(%0);       \
                cache %1, 0x400(%0); cache %1, 0x440(%0);       \
                cache %1, 0x480(%0); cache %1, 0x4c0(%0);       \
                cache %1, 0x500(%0); cache %1, 0x540(%0);       \
                cache %1, 0x580(%0); cache %1, 0x5c0(%0);       \
                cache %1, 0x600(%0); cache %1, 0x640(%0);       \
                cache %1, 0x680(%0); cache %1, 0x6c0(%0);       \
                cache %1, 0x700(%0); cache %1, 0x740(%0);       \
                cache %1, 0x780(%0); cache %1, 0x7c0(%0);       \
                .set mips0;                                     \
                .set reorder"                                   \
                :                                               \
                : "r" (base),                                   \
                  "i" (op));

static inline void blast_icache64(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_icache64_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;

        while (start < end) {
                cache64_unroll32(start,Hit_Invalidate_I);
                start += 0x800;
        }
}

static inline void blast_icache64_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Invalidate_I);
}

static inline void blast_scache64(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache64_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = page + PAGE_SIZE;

        while (start < end) {
                cache64_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x800;
        }
}

static inline void blast_scache64_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x800)
                        cache64_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

#define cache128_unroll32(base,op)                              \
        __asm__ __volatile__("                                  \
                .set noreorder;                                 \
                .set mips3;                                     \
                cache %1, 0x000(%0); cache %1, 0x080(%0);       \
                cache %1, 0x100(%0); cache %1, 0x180(%0);       \
                cache %1, 0x200(%0); cache %1, 0x280(%0);       \
                cache %1, 0x300(%0); cache %1, 0x380(%0);       \
                cache %1, 0x400(%0); cache %1, 0x480(%0);       \
                cache %1, 0x500(%0); cache %1, 0x580(%0);       \
                cache %1, 0x600(%0); cache %1, 0x680(%0);       \
                cache %1, 0x700(%0); cache %1, 0x780(%0);       \
                cache %1, 0x800(%0); cache %1, 0x880(%0);       \
                cache %1, 0x900(%0); cache %1, 0x980(%0);       \
                cache %1, 0xa00(%0); cache %1, 0xa80(%0);       \
                cache %1, 0xb00(%0); cache %1, 0xb80(%0);       \
                cache %1, 0xc00(%0); cache %1, 0xc80(%0);       \
                cache %1, 0xd00(%0); cache %1, 0xd80(%0);       \
                cache %1, 0xe00(%0); cache %1, 0xe80(%0);       \
                cache %1, 0xf00(%0); cache %1, 0xf80(%0);       \
                .set mips0;                                     \
                .set reorder"                                   \
                :                                               \
                : "r" (base),                                   \
                  "i" (op));

static inline void blast_scache128(void)
{
        unsigned long start = KSEG0;
        unsigned long end = start + current_cpu_data.scache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x1000)
                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

static inline void blast_scache128_page(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = page + PAGE_SIZE;

        while (start < end) {
                cache128_unroll32(start,Hit_Writeback_Inv_SD);
                start += 0x1000;
        }
}

static inline void blast_scache128_page_indexed(unsigned long page)
{
        unsigned long start = page;
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.scache.waybit;
        unsigned long ws_end = current_cpu_data.scache.ways <<
                               current_cpu_data.scache.waybit;
        unsigned long ws, addr;

        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x1000)
                        cache128_unroll32(addr|ws,Index_Writeback_Inv_SD);
}

#endif /* __ASM_R4KCACHE_H */
