/*-
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef MACHINE_CPU_V6_H
#define MACHINE_CPU_V6_H

/* There are no user-serviceable parts here; they may change without notice. */
#ifndef _KERNEL
#error Only include this file in the kernel
#endif

#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

#if __ARM_ARCH < 6
#error Only include this file for ARMv6 and later
#else

#define CPU_ASID_KERNEL 0

void dcache_wbinv_poc_all(void);	/* !!! NOT an SMP-coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev()  __asm __volatile("sev" : : : "memory")
#define wfe()  __asm __volatile("wfe" : : : "memory")
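
/*
 * Usage sketch (comment only, not compiled): sev() and wfe() are typically
 * paired to build a low-power spin loop, e.g. waiting for a flag set by
 * another core.  The flag variable here is hypothetical; wfe() may wake
 * spuriously, hence the loop:
 *
 *	while (atomic_load_acq_int(&flag) == 0)
 *		wfe();
 *
 * and on the producer side, after setting the flag:
 *
 *	atomic_store_rel_int(&flag, 1);
 *	dsb();
 *	sev();
 */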

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define	_RF0(fname, aname...)					\
static __inline uint32_t					\
fname(void)							\
{								\
	uint32_t reg;						\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define	_R64F0(fname, aname)					\
static __inline uint64_t					\
fname(void)							\
{								\
	uint64_t reg;						\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define	_WF0(fname, aname...)					\
static __inline void						\
fname(void)							\
{								\
	__asm __volatile("mcr\t" _FX(aname));			\
}

#define	_WF1(fname, aname...)					\
static __inline void						\
fname(uint32_t reg)						\
{								\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));	\
}

#define	_W64F1(fname, aname...)					\
static __inline void						\
fname(uint64_t reg)						\
{								\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));	\
}
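
/*
 * For illustration (comment only, not compiled): an invocation such as
 *
 *	_RF0(cp15_midr_get, CP15_MIDR(%0))
 *
 * expands to an inline accessor roughly equivalent to
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc	p15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 *
 * where the concrete coprocessor operands come from the CP15_MIDR()
 * definition in <machine/sysreg.h>.
 */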

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)	/* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)	/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))	/* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))	/* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined SMP
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))	/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)		/* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)	/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))	/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))	/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))	/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))	/* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))	/* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))	/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))	/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))	/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)	/* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined SMP
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)	/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))	/* Instruction cache invalidate by MVA PoU */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif
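
/*
 * Illustrative sketch (comment only, not compiled): under DEV_PMU the PMU
 * driver extends the 32-bit cycle counter PMCCNTR to 64 bits by counting
 * overflow interrupts (PMU_OVSR_C) into ccnt_hi[] per CPU.  Assuming that
 * convention, a 64-bit cycle count can be composed roughly like this
 * (overflow races between the two reads are ignored here):
 *
 *	uint32_t lo = cp15_pmccntr_get();
 *	uint64_t cycles = ((uint64_t)ccnt_hi[PCPU_GET(cpuid)] << 32) | lo;
 */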

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))
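
/*
 * Usage sketch (comment only, not compiled): on hardware with the Generic
 * Timer, a coarse busy-wait can be built from the 64-bit physical counter
 * and its fixed frequency.  The function name is hypothetical and not part
 * of this header:
 *
 *	static void
 *	delay_us(uint32_t usec)
 *	{
 *		uint64_t last;
 *
 *		last = cp15_cntpct_get() +
 *		    (uint64_t)usec * cp15_cntfrq_get() / 1000000;
 *		while (cp15_cntpct_get() < last)
 *			continue;
 *	}
 */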

#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush a single TLB entry (even if global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush a range of TLB entries (even if global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined SMP

static __inline void
tlb_flush_all(void)
{

	dsb();
	_CP15_TLBIALLIS();
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	_CP15_TLBIASIDIS(CPU_ASID_KERNEL);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVAAIS(va);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVAAIS(va);
	dsb();
}
#else /* SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* SMP */
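
/*
 * Usage sketch (comment only, not compiled): after changing a page table
 * entry for a kernel mapping, the stale translation must be flushed before
 * the new mapping is relied upon.  pte_store() is a hypothetical stand-in
 * for the pmap's real PTE update; va must be page aligned:
 *
 *	pte_store(ptep, new_pte);
 *	tlb_flush(va);
 *
 * tlb_flush() broadcasts on ARMv7 SMP kernels and falls back to the local
 * variant otherwise, so callers do not choose between the two.
 */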

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}
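
/*
 * Usage sketch (comment only, not compiled): icache_sync() is the right
 * call after the kernel writes instructions to memory, e.g. when a
 * trampoline or breakpoint is patched in, since the I-cache may still hold
 * the old bytes.  The buffer names are hypothetical:
 *
 *	memcpy(code_va, new_insns, len);
 *	icache_sync((vm_offset_t)code_va, len);
 */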

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_ICIALLUIS();
#else
	_CP15_ICIALLU();
#endif
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{
#if __ARM_ARCH >= 7 && defined SMP
	_CP15_BPIALLIS();
#else
	_CP15_BPIALL();
#endif
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7 && defined SMP
		_CP15_DCCMVAU(va);
#else
		_CP15_DCCMVAC(va);
#endif
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction.  Within the given range, if no cache held a dirty
 * cacheline beforehand, no stale cacheline should remain in any of them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new data
 * into memory.  The concern in this case is to ensure that dirty lines are
 * not evicted to main memory, where they would overwrite the DMA data.  For
 * that reason, the L1 is done first, so that a line evicted from L1 doesn't
 * flow into L2 after the L2 has been invalidated.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}
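
/*
 * Usage sketch (comment only, not compiled): a typical DMA receive follows
 * a busdma-style PRE/POST pattern.  Before the device starts writing,
 * dcache_inv_poc_dma() discards any dirty lines that could later be evicted
 * over the incoming data; after the transfer completes, dcache_inv_poc()
 * drops any lines speculatively fetched while the DMA was in flight.  The
 * buffer names are hypothetical:
 *
 *	dcache_inv_poc_dma(buf_va, buf_pa, len);   before starting DMA
 *	... device fills the buffer ...
 *	dcache_inv_poc(buf_va, buf_pa, len);       before the CPU reads it
 */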

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction.  Within the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}
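
/*
 * Usage sketch (comment only, not compiled): the transmit-side counterpart
 * of the receive example above.  Before a device reads a buffer via DMA,
 * any dirty cachelines must reach main memory.  The names are hypothetical:
 *
 *	fill_buffer(buf_va, len);               hypothetical CPU writes
 *	dcache_wb_poc(buf_va, buf_pa, len);     then start the DMA read
 */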

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}
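
/*
 * Usage sketch (comment only, not compiled): cp15_ttbr_set() is the sort of
 * primitive an address-space switch uses.  It flushes the branch predictor
 * and all non-global TLB entries itself, so the caller only supplies the new
 * translation table base, with any required TTBR attribute bits already
 * merged in.  The variable name is hypothetical:
 *
 *	cp15_ttbr_set(new_ttb);
 */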

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
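
/*
 * Usage sketch (comment only, not compiled): checking whether a user
 * address is readable without actually touching it, via the PL0 read
 * translation.  The intr_disable()/intr_restore() pair shows the required
 * interrupt discipline; uva is a hypothetical user address:
 *
 *	register_t s;
 *	int error;
 *
 *	s = intr_disable();
 *	error = cp15_ats1cur_check(uva);
 *	intr_restore(s);
 *	if (error != 0)
 *		return (EFAULT);
 */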
#endif /* __ARM_ARCH >= 6 */

#endif /* !MACHINE_CPU_V6_H */