/*-
 * Copyright (c) 2014 Robin Randhawa
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#include <machine/param.h>
__FBSDID("$FreeBSD: releng/11.2/sys/arm64/arm64/cpufunc_asm.S 305884 2016-09-16 12:42:36Z andrew $");

/*
 * FIXME:
 * Need big.LITTLE awareness at some point.
 * Using arm64_p[id]cache_line_size may not be the best option.
 * Need better SMP awareness.
 */
	.text
	.align	2

.Lpage_mask:
	.word	PAGE_MASK
/*
 * Macro to handle the cache. This takes the start address in x0 and the
 * length in x1. It will corrupt x0, x1, x2, x3, and x4.
 */
.macro cache_handle_range dcop = 0, ic = 0, icop = 0
.if \ic == 0
	ldr	x3, =dcache_line_size	/* Load the D cache line size */
.else
	ldr	x3, =idcache_line_size	/* Load the I & D cache line size */
.endif
	ldr	x3, [x3]
	sub	x4, x3, #1		/* Get the address mask */
	and	x2, x0, x4		/* Get the low bits of the address */
	add	x1, x1, x2		/* Add these to the size */
	bic	x0, x0, x4		/* Clear the low bits of the address */
1:
	dc	\dcop, x0		/* Clean/invalidate the D cache line */
	dsb	ish			/* Wait for the operation to complete */
.if \ic != 0
	ic	\icop, x0		/* Invalidate the I cache line */
	dsb	ish
.endif
	add	x0, x0, x3		/* Move to the next line */
	subs	x1, x1, x3		/* Reduce the size */
	b.hi	1b			/* Check if we are done */
.if \ic != 0
	isb				/* Discard any prefetched instructions */
.endif
	ret
.endm
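
/*
 * For illustration only, a rough C equivalent of the loop above (not part
 * of the build; the names are hypothetical):
 *
 *	void
 *	cache_handle_range(uintptr_t va, size_t len)
 *	{
 *		size_t line = dcache_line_size;
 *		ssize_t resid;
 *
 *		len += va & (line - 1);		extend to cover a partial
 *		va &= ~(uintptr_t)(line - 1);	first line, then align down
 *		resid = len;
 *		do {
 *			dc_op(va);		dc \dcop (and optional ic \icop)
 *			va += line;
 *			resid -= line;
 *		} while (resid > 0);
 *	}
 */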

ENTRY(arm64_nullop)
	ret
END(arm64_nullop)

/*
 * Generic functions to read/modify/write the internal system registers
 * (the AArch64 equivalent of the ARMv7 coprocessor registers).
 */

ENTRY(arm64_setttb)
	dsb	ish			/* Complete outstanding page table writes */
	msr	ttbr0_el1, x0		/* Install the new translation table base */
	dsb	ish
	isb				/* Synchronize context with the new TTBR */
	ret
END(arm64_setttb)
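
/*
 * A hypothetical caller, for illustration only: switching to a new address
 * space would install the new table and then discard stale translations,
 * roughly:
 *
 *	arm64_setttb(vtophys(new_l0_table));
 *	arm64_tlb_flushID();
 */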

ENTRY(arm64_tlb_flushID)
#ifdef SMP
	tlbi	vmalle1is		/* Invalidate all EL1 entries, broadcast */
#else
	tlbi	vmalle1			/* Invalidate all EL1 entries, local CPU */
#endif
	dsb	ish			/* Wait for the invalidation to complete */
	isb
	ret
END(arm64_tlb_flushID)

ENTRY(arm64_tlb_flushID_SE)
	ldr	x1, .Lpage_mask
	bic	x0, x0, x1		/* Clear the page offset from the VA */
#ifdef SMP
	tlbi	vae1is, x0		/* Invalidate this VA's entry, broadcast */
#else
	tlbi	vae1, x0		/* Invalidate this VA's entry, local CPU */
#endif
	dsb	ish
	isb
	ret
END(arm64_tlb_flushID_SE)
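
/*
 * For illustration, a hypothetical use of the two flavours above: after
 * changing a single mapping, a caller such as the pmap layer would flush
 * just that entry, while wholesale changes flush everything, roughly:
 *
 *	arm64_tlb_flushID_SE(va);	one entry, by virtual address
 *	arm64_tlb_flushID();		the entire TLB
 */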

/*
 * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
 *
 * Write back (clean) the D cache over the range to the Point of Coherency.
 */
ENTRY(arm64_dcache_wb_range)
	cache_handle_range	dcop = cvac
END(arm64_dcache_wb_range)

/*
 * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
 *
 * Write back, then invalidate, the D cache over the range.
 */
ENTRY(arm64_dcache_wbinv_range)
	cache_handle_range	dcop = civac
END(arm64_dcache_wbinv_range)

/*
 * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
 *
 * Invalidate the D cache over the range without writing it back.
 *
 * Note: we must not invalidate more than the requested range, as plain
 * invalidation discards dirty lines. If the range is too large to walk
 * line by line, the caller must instead write back and invalidate the
 * entire cache.
 */
ENTRY(arm64_dcache_inv_range)
	cache_handle_range	dcop = ivac
END(arm64_dcache_inv_range)
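
/*
 * A caller-side sketch of that fallback policy (hypothetical helper and
 * threshold names, for illustration only):
 *
 *	if (len > dcache_size_threshold)
 *		dcache_wbinv_all();		hypothetical whole-cache op
 *	else
 *		arm64_dcache_inv_range(va, len);
 */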

/*
 * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
 *
 * Write back and invalidate the D cache, and invalidate the I cache,
 * over the range.
 */
ENTRY(arm64_idcache_wbinv_range)
	cache_handle_range	dcop = civac, ic = 1, icop = ivau
END(arm64_idcache_wbinv_range)

/*
 * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
 *
 * Clean the D cache and invalidate the I cache over the range, making
 * newly written instructions visible to instruction fetch.
 */
ENTRY(arm64_icache_sync_range)
	cache_handle_range	dcop = cvau, ic = 1, icop = ivau
END(arm64_icache_sync_range)
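
/*
 * For reference, the ARMv8-A cache maintenance operations used above:
 *
 *	dc cvac		Clean D cache line by VA to the Point of Coherency
 *	dc civac	Clean and invalidate by VA to the Point of Coherency
 *	dc ivac		Invalidate by VA to the Point of Coherency
 *	dc cvau		Clean by VA to the Point of Unification
 *	ic ivau		Invalidate I cache line by VA to the Point of Unification
 */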