1 /*
2 * SPDX-License-Identifier: CDDL 1.0
3 *
4 * Copyright 2022 Christos Margiolis <christos@FreeBSD.org>
5 * Copyright 2022 Mark Johnston <markj@FreeBSD.org>
6 */
7
8 #include <sys/param.h>
9 #include <sys/bitset.h>
10 #include <sys/cred.h>
11 #include <sys/eventhandler.h>
12 #include <sys/kernel.h>
13 #include <sys/lock.h>
14 #include <sys/malloc.h>
15 #include <sys/proc.h>
16 #include <sys/queue.h>
17 #include <sys/sx.h>
18
19 #include <vm/vm.h>
20 #include <vm/vm_param.h>
21 #include <vm/pmap.h>
22 #include <vm/vm_map.h>
23 #include <vm/vm_kern.h>
24 #include <vm/vm_object.h>
25
26 #include <cddl/dev/dtrace/dtrace_cddl.h>
27
28 #include "kinst.h"
29 #include "kinst_isa.h"
30
31 /*
32 * We can have 4KB/32B = 128 trampolines per chunk.
33 */
34 #define KINST_TRAMPS_PER_CHUNK (KINST_TRAMPCHUNK_SIZE / KINST_TRAMP_SIZE)
35
36 struct trampchunk {
37 TAILQ_ENTRY(trampchunk) next;
38 uint8_t *addr;
39 /* 0 -> allocated, 1 -> free */
40 BITSET_DEFINE(, KINST_TRAMPS_PER_CHUNK) free;
41 };
42
43 static TAILQ_HEAD(, trampchunk) kinst_trampchunks =
44 TAILQ_HEAD_INITIALIZER(kinst_trampchunks);
45 static struct sx kinst_tramp_sx;
46 SX_SYSINIT(kinst_tramp_sx, &kinst_tramp_sx, "kinst tramp");
47 static eventhandler_tag kinst_thread_ctor_handler;
48 static eventhandler_tag kinst_thread_dtor_handler;
49
/*
 * Allocate, map, and initialize a new page-sized chunk of trampolines and
 * insert its tracker at the head of the global chunk list.  Returns NULL if
 * the kernel VA allocation fails.  Called with kinst_tramp_sx held
 * exclusively; may sleep (M_WAITOK allocations).
 */
static struct trampchunk *
kinst_trampchunk_alloc(void)
{
	struct trampchunk *chunk;
	vm_offset_t trampaddr;
	int error __diagused;

	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	/*
	 * Allocate virtual memory for the trampoline chunk. The returned
	 * address is saved in "trampaddr". To simplify population of
	 * trampolines, we follow the amd64 kernel's code model and allocate
	 * them above KERNBASE, i.e., in the top 2GB of the kernel's virtual
	 * address space. Trampolines must be executable so max_prot must
	 * include VM_PROT_EXECUTE.
	 */
	trampaddr = KERNBASE;
	error = vm_map_find(kernel_map, NULL, 0, &trampaddr,
	    KINST_TRAMPCHUNK_SIZE, 0, VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    0);
	if (error != KERN_SUCCESS) {
		KINST_LOG("trampoline chunk allocation failed: %d", error);
		return (NULL);
	}

	/*
	 * Back the new mapping with physical pages; M_EXEC requests
	 * executable memory.  M_WAITOK means this cannot fail transiently,
	 * hence the KASSERT rather than an error path.
	 */
	error = kmem_back(kernel_object, trampaddr, KINST_TRAMPCHUNK_SIZE,
	    M_WAITOK | M_EXEC);
	KASSERT(error == KERN_SUCCESS, ("kmem_back failed: %d", error));

	/* Fill the chunk with the arch-specific "uninitialized" pattern. */
	KINST_TRAMP_INIT((void *)trampaddr, KINST_TRAMPCHUNK_SIZE);

	/* Allocate a tracker for this chunk. */
	chunk = malloc(sizeof(*chunk), M_KINST, M_WAITOK);
	chunk->addr = (void *)trampaddr;
	/* Mark every trampoline slot free (1 -> free, per struct comment). */
	BIT_FILL(KINST_TRAMPS_PER_CHUNK, &chunk->free);

	TAILQ_INSERT_HEAD(&kinst_trampchunks, chunk, next);

	return (chunk);
}
91
/*
 * Tear down a trampoline chunk: unlink it from the global list, release its
 * backing pages and VA mapping, and free the tracker.  Called with
 * kinst_tramp_sx held exclusively.
 */
static void
kinst_trampchunk_free(struct trampchunk *chunk)
{
	sx_assert(&kinst_tramp_sx, SX_XLOCKED);

	TAILQ_REMOVE(&kinst_trampchunks, chunk, next);
	/* Release the physical pages first, then the virtual mapping. */
	kmem_unback(kernel_object, (vm_offset_t)chunk->addr,
	    KINST_TRAMPCHUNK_SIZE);
	(void)vm_map_remove(kernel_map, (vm_offset_t)chunk->addr,
	    (vm_offset_t)(chunk->addr + KINST_TRAMPCHUNK_SIZE));
	free(chunk, M_KINST);
}
104
105 static uint8_t *
106 kinst_trampoline_alloc_locked(int how)
107 {
108 struct trampchunk *chunk;
109 uint8_t *tramp;
110 int off;
111
112 sx_assert(&kinst_tramp_sx, SX_XLOCKED);
113
114 TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
115 /* All trampolines from this chunk are already allocated. */
116 if ((off = BIT_FFS(KINST_TRAMPS_PER_CHUNK, &chunk->free)) == 0)
117 continue;
118 /* BIT_FFS() returns indices starting at 1 instead of 0. */
119 off--;
120 break;
121 }
122 if (chunk == NULL) {
123 if ((how & M_NOWAIT) != 0)
124 return (NULL);
125
126 /*
127 * We didn't find any free trampoline in the current list,
128 * allocate a new one. If that fails the provider will no
129 * longer be reliable, so try to warn the user.
130 */
131 if ((chunk = kinst_trampchunk_alloc()) == NULL) {
132 static bool once = true;
133
134 if (once) {
135 once = false;
136 KINST_LOG(
137 "kinst: failed to allocate trampoline, "
138 "probes may not fire");
139 }
140 return (NULL);
141 }
142 off = 0;
143 }
144 BIT_CLR(KINST_TRAMPS_PER_CHUNK, off, &chunk->free);
145 tramp = chunk->addr + off * KINST_TRAMP_SIZE;
146 return (tramp);
147 }
148
149 uint8_t *
150 kinst_trampoline_alloc(int how)
151 {
152 uint8_t *tramp;
153
154 sx_xlock(&kinst_tramp_sx);
155 tramp = kinst_trampoline_alloc_locked(how);
156 sx_xunlock(&kinst_tramp_sx);
157 return (tramp);
158 }
159
160 static void
161 kinst_trampoline_dealloc_locked(uint8_t *tramp, bool freechunks)
162 {
163 struct trampchunk *chunk;
164 int off;
165
166 if (tramp == NULL)
167 return;
168
169 TAILQ_FOREACH(chunk, &kinst_trampchunks, next) {
170 for (off = 0; off < KINST_TRAMPS_PER_CHUNK; off++) {
171 if (chunk->addr + off * KINST_TRAMP_SIZE == tramp) {
172 KINST_TRAMP_INIT(tramp, KINST_TRAMP_SIZE);
173 BIT_SET(KINST_TRAMPS_PER_CHUNK, off,
174 &chunk->free);
175 if (freechunks &&
176 BIT_ISFULLSET(KINST_TRAMPS_PER_CHUNK,
177 &chunk->free))
178 kinst_trampchunk_free(chunk);
179 return;
180 }
181 }
182 }
183 panic("%s: did not find trampoline chunk for %p", __func__, tramp);
184 }
185
/*
 * Locked wrapper around kinst_trampoline_dealloc_locked(), freeing the
 * enclosing chunk if the trampoline was its last allocated slot.
 */
void
kinst_trampoline_dealloc(uint8_t *tramp)
{
	sx_xlock(&kinst_tramp_sx);
	kinst_trampoline_dealloc_locked(tramp, true);
	sx_xunlock(&kinst_tramp_sx);
}
193
/*
 * thread_ctor eventhandler: give every newly created thread a private
 * trampoline.  M_WAITOK, so thread creation may sleep here.
 */
static void
kinst_thread_ctor(void *arg __unused, struct thread *td)
{
	td->t_kinst = kinst_trampoline_alloc(M_WAITOK);
}
199
/*
 * thread_dtor eventhandler: release a dying thread's trampoline.  The
 * pointer is cleared before the (possibly sleeping) dealloc call.
 */
static void
kinst_thread_dtor(void *arg __unused, struct thread *td)
{
	void *tramp;

	tramp = td->t_kinst;
	td->t_kinst = NULL;

	/*
	 * This assumes that the thread_dtor event permits sleeping, which
	 * appears to be true for the time being.
	 */
	kinst_trampoline_dealloc(tramp);
}
214
/*
 * Module initialization: register ctor/dtor hooks so future threads get
 * trampolines automatically, then walk every existing thread in the system
 * and assign one to each.  Returns 0 on success or ENOMEM if a trampoline
 * chunk could not be allocated; on failure the unload path is expected to
 * release whatever was set up.
 */
int
kinst_trampoline_init(void)
{
	struct proc *p;
	struct thread *td;
	void *tramp;
	int error;

	kinst_thread_ctor_handler = EVENTHANDLER_REGISTER(thread_ctor,
	    kinst_thread_ctor, NULL, EVENTHANDLER_PRI_ANY);
	kinst_thread_dtor_handler = EVENTHANDLER_REGISTER(thread_dtor,
	    kinst_thread_dtor, NULL, EVENTHANDLER_PRI_ANY);

	error = 0;
	/*
	 * "tramp" carries a pre-allocated trampoline across retry iterations
	 * so work done while the proc lock was dropped is not wasted.
	 */
	tramp = NULL;

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
retry:
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/* Skip threads the ctor hook already serviced. */
			if (td->t_kinst != NULL)
				continue;
			if (tramp == NULL) {
				/*
				 * Try to allocate a trampoline without dropping
				 * the process lock. If all chunks are fully
				 * utilized, we must release the lock and try
				 * again.
				 */
				tramp = kinst_trampoline_alloc_locked(M_NOWAIT);
				if (tramp == NULL) {
					PROC_UNLOCK(p);
					tramp = kinst_trampoline_alloc_locked(
					    M_WAITOK);
					if (tramp == NULL) {
						/*
						 * Let the unload handler clean
						 * up.
						 */
						error = ENOMEM;
						goto out;
					} else
						goto retry;
				}
			}
			td->t_kinst = tramp;
			tramp = NULL;
		}
		PROC_UNLOCK(p);
	}
out:
	sx_xunlock(&kinst_tramp_sx);
	sx_sunlock(&allproc_lock);
	return (error);
}
272
/*
 * Module teardown: unhook the thread ctor/dtor events, strip the trampoline
 * from every thread in the system, and release all trampoline chunks.
 * Always returns 0.
 */
int
kinst_trampoline_deinit(void)
{
	struct trampchunk *chunk, *tmp;
	struct proc *p;
	struct thread *td;

	EVENTHANDLER_DEREGISTER(thread_ctor, kinst_thread_ctor_handler);
	EVENTHANDLER_DEREGISTER(thread_dtor, kinst_thread_dtor_handler);

	sx_slock(&allproc_lock);
	sx_xlock(&kinst_tramp_sx);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td) {
			/*
			 * freechunks == false: the chunks are all freed in
			 * one pass below, so per-thread chunk frees would be
			 * wasted work.
			 */
			kinst_trampoline_dealloc_locked(td->t_kinst, false);
			td->t_kinst = NULL;
		}
		PROC_UNLOCK(p);
	}
	sx_sunlock(&allproc_lock);
	/* SAFE variant: kinst_trampchunk_free() unlinks as we iterate. */
	TAILQ_FOREACH_SAFE(chunk, &kinst_trampchunks, next, tmp)
		kinst_trampchunk_free(chunk);
	sx_xunlock(&kinst_tramp_sx);

	return (0);
}