1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
5 *
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory (Department of Computer Science and
8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9 * DARPA SSITH research programme.
10 *
11 * Portions of this work was supported by Innovate UK project 105694,
12 * "Digital Security by Design (DSbD) Technology Platform Prototype".
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 */
35
36 #include "opt_platform.h"
37
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40
41 #include <sys/param.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/malloc.h>
45 #include <sys/memdesc.h>
46 #include <sys/tree.h>
47 #include <sys/taskqueue.h>
48 #include <sys/lock.h>
49 #include <sys/mutex.h>
50 #include <sys/sysctl.h>
51 #include <vm/vm.h>
52
53 #include <dev/pci/pcireg.h>
54 #include <dev/pci/pcivar.h>
55 #include <machine/bus.h>
56 #include <dev/iommu/busdma_iommu.h>
57 #include <machine/vmparam.h>
58
59 #include "iommu.h"
60 #include "iommu_if.h"
61
/* Allocation tag for all framework-internal allocations in this file. */
static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

/* Protect the global list of registered IOMMU units (iommu_list). */
#define	IOMMU_LIST_LOCK()		mtx_lock(&iommu_mtx)
#define	IOMMU_LIST_UNLOCK()		mtx_unlock(&iommu_mtx)
#define	IOMMU_LIST_ASSERT_LOCKED()	mtx_assert(&iommu_mtx, MA_OWNED)

/* Debug printf; expands to nothing in normal builds. */
#define dprintf(fmt, ...)

static struct mtx iommu_mtx;

/*
 * One list node per registered IOMMU unit; linked into iommu_list by
 * iommu_register() and removed by iommu_unregister().
 */
struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);
77
78 static int
79 iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
80 iommu_gaddr_t size, int flags)
81 {
82 struct iommu_unit *iommu;
83 int error;
84
85 iommu = iodom->iommu;
86
87 error = IOMMU_UNMAP(iommu->dev, iodom, base, size);
88
89 return (error);
90 }
91
92 static int
93 iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
94 iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
95 {
96 struct iommu_unit *iommu;
97 vm_prot_t prot;
98 vm_offset_t va;
99 int error;
100
101 dprintf("%s: base %lx, size %lx\n", __func__, base, size);
102
103 prot = 0;
104 if (eflags & IOMMU_MAP_ENTRY_READ)
105 prot |= VM_PROT_READ;
106 if (eflags & IOMMU_MAP_ENTRY_WRITE)
107 prot |= VM_PROT_WRITE;
108
109 va = base;
110
111 iommu = iodom->iommu;
112
113 error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);
114
115 return (0);
116 }
117
/* Map/unmap callbacks installed into each domain via iommu_domain_init(). */
static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};
122
/*
 * Allocate and initialize a new translation domain on "iommu".
 * Returns NULL if the hardware driver could not allocate one.
 *
 * NOTE(review): iodom->end is assigned before iommu_gas_init_domain() —
 * presumably the GAS allocator derives its usable address range from it,
 * so keep this ordering; confirm against iommu_gas.c.
 */
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->end = VM_MAXUSER_ADDRESS;
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}
139
/*
 * Destroy a domain previously created by iommu_domain_alloc().
 *
 * Tears down the GAS allocator state (only if it was initialized),
 * finalizes the generic domain state, then asks the hardware driver to
 * release its per-domain resources.  Always returns 0.
 */
static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		/* GAS teardown is done under the domain lock. */
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}
162
163 static void
164 iommu_tag_init(struct bus_dma_tag_iommu *t)
165 {
166 bus_addr_t maxaddr;
167
168 maxaddr = BUS_SPACE_MAXADDR;
169
170 t->common.ref_count = 0;
171 t->common.impl = &bus_dma_iommu_impl;
172 t->common.alignment = 1;
173 t->common.boundary = 0;
174 t->common.lowaddr = maxaddr;
175 t->common.highaddr = maxaddr;
176 t->common.maxsize = maxaddr;
177 t->common.nsegments = BUS_SPACE_UNRESTRICTED;
178 t->common.maxsegsz = maxaddr;
179 }
180
181 static struct iommu_ctx *
182 iommu_ctx_alloc(device_t dev, struct iommu_domain *iodom, bool disabled)
183 {
184 struct iommu_unit *iommu;
185 struct iommu_ctx *ioctx;
186
187 iommu = iodom->iommu;
188
189 ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, dev, disabled);
190 if (ioctx == NULL)
191 return (NULL);
192
193 /*
194 * iommu can also be used for non-PCI based devices.
195 * This should be reimplemented as new newbus method with
196 * pci_get_rid() as a default for PCI device class.
197 */
198 ioctx->rid = pci_get_rid(dev);
199
200 return (ioctx);
201 }
202
/*
 * Look up (or create) the IOMMU context for "requester" on "iommu".
 *
 * If the device already has a context it is returned directly.
 * Otherwise a fresh domain plus context are allocated and a busdma tag
 * is set up for the device.  Returns NULL on allocation failure.
 *
 * NOTE(review): "rid" and "rmrr" are accepted for interface parity with
 * other iommu back-ends but are unused here; the rid is re-derived in
 * iommu_ctx_alloc().
 *
 * NOTE(review): the unit lock is dropped between the lookup and the
 * creation path below, so two concurrent callers for the same device
 * could in principle each allocate a context — confirm that callers
 * serialize this.
 */
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_ctx *ioctx;
	struct iommu_domain *iodom;
	struct bus_dma_tag_iommu *tag;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have a domain per each ctx.
	 * So allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	/* Back out the domain if the driver refuses the context. */
	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	/* Build the busdma tag the device will use for DMA through us. */
	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(tag);

	ioctx->domain = iodom;

	return (ioctx);
}
245
246 void
247 iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
248 {
249 struct bus_dma_tag_iommu *tag;
250
251 IOMMU_ASSERT_LOCKED(iommu);
252
253 tag = ioctx->tag;
254
255 IOMMU_CTX_FREE(iommu->dev, ioctx);
256
257 free(tag, M_IOMMU);
258 }
259
260 void
261 iommu_free_ctx(struct iommu_ctx *ioctx)
262 {
263 struct iommu_unit *iommu;
264 struct iommu_domain *iodom;
265 int error;
266
267 iodom = ioctx->domain;
268 iommu = iodom->iommu;
269
270 IOMMU_LOCK(iommu);
271 iommu_free_ctx_locked(iommu, ioctx);
272 IOMMU_UNLOCK(iommu);
273
274 /* Since we have a domain per each ctx, remove the domain too. */
275 error = iommu_domain_free(iodom);
276 if (error)
277 device_printf(iommu->dev, "Could not free a domain\n");
278 }
279
280 static void
281 iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
282 {
283 iommu_gas_free_space(entry);
284
285 if (free)
286 iommu_gas_free_entry(entry);
287 else
288 entry->flags = 0;
289 }
290
291 void
292 iommu_domain_unload(struct iommu_domain *iodom,
293 struct iommu_map_entries_tailq *entries, bool cansleep)
294 {
295 struct iommu_map_entry *entry, *entry1;
296 int error;
297
298 TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
299 KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
300 ("not mapped entry %p %p", iodom, entry));
301 error = iodom->ops->unmap(iodom, entry->start, entry->end -
302 entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
303 KASSERT(error == 0, ("unmap %p error %d", iodom, error));
304 TAILQ_REMOVE(entries, entry, dmamap_link);
305 iommu_domain_free_entry(entry, true);
306 }
307
308 if (TAILQ_EMPTY(entries))
309 return;
310
311 panic("entries map is not empty");
312 }
313
314 int
315 iommu_register(struct iommu_unit *iommu)
316 {
317 struct iommu_entry *entry;
318
319 mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);
320
321 entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
322 entry->iommu = iommu;
323
324 IOMMU_LIST_LOCK();
325 LIST_INSERT_HEAD(&iommu_list, entry, next);
326 IOMMU_LIST_UNLOCK();
327
328 iommu_init_busdma(iommu);
329
330 return (0);
331 }
332
333 int
334 iommu_unregister(struct iommu_unit *iommu)
335 {
336 struct iommu_entry *entry, *tmp;
337
338 IOMMU_LIST_LOCK();
339 LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
340 if (entry->iommu == iommu) {
341 LIST_REMOVE(entry, next);
342 free(entry, M_IOMMU);
343 }
344 }
345 IOMMU_LIST_UNLOCK();
346
347 iommu_fini_busdma(iommu);
348
349 mtx_destroy(&iommu->lock);
350
351 return (0);
352 }
353
354 struct iommu_unit *
355 iommu_find(device_t dev, bool verbose)
356 {
357 struct iommu_entry *entry;
358 struct iommu_unit *iommu;
359 int error;
360
361 IOMMU_LIST_LOCK();
362 LIST_FOREACH(entry, &iommu_list, next) {
363 iommu = entry->iommu;
364 error = IOMMU_FIND(iommu->dev, dev);
365 if (error == 0) {
366 IOMMU_LIST_UNLOCK();
367 return (entry->iommu);
368 }
369 }
370 IOMMU_LIST_UNLOCK();
371
372 return (NULL);
373 }
374
375 void
376 iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
377 bool cansleep __unused)
378 {
379
380 dprintf("%s\n", __func__);
381
382 iommu_domain_free_entry(entry, free);
383 }
384
/*
 * One-time framework initialization: create the mutex that protects the
 * global list of IOMMU units.
 */
static void
iommu_init(void)
{

	mtx_init(&iommu_mtx, "IOMMU", NULL, MTX_DEF);
}

/* Run early in SI_SUB_DRIVERS so the list lock exists before units register. */
SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);
Cache object: 81407070058f19bacfd445957b77b115
|