/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Portions of this work were supported by Innovate UK project 105694,
 * "Digital Security by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/tree.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <vm/vm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <dev/iommu/busdma_iommu.h>
#include <machine/vmparam.h>

#ifdef FDT
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#include "iommu.h"
#include "iommu_if.h"

static MALLOC_DEFINE(M_IOMMU, "IOMMU", "IOMMU framework");

#define	IOMMU_LIST_LOCK()		sx_xlock(&iommu_sx)
#define	IOMMU_LIST_UNLOCK()		sx_xunlock(&iommu_sx)
#define	IOMMU_LIST_ASSERT_LOCKED()	sx_assert(&iommu_sx, SA_XLOCKED)

#define	dprintf(fmt, ...)

static struct sx iommu_sx;

struct iommu_entry {
	struct iommu_unit *iommu;
	LIST_ENTRY(iommu_entry) next;
};
static LIST_HEAD(, iommu_entry) iommu_list = LIST_HEAD_INITIALIZER(iommu_list);

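/*
 * Domain map/unmap callbacks.  These hand the request off to the
 * hardware-specific driver through the iommu_if kobj interface.
 */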
static int
iommu_domain_unmap_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, int flags)
{
	struct iommu_unit *iommu;
	int error;

	iommu = iodom->iommu;

	error = IOMMU_UNMAP(iommu->dev, iodom, base, size);

	return (error);
}

static int
iommu_domain_map_buf(struct iommu_domain *iodom, iommu_gaddr_t base,
    iommu_gaddr_t size, vm_page_t *ma, uint64_t eflags, int flags)
{
	struct iommu_unit *iommu;
	vm_prot_t prot;
	vm_offset_t va;
	int error;

	dprintf("%s: base %lx, size %lx\n", __func__, base, size);

	prot = 0;
	if (eflags & IOMMU_MAP_ENTRY_READ)
		prot |= VM_PROT_READ;
	if (eflags & IOMMU_MAP_ENTRY_WRITE)
		prot |= VM_PROT_WRITE;

	va = base;

	iommu = iodom->iommu;

	error = IOMMU_MAP(iommu->dev, iodom, va, ma, size, prot);

	return (error);
}

static const struct iommu_domain_map_ops domain_map_ops = {
	.map = iommu_domain_map_buf,
	.unmap = iommu_domain_unmap_buf,
};

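/*
 * Allocate a new domain from the hardware driver and set up the
 * generic address space (GAS) allocator for it.  The driver must set
 * the domain end address before returning, as asserted below.
 */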
static struct iommu_domain *
iommu_domain_alloc(struct iommu_unit *iommu)
{
	struct iommu_domain *iodom;

	iodom = IOMMU_DOMAIN_ALLOC(iommu->dev, iommu);
	if (iodom == NULL)
		return (NULL);

	KASSERT(iodom->end != 0, ("domain end is not set"));

	iommu_domain_init(iommu, iodom, &domain_map_ops);
	iodom->iommu = iommu;
	iommu_gas_init_domain(iodom);

	return (iodom);
}

static int
iommu_domain_free(struct iommu_domain *iodom)
{
	struct iommu_unit *iommu;

	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);

	if ((iodom->flags & IOMMU_DOMAIN_GAS_INITED) != 0) {
		IOMMU_DOMAIN_LOCK(iodom);
		iommu_gas_fini_domain(iodom);
		IOMMU_DOMAIN_UNLOCK(iodom);
	}

	iommu_domain_fini(iodom);

	IOMMU_DOMAIN_FREE(iommu->dev, iodom);
	IOMMU_UNLOCK(iommu);

	return (0);
}

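/*
 * Initialize the busdma tag embedded in a context.  DMA addresses are
 * capped at the smaller of the domain end address and BUS_SPACE_MAXADDR.
 */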
static void
iommu_tag_init(struct iommu_domain *iodom, struct bus_dma_tag_iommu *t)
{
	bus_addr_t maxaddr;

	maxaddr = MIN(iodom->end, BUS_SPACE_MAXADDR);

	t->common.ref_count = 0;
	t->common.impl = &bus_dma_iommu_impl;
	t->common.alignment = 1;
	t->common.boundary = 0;
	t->common.lowaddr = maxaddr;
	t->common.highaddr = maxaddr;
	t->common.maxsize = maxaddr;
	t->common.nsegments = BUS_SPACE_UNRESTRICTED;
	t->common.maxsegsz = maxaddr;
}

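/*
 * Allocate a device context within the given domain.  The context is
 * not usable until iommu_ctx_init() has been called for it.
 */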
static struct iommu_ctx *
iommu_ctx_alloc(device_t requester, struct iommu_domain *iodom, bool disabled)
{
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;

	iommu = iodom->iommu;

	ioctx = IOMMU_CTX_ALLOC(iommu->dev, iodom, requester, disabled);
	if (ioctx == NULL)
		return (NULL);

	ioctx->domain = iodom;

	return (ioctx);
}

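/*
 * Perform driver-level initialization of a context and create the
 * busdma tag that the requester will use for DMA through it.
 */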
static int
iommu_ctx_init(device_t requester, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	error = IOMMU_CTX_INIT(iommu->dev, ioctx);
	if (error)
		return (error);

	tag = ioctx->tag = malloc(sizeof(struct bus_dma_tag_iommu),
	    M_IOMMU, M_WAITOK | M_ZERO);
	tag->owner = requester;
	tag->ctx = ioctx;
	tag->ctx->domain = iodom;

	iommu_tag_init(iodom, tag);

	return (error);
}

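/*
 * Find the registered iommu_unit backed by the given device_t.
 */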
static struct iommu_unit *
iommu_lookup(device_t dev)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		if (iommu->dev == dev) {
			IOMMU_LIST_UNLOCK();
			return (iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

#ifdef FDT
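/*
 * Resolve the IOMMU referenced by the "iommus" FDT property of the
 * requesting device and allocate a domain and a context on it.  The
 * "channel" argument selects which entry of the property to use.
 */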
struct iommu_ctx *
iommu_get_ctx_ofw(device_t dev, int channel)
{
	struct iommu_domain *iodom;
	struct iommu_unit *iommu;
	struct iommu_ctx *ioctx;
	phandle_t node, parent;
	device_t iommu_dev;
	pcell_t *cells;
	int niommus;
	int ncells;
	int error;

	node = ofw_bus_get_node(dev);
	if (node <= 0) {
		device_printf(dev,
		    "%s called on a non-OFW based device.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_get_length(node,
	    "iommus", "#iommu-cells", &niommus);
	if (error) {
		device_printf(dev, "%s can't get iommu list.\n", __func__);
		return (NULL);
	}

	if (niommus == 0) {
		device_printf(dev, "%s iommu list is empty.\n", __func__);
		return (NULL);
	}

	error = ofw_bus_parse_xref_list_alloc(node, "iommus", "#iommu-cells",
	    channel, &parent, &ncells, &cells);
	if (error != 0) {
		device_printf(dev, "%s can't get iommu device xref.\n",
		    __func__);
		return (NULL);
	}

	iommu_dev = OF_device_from_xref(parent);
	if (iommu_dev == NULL) {
		device_printf(dev, "%s can't get iommu device.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	iommu = iommu_lookup(iommu_dev);
	if (iommu == NULL) {
		device_printf(dev, "%s can't lookup iommu.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	/*
	 * In our current configuration we have one domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL) {
		device_printf(dev, "%s can't allocate domain.\n", __func__);
		OF_prop_free(cells);
		return (NULL);
	}

	ioctx = iommu_ctx_alloc(dev, iodom, false);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		OF_prop_free(cells);
		return (NULL);
	}

	ioctx->domain = iodom;

	error = IOMMU_OFW_MD_DATA(iommu->dev, ioctx, cells, ncells);
	/* The cells array was allocated by ofw_bus_parse_xref_list_alloc(). */
	OF_prop_free(cells);
	if (error) {
		device_printf(dev, "%s can't set MD data\n", __func__);
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(dev, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}
#endif

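/*
 * Return the context for the requester, allocating a fresh domain and
 * context if the requester is not yet known to this IOMMU unit.
 */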
struct iommu_ctx *
iommu_get_ctx(struct iommu_unit *iommu, device_t requester,
    uint16_t rid, bool disabled, bool rmrr)
{
	struct iommu_domain *iodom;
	struct iommu_ctx *ioctx;
	int error;

	IOMMU_LOCK(iommu);
	ioctx = IOMMU_CTX_LOOKUP(iommu->dev, requester);
	if (ioctx) {
		IOMMU_UNLOCK(iommu);
		return (ioctx);
	}
	IOMMU_UNLOCK(iommu);

	/*
	 * In our current configuration we have one domain per ctx,
	 * so allocate a domain first.
	 */
	iodom = iommu_domain_alloc(iommu);
	if (iodom == NULL)
		return (NULL);

	ioctx = iommu_ctx_alloc(requester, iodom, disabled);
	if (ioctx == NULL) {
		iommu_domain_free(iodom);
		return (NULL);
	}

	error = iommu_ctx_init(requester, ioctx);
	if (error) {
		IOMMU_CTX_FREE(iommu->dev, ioctx);
		iommu_domain_free(iodom);
		return (NULL);
	}

	return (ioctx);
}

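/*
 * Release a context and its busdma tag.  The IOMMU lock must be held.
 */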
void
iommu_free_ctx_locked(struct iommu_unit *iommu, struct iommu_ctx *ioctx)
{
	struct bus_dma_tag_iommu *tag;

	IOMMU_ASSERT_LOCKED(iommu);

	tag = ioctx->tag;

	IOMMU_CTX_FREE(iommu->dev, ioctx);

	free(tag, M_IOMMU);
}

void
iommu_free_ctx(struct iommu_ctx *ioctx)
{
	struct iommu_unit *iommu;
	struct iommu_domain *iodom;
	int error;

	iodom = ioctx->domain;
	iommu = iodom->iommu;

	IOMMU_LOCK(iommu);
	iommu_free_ctx_locked(iommu, ioctx);
	IOMMU_UNLOCK(iommu);

	/* Since we have one domain per ctx, remove the domain too. */
	error = iommu_domain_free(iodom);
	if (error)
		device_printf(iommu->dev, "Could not free a domain\n");
}

static void
iommu_domain_free_entry(struct iommu_map_entry *entry, bool free)
{
	iommu_gas_free_space(entry);

	if (free)
		iommu_gas_free_entry(entry);
	else
		entry->flags = 0;
}

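/*
 * Unmap every entry on the list and release the GAS space it occupied.
 * The list must contain only mapped entries and is empty on return.
 */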
void
iommu_domain_unload(struct iommu_domain *iodom,
    struct iommu_map_entries_tailq *entries, bool cansleep)
{
	struct iommu_map_entry *entry, *entry1;
	int error __diagused;

	TAILQ_FOREACH_SAFE(entry, entries, dmamap_link, entry1) {
		KASSERT((entry->flags & IOMMU_MAP_ENTRY_MAP) != 0,
		    ("not mapped entry %p %p", iodom, entry));
		error = iodom->ops->unmap(iodom, entry->start, entry->end -
		    entry->start, cansleep ? IOMMU_PGF_WAITOK : 0);
		KASSERT(error == 0, ("unmap %p error %d", iodom, error));
		TAILQ_REMOVE(entries, entry, dmamap_link);
		iommu_domain_free_entry(entry, true);
	}

	if (!TAILQ_EMPTY(entries))
		panic("entries map is not empty");
}

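/*
 * Register a new IOMMU unit with the framework and hook it up to the
 * busdma machinery.  Called by hardware drivers at attach time.
 */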
int
iommu_register(struct iommu_unit *iommu)
{
	struct iommu_entry *entry;

	mtx_init(&iommu->lock, "IOMMU", NULL, MTX_DEF);

	entry = malloc(sizeof(struct iommu_entry), M_IOMMU, M_WAITOK | M_ZERO);
	entry->iommu = iommu;

	IOMMU_LIST_LOCK();
	LIST_INSERT_HEAD(&iommu_list, entry, next);
	IOMMU_LIST_UNLOCK();

	iommu_init_busdma(iommu);

	return (0);
}

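/*
 * Remove an IOMMU unit from the framework.  Called by hardware drivers
 * at detach time.
 */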
465
466 int
467 iommu_unregister(struct iommu_unit *iommu)
468 {
469 struct iommu_entry *entry, *tmp;
470
471 IOMMU_LIST_LOCK();
472 LIST_FOREACH_SAFE(entry, &iommu_list, next, tmp) {
473 if (entry->iommu == iommu) {
474 LIST_REMOVE(entry, next);
475 free(entry, M_IOMMU);
476 }
477 }
478 IOMMU_LIST_UNLOCK();
479
480 iommu_fini_busdma(iommu);
481
482 mtx_destroy(&iommu->lock);
483
484 return (0);
485 }
486
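/*
 * Ask each registered IOMMU unit whether it translates for the given
 * device; return the first unit that claims it.
 */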
struct iommu_unit *
iommu_find(device_t dev, bool verbose)
{
	struct iommu_entry *entry;
	struct iommu_unit *iommu;
	int error;

	IOMMU_LIST_LOCK();
	LIST_FOREACH(entry, &iommu_list, next) {
		iommu = entry->iommu;
		error = IOMMU_FIND(iommu->dev, dev);
		if (error == 0) {
			IOMMU_LIST_UNLOCK();
			return (entry->iommu);
		}
	}
	IOMMU_LIST_UNLOCK();

	return (NULL);
}

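/*
 * Release the GAS space backing a single map entry and, if requested,
 * free the entry itself.
 */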
void
iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
    bool cansleep __unused)
{

	dprintf("%s\n", __func__);

	iommu_domain_free_entry(entry, free);
}

static void
iommu_init(void)
{

	sx_init(&iommu_sx, "IOMMU list");
}

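/*
 * Initialize the global IOMMU list lock early in boot, before any
 * hardware drivers can register.
 */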
SYSINIT(iommu, SI_SUB_DRIVERS, SI_ORDER_FIRST, iommu_init, NULL);