/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <x86/include/apicreg.h>
#include <x86/include/apicvar.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>
#include <dev/pci/pcivar.h>
#include <x86/iommu/iommu_intrmap.h>

static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
    int *is_dmar);
static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
    uint64_t low, uint16_t rid);
static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);

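/*
 * Reserve 'count' consecutive interrupt remapping table entries for the
 * MSI/MSI-X interrupts of device 'src' and return their indices in
 * 'cookies'.  If the source is not behind an interrupt-remapping capable
 * DMAR unit, all cookies are set to -1 and EOPNOTSUPP is returned.
 */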
int
iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
{
        struct dmar_unit *unit;
        vmem_addr_t vmem_res;
        u_int idx, i;
        int error;

        unit = dmar_ir_find(src, NULL, NULL);
        if (unit == NULL || !unit->ir_enabled) {
                for (i = 0; i < count; i++)
                        cookies[i] = -1;
                return (EOPNOTSUPP);
        }

        error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
            &vmem_res);
        if (error != 0) {
                KASSERT(error != EOPNOTSUPP,
                    ("impossible EOPNOTSUPP from vmem"));
                return (error);
        }
        idx = vmem_res;
        for (i = 0; i < count; i++)
                cookies[i] = idx + i;
        return (0);
}

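/*
 * Compute the MSI address/data pair for the interrupt identified by
 * 'cookie'.  Interrupts targeting the DMAR itself are not remapped and
 * use the compatibility (untranslated) format.  For other sources the
 * IRTE selected by the cookie is programmed and the remappable address
 * format, which encodes the IRTE index, is returned.
 */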
int
iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
    uint64_t *addr, uint32_t *data)
{
        struct dmar_unit *unit;
        uint64_t low;
        uint16_t rid;
        int is_dmar;

        unit = dmar_ir_find(src, &rid, &is_dmar);
        if (is_dmar) {
                KASSERT(unit == NULL, ("DMAR cannot translate itself"));

                /*
                 * See VT-d specification, 5.1.6 Remapping Hardware -
                 * Interrupt Programming.
                 */
                *data = vector;
                *addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
                if (x2apic_mode)
                        *addr |= ((uint64_t)cpu & 0xffffff00) << 32;
                else
                        KASSERT(cpu <= 0xff, ("cpu id too big %d", cpu));
                return (0);
        }
        if (unit == NULL || !unit->ir_enabled || cookie == -1)
                return (EOPNOTSUPP);

        low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
            DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
            DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
            DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
        dmar_ir_program_irte(unit, cookie, low, rid);

        if (addr != NULL) {
                /*
                 * See VT-d specification, 5.1.5.2 MSI and MSI-X
                 * Register Programming.
                 */
                *addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
                    ((cookie & 0x8000) << 2) | 0x18;
                *data = 0;
        }
        return (0);
}

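/*
 * Release the IRTE backing a previously mapped MSI interrupt.
 */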
int
iommu_unmap_msi_intr(device_t src, u_int cookie)
{
        struct dmar_unit *unit;

        if (cookie == -1)
                return (0);
        unit = dmar_ir_find(src, NULL, NULL);
        return (dmar_ir_free_irte(unit, cookie));
}

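/*
 * Allocate and program an IRTE for an I/O APIC pin and return, via
 * 'hi'/'lo', the remappable-format redirection table entry encoding the
 * IRTE index, for the caller to write into the I/O APIC.
 */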
int
iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
{
        struct dmar_unit *unit;
        vmem_addr_t vmem_res;
        uint64_t low, iorte;
        u_int idx;
        int error;
        uint16_t rid;

        unit = dmar_find_ioapic(ioapic_id, &rid);
        if (unit == NULL || !unit->ir_enabled) {
                *cookie = -1;
                return (EOPNOTSUPP);
        }

        error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
        if (error != 0) {
                KASSERT(error != EOPNOTSUPP,
                    ("impossible EOPNOTSUPP from vmem"));
                return (error);
        }
        idx = vmem_res;
        low = 0;
        switch (irq) {
        case IRQ_EXTINT:
                low |= DMAR_IRTE1_DLM_ExtINT;
                break;
        case IRQ_NMI:
                low |= DMAR_IRTE1_DLM_NMI;
                break;
        case IRQ_SMI:
                low |= DMAR_IRTE1_DLM_SMI;
                break;
        default:
                KASSERT(vector != 0, ("No vector for IRQ %u", irq));
                low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
                break;
        }
        low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
            DMAR_IRTE1_DST_xAPIC(cpu)) |
            (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
            DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
        dmar_ir_program_irte(unit, idx, low, rid);

        if (hi != NULL) {
                /*
                 * See VT-d specification, 5.1.5.1 I/OxAPIC
                 * Programming.
                 */
                iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
                    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
                    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
                    (activehi ? IOART_INTAHI : IOART_INTALO) |
                    IOART_DELFIXED | vector;
                *hi = iorte >> 32;
                *lo = iorte;
        }
        *cookie = idx;
        return (0);
}

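/*
 * Release the IRTE used by an I/O APIC pin and reset the cookie.
 */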
int
iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
{
        struct dmar_unit *unit;
        u_int idx;

        idx = *cookie;
        if (idx == -1)
                return (0);
        *cookie = -1;
        unit = dmar_find_ioapic(ioapic_id, NULL);
        KASSERT(unit != NULL && unit->ir_enabled,
            ("unmap: cookie %d unit %p", idx, unit));
        return (dmar_ir_free_irte(unit, idx));
}

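/*
 * Find the DMAR unit responsible for remapping interrupts from 'src',
 * and optionally report the source (requester) id to be written into
 * the IRTE.  Returns NULL and sets *is_dmar when 'src' is a DMAR unit
 * itself, whose interrupts are never remapped.
 */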
static struct dmar_unit *
dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
{
        devclass_t src_class;
        struct dmar_unit *unit;

        /*
         * Determine whether the interrupt source generates FSB
         * interrupts directly.  If so, it is either the DMAR, whose
         * interrupts are not remapped, or the HPET, whose interrupts
         * are remapped.  For the HPET, the source id is reported by
         * the HPET record in the DMAR ACPI table.
         */
        if (is_dmar != NULL)
                *is_dmar = FALSE;
        src_class = device_get_devclass(src);
        if (src_class == devclass_find("dmar")) {
                unit = NULL;
                if (is_dmar != NULL)
                        *is_dmar = TRUE;
        } else if (src_class == devclass_find("hpet")) {
                unit = dmar_find_hpet(src, rid);
        } else {
                unit = dmar_find(src);
                if (unit != NULL && rid != NULL)
                        dmar_get_requester(src, rid);
        }
        return (unit);
}

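/*
 * Write the IRTE at index 'idx'.  The caller supplies the low word; the
 * high word is built here to enable source (requester id) validation.
 * An IEC invalidation flushes any stale entry from the hardware cache.
 */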
static void
dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
    uint16_t rid)
{
        dmar_irte_t *irte;
        uint64_t high;

        KASSERT(idx < unit->irte_cnt,
            ("bad cookie %d %d", idx, unit->irte_cnt));
        irte = &(unit->irt[idx]);
        high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
            DMAR_IRTE2_SID_RID(rid);
        device_printf(unit->dev,
            "programming irte[%d] rid %#x high %#jx low %#jx\n",
            idx, rid, (uintmax_t)high, (uintmax_t)low);
        DMAR_LOCK(unit);
        if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
                /*
                 * The IRTE is already valid.  Assume that the request
                 * is to remap the interrupt for balancing, so only the
                 * low word of the IRTE needs to be changed.  Assert
                 * that the high word contains the expected value.
                 */
                KASSERT(irte->irte2 == high,
                    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
                    (uintmax_t)high));
                dmar_pte_update(&irte->irte1, low);
        } else {
                dmar_pte_store(&irte->irte2, high);
                dmar_pte_store(&irte->irte1, low);
        }
        dmar_qi_invalidate_iec(unit, idx, 1);
        DMAR_UNLOCK(unit);
}

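/*
 * Clear the IRTE at index 'cookie', invalidate the corresponding
 * interrupt entry cache entry, and return the index to the allocator.
 */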
static int
dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
{
        dmar_irte_t *irte;

        KASSERT(unit != NULL && unit->ir_enabled,
            ("unmap: cookie %d unit %p", cookie, unit));
        KASSERT(cookie < unit->irte_cnt,
            ("bad cookie %u %u", cookie, unit->irte_cnt));
        irte = &(unit->irt[cookie]);
        dmar_pte_clear(&irte->irte1);
        dmar_pte_clear(&irte->irte2);
        DMAR_LOCK(unit);
        dmar_qi_invalidate_iec(unit, cookie, 1);
        DMAR_UNLOCK(unit);
        vmem_free(unit->irtids, cookie, 1);
        return (0);
}

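/*
 * Round v up to the nearest power of two; values that already are
 * powers of two are returned unchanged.
 */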
static u_int
clp2(u_int v)
{

        return (powerof2(v) ? v : 1 << fls(v));
}

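/*
 * Set up interrupt remapping for the unit: allocate the interrupt
 * remapping table and its index allocator, load the table pointer into
 * the hardware, reprogram the already configured interrupt sources, and
 * finally enable remapping.  Requires queued invalidation and can be
 * turned off with the hw.dmar.ir tunable.
 */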
int
dmar_init_irt(struct dmar_unit *unit)
{

        if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
                return (0);
        unit->ir_enabled = 1;
        TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
        if (!unit->ir_enabled)
                return (0);
        if (!unit->qi_enabled) {
                unit->ir_enabled = 0;
                if (bootverbose)
                        device_printf(unit->dev,
                            "QI disabled, disabling interrupt remapping\n");
                return (0);
        }
        unit->irte_cnt = clp2(num_io_irqs);
        unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(kernel_arena,
            unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
            dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
            VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
        if (unit->irt == NULL)
                return (ENOMEM);
        unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
        unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
            M_FIRSTFIT | M_NOWAIT);
        DMAR_LOCK(unit);
        dmar_load_irt_ptr(unit);
        dmar_qi_invalidate_iec_glob(unit);
        DMAR_UNLOCK(unit);

        /*
         * Initialize mappings for already configured interrupt pins.
         * Required, because otherwise, once remapping is enabled, the
         * interrupts would fault due to missing IRTEs.
         */
        intr_reprogram();

        DMAR_LOCK(unit);
        dmar_enable_ir(unit);
        DMAR_UNLOCK(unit);
        return (0);
}

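/*
 * Tear down interrupt remapping for the unit and release the interrupt
 * remapping table and its index allocator.
 */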
void
dmar_fini_irt(struct dmar_unit *unit)
{

        unit->ir_enabled = 0;
        if (unit->irt != NULL) {
                dmar_disable_ir(unit);
                dmar_qi_invalidate_iec_glob(unit);
                vmem_destroy(unit->irtids);
                kmem_free(kernel_arena, (vm_offset_t)unit->irt,
                    unit->irte_cnt * sizeof(dmar_irte_t));
        }
}