/*-
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/x86/iommu/intel_qi.c 296272 2016-03-01 17:47:32Z jhb $");

#include "opt_acpi.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/vmem.h>
#include <machine/bus.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <machine/cpu.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/intel_reg.h>
#include <x86/iommu/busdma_dmar.h>
#include <x86/iommu/intel_dmar.h>

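/*
 * Return true if the wait descriptor identified by *pseq has already
 * been completed by the hardware, i.e. the sequence number written
 * back by the corresponding wait descriptor has caught up with it,
 * taking generation counter rollover into account.
 */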
static bool
dmar_qi_seq_processed(const struct dmar_unit *unit,
    const struct dmar_qi_genseq *pseq)
{

        return (pseq->gen < unit->inv_waitd_gen ||
            (pseq->gen == unit->inv_waitd_gen &&
            pseq->seq <= unit->inv_waitd_seq_hw));
}

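/*
 * Enable queued invalidation: set the QIE bit in the global command
 * register and busy-wait until the status register acknowledges it.
 */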
static int
dmar_enable_qi(struct dmar_unit *unit)
{

        DMAR_ASSERT_LOCKED(unit);
        unit->hw_gcmd |= DMAR_GCMD_QIE;
        dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
        /* XXXKIB should have a timeout */
        while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) == 0)
                cpu_spinwait();
        return (0);
}

static int
dmar_disable_qi(struct dmar_unit *unit)
{

        DMAR_ASSERT_LOCKED(unit);
        unit->hw_gcmd &= ~DMAR_GCMD_QIE;
        dmar_write4(unit, DMAR_GCMD_REG, unit->hw_gcmd);
        /* XXXKIB should have a timeout */
        while ((dmar_read4(unit, DMAR_GSTS_REG) & DMAR_GSTS_QIES) != 0)
                cpu_spinwait();
        return (0);
}

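/*
 * Publish the software tail pointer to the hardware, making the newly
 * written descriptors visible to the invalidation queue engine.
 */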
static void
dmar_qi_advance_tail(struct dmar_unit *unit)
{

        DMAR_ASSERT_LOCKED(unit);
        dmar_write4(unit, DMAR_IQT_REG, unit->inv_queue_tail);
}

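/*
 * Make sure there is room in the invalidation queue for descr_count
 * descriptors, re-reading the hardware head pointer and busy-waiting
 * until enough entries have been consumed.
 */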
static void
dmar_qi_ensure(struct dmar_unit *unit, int descr_count)
{
        uint32_t head;
        int bytes;

        DMAR_ASSERT_LOCKED(unit);
        bytes = descr_count << DMAR_IQ_DESCR_SZ_SHIFT;
        for (;;) {
                if (bytes <= unit->inv_queue_avail)
                        break;
                /* refill */
                head = dmar_read4(unit, DMAR_IQH_REG);
                head &= DMAR_IQH_MASK;
                unit->inv_queue_avail = head - unit->inv_queue_tail -
                    DMAR_IQ_DESCR_SZ;
                if (head <= unit->inv_queue_tail)
                        unit->inv_queue_avail += unit->inv_queue_size;
                if (bytes <= unit->inv_queue_avail)
                        break;

                /*
                 * No space in the queue, do a busy wait.  Hardware must
                 * make progress.  But first advance the tail to inform
                 * the descriptor streamer about the entries we might
                 * have already filled, otherwise they could clog the
                 * whole queue.
                 */
                dmar_qi_advance_tail(unit);
                unit->inv_queue_full++;
                cpu_spinwait();
        }
        unit->inv_queue_avail -= bytes;
}

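/*
 * Write a single 128-bit invalidation descriptor into the queue as two
 * 64-bit words, wrapping the tail pointer at the end of the ring.
 */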
static void
dmar_qi_emit(struct dmar_unit *unit, uint64_t data1, uint64_t data2)
{

        DMAR_ASSERT_LOCKED(unit);
        *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data1;
        unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
        KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
            ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
            (uintmax_t)unit->inv_queue_size));
        unit->inv_queue_tail &= unit->inv_queue_size - 1;
        *(volatile uint64_t *)(unit->inv_queue + unit->inv_queue_tail) = data2;
        unit->inv_queue_tail += DMAR_IQ_DESCR_SZ / 2;
        KASSERT(unit->inv_queue_tail <= unit->inv_queue_size,
            ("tail overflow 0x%x 0x%jx", unit->inv_queue_tail,
            (uintmax_t)unit->inv_queue_size));
        unit->inv_queue_tail &= unit->inv_queue_size - 1;
}

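/*
 * Emit an invalidation wait descriptor with the requested combination
 * of interrupt, status write and fence flags.  When a status write is
 * requested, the hardware stores seq into inv_waitd_seq_hw on
 * completion.
 */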
static void
dmar_qi_emit_wait_descr(struct dmar_unit *unit, uint32_t seq, bool intr,
    bool memw, bool fence)
{

        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_WAIT_ID |
            (intr ? DMAR_IQ_DESCR_WAIT_IF : 0) |
            (memw ? DMAR_IQ_DESCR_WAIT_SW : 0) |
            (fence ? DMAR_IQ_DESCR_WAIT_FN : 0) |
            (memw ? DMAR_IQ_DESCR_WAIT_SD(seq) : 0),
            memw ? unit->inv_waitd_seq_hw_phys : 0);
}

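/*
 * Allocate the next wait sequence number and emit a wait descriptor
 * for it.  When the 32-bit sequence counter is about to wrap, drain
 * the queue with an intermediate wait, advance the generation counter
 * and restart the sequence numbering from 1.
 */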
static void
dmar_qi_emit_wait_seq(struct dmar_unit *unit, struct dmar_qi_genseq *pseq)
{
        struct dmar_qi_genseq gsec;
        uint32_t seq;

        KASSERT(pseq != NULL, ("wait descriptor with no place for seq"));
        DMAR_ASSERT_LOCKED(unit);
        if (unit->inv_waitd_seq == 0xffffffff) {
                gsec.gen = unit->inv_waitd_gen;
                gsec.seq = unit->inv_waitd_seq;
                dmar_qi_ensure(unit, 1);
                dmar_qi_emit_wait_descr(unit, gsec.seq, false, true, false);
                dmar_qi_advance_tail(unit);
                while (!dmar_qi_seq_processed(unit, &gsec))
                        cpu_spinwait();
                unit->inv_waitd_gen++;
                unit->inv_waitd_seq = 1;
        }
        seq = unit->inv_waitd_seq++;
        pseq->gen = unit->inv_waitd_gen;
        pseq->seq = seq;
        dmar_qi_emit_wait_descr(unit, seq, true, true, false);
}

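/*
 * Wait until the wait descriptor identified by gseq has completed,
 * either by busy-waiting (during early boot or when nowait is
 * requested) or by sleeping until dmar_qi_task() wakes us up.
 */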
static void
dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
    bool nowait)
{

        DMAR_ASSERT_LOCKED(unit);
        unit->inv_seq_waiters++;
        while (!dmar_qi_seq_processed(unit, gseq)) {
                if (cold || nowait) {
                        cpu_spinwait();
                } else {
                        msleep(&unit->inv_seq_waiters, &unit->lock, 0,
                            "dmarse", hz);
                }
        }
        unit->inv_seq_waiters--;
}

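/*
 * Queue IOTLB invalidation requests for the guest address range
 * [base, base + size) of the given domain, splitting the range into
 * naturally aligned blocks.  If pseq is not NULL, also emit a wait
 * descriptor so the caller can later wait for completion.
 */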
void
dmar_qi_invalidate_locked(struct dmar_domain *domain, dmar_gaddr_t base,
    dmar_gaddr_t size, struct dmar_qi_genseq *pseq)
{
        struct dmar_unit *unit;
        dmar_gaddr_t isize;
        int am;

        unit = domain->dmar;
        DMAR_ASSERT_LOCKED(unit);
        for (; size > 0; base += isize, size -= isize) {
                am = calc_am(unit, base, size, &isize);
                dmar_qi_ensure(unit, 1);
                dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV |
                    DMAR_IQ_DESCR_IOTLB_PAGE | DMAR_IQ_DESCR_IOTLB_DW |
                    DMAR_IQ_DESCR_IOTLB_DR |
                    DMAR_IQ_DESCR_IOTLB_DID(domain->domain),
                    base | am);
        }
        if (pseq != NULL) {
                dmar_qi_ensure(unit, 1);
                dmar_qi_emit_wait_seq(unit, pseq);
        }
        dmar_qi_advance_tail(unit);
}

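/*
 * Globally invalidate the context cache and wait for the invalidation
 * to complete.
 */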
void
dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit)
{
        struct dmar_qi_genseq gseq;

        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_ensure(unit, 2);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0);
        dmar_qi_emit_wait_seq(unit, &gseq);
        dmar_qi_advance_tail(unit);
        dmar_qi_wait_for_seq(unit, &gseq, false);
}

void
dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit)
{
        struct dmar_qi_genseq gseq;

        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_ensure(unit, 2);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_IOTLB_INV | DMAR_IQ_DESCR_IOTLB_GLOB |
            DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0);
        dmar_qi_emit_wait_seq(unit, &gseq);
        dmar_qi_advance_tail(unit);
        dmar_qi_wait_for_seq(unit, &gseq, false);
}

void
dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
{
        struct dmar_qi_genseq gseq;

        DMAR_ASSERT_LOCKED(unit);
        dmar_qi_ensure(unit, 2);
        dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0);
        dmar_qi_emit_wait_seq(unit, &gseq);
        dmar_qi_advance_tail(unit);
        dmar_qi_wait_for_seq(unit, &gseq, false);
}

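/*
 * Invalidate the interrupt entry cache for the cnt entries starting at
 * index start, splitting the range into power-of-two sized and aligned
 * blocks as required by the descriptor format.
 */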
void
dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
{
        struct dmar_qi_genseq gseq;
        u_int c, l;

        DMAR_ASSERT_LOCKED(unit);
        KASSERT(start < unit->irte_cnt && start < start + cnt &&
            start + cnt <= unit->irte_cnt,
            ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
        for (; cnt > 0; cnt -= c, start += c) {
                l = ffs(start | cnt) - 1;
                c = 1 << l;
                dmar_qi_ensure(unit, 1);
                dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
                    DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) |
                    DMAR_IQ_DESCR_IEC_IM(l), 0);
        }
        dmar_qi_ensure(unit, 1);
        dmar_qi_emit_wait_seq(unit, &gseq);
        dmar_qi_advance_tail(unit);

        /*
         * The caller of this function, in particular
         * dmar_ir_program_irte(), may be called from a context where
         * sleeping is forbidden (in fact, the intr_table_lock mutex
         * may be held, locked from intr_shuffle_irqs()).  Wait for the
         * invalidation completion using a busy wait.
         *
         * The impact on the interrupt input setup code is small, since
         * the expected overhead is comparable with a chipset register
         * read.  It is more harmful to parallel DMA operations, since
         * we own the dmar unit lock until the whole invalidation queue
         * is processed, which includes requests possibly issued before
         * ours.
         */
        dmar_qi_wait_for_seq(unit, &gseq, true);
}

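/*
 * Interrupt filter for the invalidation completion interrupt; defers
 * the actual processing to the taskqueue.
 */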
int
dmar_qi_intr(void *arg)
{
        struct dmar_unit *unit;

        unit = arg;
        KASSERT(unit->qi_enabled, ("dmar%d: QI is not enabled", unit->unit));
        taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
        return (FILTER_HANDLED);
}

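/*
 * Taskqueue handler: free the map entries whose deferred TLB flushes
 * have completed, acknowledge the invalidation wait completion status
 * and wake up threads sleeping in dmar_qi_wait_for_seq().
 */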
static void
dmar_qi_task(void *arg, int pending __unused)
{
        struct dmar_unit *unit;
        struct dmar_map_entry *entry;
        uint32_t ics;

        unit = arg;

        DMAR_LOCK(unit);
        for (;;) {
                entry = TAILQ_FIRST(&unit->tlb_flush_entries);
                if (entry == NULL)
                        break;
                if ((entry->gseq.gen == 0 && entry->gseq.seq == 0) ||
                    !dmar_qi_seq_processed(unit, &entry->gseq))
                        break;
                TAILQ_REMOVE(&unit->tlb_flush_entries, entry, dmamap_link);
                DMAR_UNLOCK(unit);
                dmar_domain_free_entry(entry, (entry->flags &
                    DMAR_MAP_ENTRY_QI_NF) == 0);
                DMAR_LOCK(unit);
        }
        ics = dmar_read4(unit, DMAR_ICS_REG);
        if ((ics & DMAR_ICS_IWC) != 0) {
                ics = DMAR_ICS_IWC;
                dmar_write4(unit, DMAR_ICS_REG, ics);
        }
        if (unit->inv_seq_waiters > 0)
                wakeup(&unit->inv_seq_waiters);
        DMAR_UNLOCK(unit);
}

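/*
 * Allocate the invalidation queue, program it into the hardware and
 * enable queued invalidation, unless QI is unsupported, the unit
 * reports caching mode, or QI is disabled by the hw.dmar.qi tunable.
 */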
int
dmar_init_qi(struct dmar_unit *unit)
{
        uint64_t iqa;
        uint32_t ics;
        int qi_sz;

        if (!DMAR_HAS_QI(unit) || (unit->hw_cap & DMAR_CAP_CM) != 0)
                return (0);
        unit->qi_enabled = 1;
        TUNABLE_INT_FETCH("hw.dmar.qi", &unit->qi_enabled);
        if (!unit->qi_enabled)
                return (0);

        TAILQ_INIT(&unit->tlb_flush_entries);
        TASK_INIT(&unit->qi_task, 0, dmar_qi_task, unit);
        unit->qi_taskqueue = taskqueue_create_fast("dmar", M_WAITOK,
            taskqueue_thread_enqueue, &unit->qi_taskqueue);
        taskqueue_start_threads(&unit->qi_taskqueue, 1, PI_AV,
            "dmar%d qi taskq", unit->unit);

        unit->inv_waitd_gen = 0;
        unit->inv_waitd_seq = 1;

        qi_sz = DMAR_IQA_QS_DEF;
        TUNABLE_INT_FETCH("hw.dmar.qi_size", &qi_sz);
        if (qi_sz > DMAR_IQA_QS_MAX)
                qi_sz = DMAR_IQA_QS_MAX;
        unit->inv_queue_size = (1ULL << qi_sz) * PAGE_SIZE;
        /* Reserve one descriptor to prevent wraparound. */
        unit->inv_queue_avail = unit->inv_queue_size - DMAR_IQ_DESCR_SZ;

        /* Reads of the invalidation queue by the DMAR are always coherent. */
        unit->inv_queue = kmem_alloc_contig(kernel_arena, unit->inv_queue_size,
            M_WAITOK | M_ZERO, 0, dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
        unit->inv_waitd_seq_hw_phys = pmap_kextract(
            (vm_offset_t)&unit->inv_waitd_seq_hw);

        DMAR_LOCK(unit);
        dmar_write8(unit, DMAR_IQT_REG, 0);
        iqa = pmap_kextract(unit->inv_queue);
        iqa |= qi_sz;
        dmar_write8(unit, DMAR_IQA_REG, iqa);
        dmar_enable_qi(unit);
        ics = dmar_read4(unit, DMAR_ICS_REG);
        if ((ics & DMAR_ICS_IWC) != 0) {
                ics = DMAR_ICS_IWC;
                dmar_write4(unit, DMAR_ICS_REG, ics);
        }
        dmar_enable_qi_intr(unit);
        DMAR_UNLOCK(unit);

        return (0);
}

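/*
 * Tear down queued invalidation: drain the taskqueue, quiesce the
 * invalidation queue, disable the completion interrupt and QI itself,
 * then free the queue memory.
 */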
void
dmar_fini_qi(struct dmar_unit *unit)
{
        struct dmar_qi_genseq gseq;

        if (!unit->qi_enabled)
                return;
        taskqueue_drain(unit->qi_taskqueue, &unit->qi_task);
        taskqueue_free(unit->qi_taskqueue);
        unit->qi_taskqueue = NULL;

        DMAR_LOCK(unit);
        /* quiesce */
        dmar_qi_ensure(unit, 1);
        dmar_qi_emit_wait_seq(unit, &gseq);
        dmar_qi_advance_tail(unit);
        dmar_qi_wait_for_seq(unit, &gseq, false);
        /* only after the quiesce, disable queue */
        dmar_disable_qi_intr(unit);
        dmar_disable_qi(unit);
        KASSERT(unit->inv_seq_waiters == 0,
            ("dmar%d: waiters on disabled queue", unit->unit));
        DMAR_UNLOCK(unit);

        kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size);
        unit->inv_queue = 0;
        unit->inv_queue_size = 0;
        unit->qi_enabled = 0;
}

void
dmar_enable_qi_intr(struct dmar_unit *unit)
{
        uint32_t iectl;

        DMAR_ASSERT_LOCKED(unit);
        KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
        iectl = dmar_read4(unit, DMAR_IECTL_REG);
        iectl &= ~DMAR_IECTL_IM;
        dmar_write4(unit, DMAR_IECTL_REG, iectl);
}

void
dmar_disable_qi_intr(struct dmar_unit *unit)
{
        uint32_t iectl;

        DMAR_ASSERT_LOCKED(unit);
        KASSERT(DMAR_HAS_QI(unit), ("dmar%d: QI is not supported", unit->unit));
        iectl = dmar_read4(unit, DMAR_IECTL_REG);
        dmar_write4(unit, DMAR_IECTL_REG, iectl | DMAR_IECTL_IM);
}