1 /*-
2 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /*
30 * VM Bus Driver Implementation
31 */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/linker.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/sbuf.h>
44 #include <sys/smp.h>
45 #include <sys/sysctl.h>
46 #include <sys/systm.h>
47 #include <sys/taskqueue.h>
48
49 #include <vm/vm.h>
50 #include <vm/vm_param.h>
51 #include <vm/pmap.h>
52
53 #include <machine/bus.h>
54 #if defined(__aarch64__)
55 #include <dev/psci/smccc.h>
56 #include <dev/hyperv/vmbus/aarch64/hyperv_machdep.h>
57 #include <dev/hyperv/vmbus/aarch64/hyperv_reg.h>
58 #else
59 #include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
60 #include <dev/hyperv/vmbus/x86/hyperv_reg.h>
61 #include <machine/intr_machdep.h>
62 #include <x86/include/apicvar.h>
63 #endif
64 #include <machine/metadata.h>
65 #include <machine/md_var.h>
66 #include <machine/resource.h>
67 #include <contrib/dev/acpica/include/acpi.h>
68 #include <dev/acpica/acpivar.h>
69
70 #include <dev/hyperv/include/hyperv.h>
71 #include <dev/hyperv/include/vmbus_xact.h>
72 #include <dev/hyperv/vmbus/hyperv_var.h>
73 #include <dev/hyperv/vmbus/vmbus_reg.h>
74 #include <dev/hyperv/vmbus/vmbus_var.h>
75 #include <dev/hyperv/vmbus/vmbus_chanvar.h>
76 #include <dev/hyperv/vmbus/hyperv_common_reg.h>
77 #include "acpi_if.h"
78 #include "pcib_if.h"
79 #include "vmbus_if.h"
80
81 #define VMBUS_GPADL_START 0xe1e10
82
83 struct vmbus_msghc {
84 struct vmbus_xact *mh_xact;
85 struct hypercall_postmsg_in mh_inprm_save;
86 };
87
88 static void vmbus_identify(driver_t *, device_t);
89 static int vmbus_probe(device_t);
90 static int vmbus_attach(device_t);
91 static int vmbus_detach(device_t);
92 static int vmbus_read_ivar(device_t, device_t, int,
93 uintptr_t *);
94 static int vmbus_child_pnpinfo(device_t, device_t, struct sbuf *);
95 static struct resource *vmbus_alloc_resource(device_t dev,
96 device_t child, int type, int *rid,
97 rman_res_t start, rman_res_t end,
98 rman_res_t count, u_int flags);
99 static int vmbus_alloc_msi(device_t bus, device_t dev,
100 int count, int maxcount, int *irqs);
101 static int vmbus_release_msi(device_t bus, device_t dev,
102 int count, int *irqs);
103 static int vmbus_alloc_msix(device_t bus, device_t dev,
104 int *irq);
105 static int vmbus_release_msix(device_t bus, device_t dev,
106 int irq);
107 static int vmbus_map_msi(device_t bus, device_t dev,
108 int irq, uint64_t *addr, uint32_t *data);
109 static uint32_t vmbus_get_version_method(device_t, device_t);
110 static int vmbus_probe_guid_method(device_t, device_t,
111 const struct hyperv_guid *);
112 static uint32_t vmbus_get_vcpu_id_method(device_t bus,
113 device_t dev, int cpu);
114 static struct taskqueue *vmbus_get_eventtq_method(device_t, device_t,
115 int);
116 #if defined(EARLY_AP_STARTUP) || defined(__aarch64__)
117 static void vmbus_intrhook(void *);
118 #endif
119
120 static int vmbus_init(struct vmbus_softc *);
121 static int vmbus_connect(struct vmbus_softc *, uint32_t);
122 static int vmbus_req_channels(struct vmbus_softc *sc);
123 static void vmbus_disconnect(struct vmbus_softc *);
124 static int vmbus_scan(struct vmbus_softc *);
125 static void vmbus_scan_teardown(struct vmbus_softc *);
126 static void vmbus_scan_done(struct vmbus_softc *,
127 const struct vmbus_message *);
128 static void vmbus_chanmsg_handle(struct vmbus_softc *,
129 const struct vmbus_message *);
130 static void vmbus_msg_task(void *, int);
131 static void vmbus_synic_setup(void *);
132 static void vmbus_synic_teardown(void *);
133 static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
134 static int vmbus_dma_alloc(struct vmbus_softc *);
135 static void vmbus_dma_free(struct vmbus_softc *);
136 static int vmbus_intr_setup(struct vmbus_softc *);
137 static void vmbus_intr_teardown(struct vmbus_softc *);
138 static int vmbus_doattach(struct vmbus_softc *);
139 static void vmbus_event_proc_dummy(struct vmbus_softc *,
140 int);
141 static struct vmbus_softc *vmbus_sc;
142
143 SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
144 "Hyper-V vmbus");
145
146 static int vmbus_pin_evttask = 1;
147 SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
148 &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");
149 uint32_t vmbus_current_version;
150
151 static const uint32_t vmbus_version[] = {
152 VMBUS_VERSION_WIN10,
153 VMBUS_VERSION_WIN8_1,
154 VMBUS_VERSION_WIN8,
155 VMBUS_VERSION_WIN7,
156 VMBUS_VERSION_WS2008
157 };
158
159 static const vmbus_chanmsg_proc_t
160 vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
161 VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
162 VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
163 };
164
165 static device_method_t vmbus_methods[] = {
166 /* Device interface */
167 DEVMETHOD(device_identify, vmbus_identify),
168 DEVMETHOD(device_probe, vmbus_probe),
169 DEVMETHOD(device_attach, vmbus_attach),
170 DEVMETHOD(device_detach, vmbus_detach),
171 DEVMETHOD(device_shutdown, bus_generic_shutdown),
172 DEVMETHOD(device_suspend, bus_generic_suspend),
173 DEVMETHOD(device_resume, bus_generic_resume),
174
175 /* Bus interface */
176 DEVMETHOD(bus_add_child, bus_generic_add_child),
177 DEVMETHOD(bus_print_child, bus_generic_print_child),
178 DEVMETHOD(bus_read_ivar, vmbus_read_ivar),
179 DEVMETHOD(bus_child_pnpinfo, vmbus_child_pnpinfo),
180 DEVMETHOD(bus_alloc_resource, vmbus_alloc_resource),
181 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
182 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
183 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
184 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
185 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
186 #if __FreeBSD_version >= 1100000
187 DEVMETHOD(bus_get_cpus, bus_generic_get_cpus),
188 #endif
189
190 /* pcib interface */
191 DEVMETHOD(pcib_alloc_msi, vmbus_alloc_msi),
192 DEVMETHOD(pcib_release_msi, vmbus_release_msi),
193 DEVMETHOD(pcib_alloc_msix, vmbus_alloc_msix),
194 DEVMETHOD(pcib_release_msix, vmbus_release_msix),
195 DEVMETHOD(pcib_map_msi, vmbus_map_msi),
196
197 /* Vmbus interface */
198 DEVMETHOD(vmbus_get_version, vmbus_get_version_method),
199 DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method),
200 DEVMETHOD(vmbus_get_vcpu_id, vmbus_get_vcpu_id_method),
201 DEVMETHOD(vmbus_get_event_taskq, vmbus_get_eventtq_method),
202
203 DEVMETHOD_END
204 };
205
206 static driver_t vmbus_driver = {
207 "vmbus",
208 vmbus_methods,
209 sizeof(struct vmbus_softc)
210 };
211
212 DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL);
213 DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL);
214
215 MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
216 MODULE_DEPEND(vmbus, pci, 1, 1, 1);
217 MODULE_VERSION(vmbus, 1);
218
219 static __inline struct vmbus_softc *
220 vmbus_get_softc(void)
221 {
222 return vmbus_sc;
223 }
224
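/*
 * Reinitialize the hypercall post-message input buffer backing this
 * message hypercall context: zero it and set it up as a channel
 * message carrying a dsize-byte payload.
 */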
225 void
226 vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
227 {
228 struct hypercall_postmsg_in *inprm;
229
230 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
231 panic("invalid data size %zu", dsize);
232
233 inprm = vmbus_xact_req_data(mh->mh_xact);
234 memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
235 inprm->hc_connid = VMBUS_CONNID_MESSAGE;
236 inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
237 inprm->hc_dsize = dsize;
238 }
239
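/*
 * Acquire a message hypercall context, which wraps a vmbus transaction
 * (xact) whose request buffer is a hypercall post-message input sized
 * for a dsize-byte payload.
 *
 * A typical calling sequence, as used by vmbus_connect() below:
 *
 *	mh = vmbus_msghc_get(sc, sizeof(*req));
 *	req = vmbus_msghc_dataptr(mh);
 *	... fill in the request ...
 *	error = vmbus_msghc_exec(sc, mh);
 *	if (error == 0)
 *		msg = vmbus_msghc_wait_result(sc, mh);
 *	vmbus_msghc_put(sc, mh);
 */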
240 struct vmbus_msghc *
241 vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
242 {
243 struct vmbus_msghc *mh;
244 struct vmbus_xact *xact;
245
246 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
247 panic("invalid data size %zu", dsize);
248
249 xact = vmbus_xact_get(sc->vmbus_xc,
250 dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
251 if (xact == NULL)
252 return (NULL);
253
254 mh = vmbus_xact_priv(xact, sizeof(*mh));
255 mh->mh_xact = xact;
256
257 vmbus_msghc_reset(mh, dsize);
258 return (mh);
259 }
260
261 void
262 vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
263 {
264
265 vmbus_xact_put(mh->mh_xact);
266 }
267
268 void *
269 vmbus_msghc_dataptr(struct vmbus_msghc *mh)
270 {
271 struct hypercall_postmsg_in *inprm;
272
273 inprm = vmbus_xact_req_data(mh->mh_xact);
274 return (inprm->hc_data);
275 }
276
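/*
 * Post the message to the hypervisor without waiting for a response,
 * retrying with backoff on transient hypercall failures.
 */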
277 int
278 vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
279 {
280 sbintime_t time = SBT_1MS;
281 struct hypercall_postmsg_in *inprm;
282 bus_addr_t inprm_paddr;
283 int i;
284
285 inprm = vmbus_xact_req_data(mh->mh_xact);
286 inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);
287
288 /*
289 * Save the input parameter so that we can restore it if the
290 * Hypercall fails.
291 *
292 * XXX
293 * Is this really necessary? I.e., will the Hypercall ever
294 * overwrite the input parameter?
295 */
296 memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);
297
298 /*
299 * In order to cope with transient failures, e.g. insufficient
300 * resources on the host side, we retry the post message Hypercall
301 * several times. 20 retries seem sufficient.
302 */
303 #define HC_RETRY_MAX 20
304
305 for (i = 0; i < HC_RETRY_MAX; ++i) {
306 uint64_t status;
307
308 status = hypercall_post_message(inprm_paddr);
309 if (status == HYPERCALL_STATUS_SUCCESS)
310 return 0;
311
312 pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
313 if (time < SBT_1S * 2)
314 time *= 2;
315
316 /* Restore input parameter and try again */
317 memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
318 }
319
320 #undef HC_RETRY_MAX
321
322 return EIO;
323 }
324
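/*
 * Activate the underlying transaction so that a response can be
 * matched to it, then post the message.  If posting fails, the
 * transaction is deactivated, since no response will arrive.
 */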
325 int
326 vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
327 {
328 int error;
329
330 vmbus_xact_activate(mh->mh_xact);
331 error = vmbus_msghc_exec_noresult(mh);
332 if (error)
333 vmbus_xact_deactivate(mh->mh_xact);
334 return error;
335 }
336
337 void
338 vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
339 {
340
341 vmbus_xact_deactivate(mh->mh_xact);
342 }
343
344 const struct vmbus_message *
345 vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
346 {
347 size_t resp_len;
348
349 return (vmbus_xact_wait(mh->mh_xact, &resp_len));
350 }
351
352 const struct vmbus_message *
353 vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
354 {
355 size_t resp_len;
356
357 return (vmbus_xact_poll(mh->mh_xact, &resp_len));
358 }
359
360 void
361 vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
362 {
363
364 vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
365 }
366
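/*
 * Allocate a new GPADL id from the per-softc counter; 0 is never
 * returned.
 */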
367 uint32_t
368 vmbus_gpadl_alloc(struct vmbus_softc *sc)
369 {
370 uint32_t gpadl;
371
372 again:
373 gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
374 if (gpadl == 0)
375 goto again;
376 return (gpadl);
377 }
378
379 /* Used for Hyper-V sockets when a guest client connects to the host. */
380 int
381 vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
382 struct hyperv_guid *host_srv_id)
383 {
384 struct vmbus_softc *sc = vmbus_get_softc();
385 struct vmbus_chanmsg_tl_connect *req;
386 struct vmbus_msghc *mh;
387 int error;
388
389 if (!sc)
390 return ENXIO;
391
392 mh = vmbus_msghc_get(sc, sizeof(*req));
393 if (mh == NULL) {
394 device_printf(sc->vmbus_dev,
395 "can not get msg hypercall for tl connect\n");
396 return ENXIO;
397 }
398
399 req = vmbus_msghc_dataptr(mh);
400 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
401 req->guest_endpoint_id = *guest_srv_id;
402 req->host_service_id = *host_srv_id;
403
404 error = vmbus_msghc_exec_noresult(mh);
405 vmbus_msghc_put(sc, mh);
406
407 if (error) {
408 device_printf(sc->vmbus_dev,
409 "tl connect msg hypercall failed\n");
410 }
411
412 return error;
413 }
414
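/*
 * Ask the hypervisor to establish a vmbus connection with the given
 * protocol version.  Returns 0 on success and EOPNOTSUPP if the host
 * rejects the proposed version.
 */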
415 static int
416 vmbus_connect(struct vmbus_softc *sc, uint32_t version)
417 {
418 struct vmbus_chanmsg_connect *req;
419 const struct vmbus_message *msg;
420 struct vmbus_msghc *mh;
421 int error, done = 0;
422
423 mh = vmbus_msghc_get(sc, sizeof(*req));
424 if (mh == NULL)
425 return ENXIO;
426
427 req = vmbus_msghc_dataptr(mh);
428 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
429 req->chm_ver = version;
430 req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr;
431 req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr;
432 req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr;
433
434 error = vmbus_msghc_exec(sc, mh);
435 if (error) {
436 vmbus_msghc_put(sc, mh);
437 return error;
438 }
439
440 msg = vmbus_msghc_wait_result(sc, mh);
441 done = ((const struct vmbus_chanmsg_connect_resp *)
442 msg->msg_data)->chm_done;
443
444 vmbus_msghc_put(sc, mh);
445
446 return (done ? 0 : EOPNOTSUPP);
447 }
448
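/*
 * Negotiate the vmbus protocol version, trying the newest supported
 * version first and falling back to older ones.
 */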
449 static int
450 vmbus_init(struct vmbus_softc *sc)
451 {
452 int i;
453
454 for (i = 0; i < nitems(vmbus_version); ++i) {
455 int error;
456
457 error = vmbus_connect(sc, vmbus_version[i]);
458 if (!error) {
459 vmbus_current_version = vmbus_version[i];
460 sc->vmbus_version = vmbus_version[i];
461 device_printf(sc->vmbus_dev, "version %u.%u\n",
462 VMBUS_VERSION_MAJOR(sc->vmbus_version),
463 VMBUS_VERSION_MINOR(sc->vmbus_version));
464 return 0;
465 }
466 }
467 return ENXIO;
468 }
469
470 static void
471 vmbus_disconnect(struct vmbus_softc *sc)
472 {
473 struct vmbus_chanmsg_disconnect *req;
474 struct vmbus_msghc *mh;
475 int error;
476
477 mh = vmbus_msghc_get(sc, sizeof(*req));
478 if (mh == NULL) {
479 device_printf(sc->vmbus_dev,
480 "can not get msg hypercall for disconnect\n");
481 return;
482 }
483
484 req = vmbus_msghc_dataptr(mh);
485 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;
486
487 error = vmbus_msghc_exec_noresult(mh);
488 vmbus_msghc_put(sc, mh);
489
490 if (error) {
491 device_printf(sc->vmbus_dev,
492 "disconnect msg hypercall failed\n");
493 }
494 }
495
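/*
 * Request channel offers from the hypervisor; the offers arrive
 * asynchronously as channel messages.
 */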
496 static int
497 vmbus_req_channels(struct vmbus_softc *sc)
498 {
499 struct vmbus_chanmsg_chrequest *req;
500 struct vmbus_msghc *mh;
501 int error;
502
503 mh = vmbus_msghc_get(sc, sizeof(*req));
504 if (mh == NULL)
505 return ENXIO;
506
507 req = vmbus_msghc_dataptr(mh);
508 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;
509
510 error = vmbus_msghc_exec_noresult(mh);
511 vmbus_msghc_put(sc, mh);
512
513 return error;
514 }
515
516 static void
517 vmbus_scan_done_task(void *xsc, int pending __unused)
518 {
519 struct vmbus_softc *sc = xsc;
520
521 bus_topo_lock();
522 sc->vmbus_scandone = true;
523 bus_topo_unlock();
524 wakeup(&sc->vmbus_scandone);
525 }
526
527 static void
528 vmbus_scan_done(struct vmbus_softc *sc,
529 const struct vmbus_message *msg __unused)
530 {
531
532 taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
533 }
534
535 static int
536 vmbus_scan(struct vmbus_softc *sc)
537 {
538 int error;
539
540 /*
541 * Identify, probe and attach non-channel devices.
542 */
543 bus_generic_probe(sc->vmbus_dev);
544 bus_generic_attach(sc->vmbus_dev);
545
546 /*
547 * This taskqueue serializes vmbus devices' attach and detach
548 * for channel offer and rescind messages.
549 */
550 sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
551 taskqueue_thread_enqueue, &sc->vmbus_devtq);
552 taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
553 TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);
554
555 /*
556 * This taskqueue handles sub-channel detach, so that vmbus
557 * device's detach running in vmbus_devtq can drain its sub-
558 * channels.
559 */
560 sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
561 taskqueue_thread_enqueue, &sc->vmbus_subchtq);
562 taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");
563
564 /*
565 * Start vmbus scanning.
566 */
567 error = vmbus_req_channels(sc);
568 if (error) {
569 device_printf(sc->vmbus_dev, "channel request failed: %d\n",
570 error);
571 return (error);
572 }
573
574 /*
575 * Wait for all vmbus devices from the initial channel offers to be
576 * attached.
577 */
578 bus_topo_assert();
579 while (!sc->vmbus_scandone)
580 mtx_sleep(&sc->vmbus_scandone, bus_topo_mtx(), 0, "vmbusdev", 0);
581
582 if (bootverbose) {
583 device_printf(sc->vmbus_dev, "device scan, probe and attach "
584 "done\n");
585 }
586 return (0);
587 }
588
589 static void
590 vmbus_scan_teardown(struct vmbus_softc *sc)
591 {
592
593 bus_topo_assert();
594 if (sc->vmbus_devtq != NULL) {
595 bus_topo_unlock();
596 taskqueue_free(sc->vmbus_devtq);
597 bus_topo_lock();
598 sc->vmbus_devtq = NULL;
599 }
600 if (sc->vmbus_subchtq != NULL) {
601 bus_topo_unlock();
602 taskqueue_free(sc->vmbus_subchtq);
603 bus_topo_lock();
604 sc->vmbus_subchtq = NULL;
605 }
606 }
607
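/*
 * Dispatch a channel message: first through the generic handler table
 * above, then to the channel-specific message processing.
 */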
608 static void
609 vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
610 {
611 vmbus_chanmsg_proc_t msg_proc;
612 uint32_t msg_type;
613
614 msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
615 if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
616 device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
617 msg_type);
618 return;
619 }
620
621 msg_proc = vmbus_chanmsg_handlers[msg_type];
622 if (msg_proc != NULL)
623 msg_proc(sc, msg);
624
625 /* Channel specific processing */
626 vmbus_chan_msgproc(sc, msg);
627 }
628
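/*
 * Per-cpu task that drains this CPU's SynIC message slot for the
 * VMBUS_SINT_MESSAGE SINT.
 */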
629 static void
630 vmbus_msg_task(void *xsc, int pending __unused)
631 {
632 struct vmbus_softc *sc = xsc;
633 volatile struct vmbus_message *msg;
634
635 msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
636 for (;;) {
637 if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
638 /* No message */
639 break;
640 } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
641 /* Channel message */
642 vmbus_chanmsg_handle(sc,
643 __DEVOLATILE(const struct vmbus_message *, msg));
644 }
645
646 msg->msg_type = HYPERV_MSGTYPE_NONE;
647 /*
648 * Make sure the write to msg_type (i.e. setting it to
649 * HYPERV_MSGTYPE_NONE) happens before we read the
650 * msg_flags and EOM. Otherwise, the EOM will not
651 * deliver any more messages, since there is no
652 * empty slot.
653 *
654 * NOTE:
655 * mb() is used here, since atomic_thread_fence_seq_cst()
656 * becomes a compiler fence on UP kernels.
657 */
658 mb();
659 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
660 /*
661 * This will cause message queue rescan to possibly
662 * deliver another msg from the hypervisor
663 */
664 WRMSR(MSR_HV_EOM, 0);
665 }
666 }
667 }

668 static __inline int
669 vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
670 {
671 volatile struct vmbus_message *msg;
672 struct vmbus_message *msg_base;
673
674 msg_base = VMBUS_PCPU_GET(sc, message, cpu);
675
676 /*
677 * Check event timer.
678 *
679 * TODO: move this to independent IDT vector.
680 */
681 vmbus_handle_timer_intr1(msg_base, frame);
682 /*
683 * Check events. Hot path for network and storage I/O data; high rate.
684 *
685 * NOTE:
686 * As recommended by the Windows guest fellows, we check events before
687 * checking messages.
688 */
689 sc->vmbus_event_proc(sc, cpu);
690
691 /*
692 * Check messages. Mainly management traffic; ultra low rate.
693 */
694 msg = msg_base + VMBUS_SINT_MESSAGE;
695 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
696 taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
697 VMBUS_PCPU_PTR(sc, message_task, cpu));
698 }
699
700 return (FILTER_HANDLED);
701 }
702
703 void
704 vmbus_handle_intr(struct trapframe *trap_frame)
705 {
706 struct vmbus_softc *sc = vmbus_get_softc();
707 int cpu = curcpu;
708
709 /*
710 * Disable preemption.
711 */
712 critical_enter();
713
714 /*
715 * Do a little interrupt counting. This uses the x86-specific
716 * intrcnt_add() function.
717 */
718 #if !defined(__aarch64__)
719 (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;
720 #endif /* not for aarch64 */
721 vmbus_handle_intr1(sc, trap_frame, cpu);
722
723 /*
724 * Enable preemption.
725 */
726 critical_exit();
727 }
728
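/*
 * Record this CPU's virtual processor id and program its SynIC MSRs:
 * the message page, the event flags page and the message SINT, then
 * enable the SynIC.  Run on every CPU through smp_rendezvous().
 */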
729 static void
730 vmbus_synic_setup(void *xsc)
731 {
732 struct vmbus_softc *sc = xsc;
733 int cpu = curcpu;
734 uint64_t val, orig;
735 uint32_t sint;
736
737 if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
738 /* Save virtual processor id. */
739 VMBUS_PCPU_GET(sc, vcpuid, cpu) = RDMSR(MSR_HV_VP_INDEX);
740 } else {
741 /* Set virtual processor id to 0 for compatibility. */
742 VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
743 }
744
745 /*
746 * Setup the SynIC message.
747 */
748 orig = RDMSR(MSR_HV_SIMP);
749 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
750 ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT)
751 << MSR_HV_SIMP_PGSHIFT);
752 WRMSR(MSR_HV_SIMP, val);
753 /*
754 * Setup the SynIC event flags.
755 */
756 orig = RDMSR(MSR_HV_SIEFP);
757 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
758 ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu) >> PAGE_SHIFT)
759 << MSR_HV_SIEFP_PGSHIFT);
760 WRMSR(MSR_HV_SIEFP, val);
761
762 /*
763 * Configure and unmask SINT for message and event flags.
764 */
765 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
766 orig = RDMSR(sint);
767 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
768 (orig & MSR_HV_SINT_RSVD_MASK);
769 WRMSR(sint, val);
770
771 /*
772 * Configure and unmask SINT for timer.
773 */
774 vmbus_synic_setup1(sc);
775 /*
776 * All done; enable SynIC.
777 */
778 orig = RDMSR(MSR_HV_SCONTROL);
779 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
780 WRMSR(MSR_HV_SCONTROL, val);
781 }
782
783 static void
784 vmbus_synic_teardown(void *arg)
785 {
786 uint64_t orig;
787 uint32_t sint;
788
789 /*
790 * Disable SynIC.
791 */
792 orig = RDMSR(MSR_HV_SCONTROL);
793 WRMSR(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));
794
795 /*
796 * Mask message and event flags SINT.
797 */
798 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
799 orig = RDMSR(sint);
800 WRMSR(sint, orig | MSR_HV_SINT_MASKED);
801
802 /*
803 * Mask timer SINT.
804 */
805 vmbus_synic_teardown1();
806 /*
807 * Teardown SynIC message.
808 */
809 orig = RDMSR(MSR_HV_SIMP);
810 WRMSR(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));
811
812 /*
813 * Teardown SynIC event flags.
814 */
815 orig = RDMSR(MSR_HV_SIEFP);
816 WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
817 }
818
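/*
 * Allocate the per-cpu SynIC message and event flags pages, plus the
 * connection-wide event flags page and the two MNF buffers passed to
 * the host at connect time.
 */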
819 static int
820 vmbus_dma_alloc(struct vmbus_softc *sc)
821 {
822 bus_dma_tag_t parent_dtag;
823 uint8_t *evtflags;
824 int cpu;
825
826 parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
827 CPU_FOREACH(cpu) {
828 void *ptr;
829
830 /*
831 * Per-cpu messages and event flags.
832 */
833 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
834 PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
835 BUS_DMA_WAITOK | BUS_DMA_ZERO);
836 if (ptr == NULL)
837 return ENOMEM;
838 VMBUS_PCPU_GET(sc, message, cpu) = ptr;
839
840 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
841 PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
842 BUS_DMA_WAITOK | BUS_DMA_ZERO);
843 if (ptr == NULL)
844 return ENOMEM;
845 VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
846 }
847
848 evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
849 PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
850 if (evtflags == NULL)
851 return ENOMEM;
852 sc->vmbus_rx_evtflags = (u_long *)evtflags;
853 sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
854 sc->vmbus_evtflags = evtflags;
855
856 sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
857 PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
858 if (sc->vmbus_mnf1 == NULL)
859 return ENOMEM;
860
861 sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
862 sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma,
863 BUS_DMA_WAITOK | BUS_DMA_ZERO);
864 if (sc->vmbus_mnf2 == NULL)
865 return ENOMEM;
866
867 return 0;
868 }
869
870 static void
871 vmbus_dma_free(struct vmbus_softc *sc)
872 {
873 int cpu;
874
875 if (sc->vmbus_evtflags != NULL) {
876 hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
877 sc->vmbus_evtflags = NULL;
878 sc->vmbus_rx_evtflags = NULL;
879 sc->vmbus_tx_evtflags = NULL;
880 }
881 if (sc->vmbus_mnf1 != NULL) {
882 hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
883 sc->vmbus_mnf1 = NULL;
884 }
885 if (sc->vmbus_mnf2 != NULL) {
886 hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
887 sc->vmbus_mnf2 = NULL;
888 }
889
890 CPU_FOREACH(cpu) {
891 if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
892 hyperv_dmamem_free(
893 VMBUS_PCPU_PTR(sc, message_dma, cpu),
894 VMBUS_PCPU_GET(sc, message, cpu));
895 VMBUS_PCPU_GET(sc, message, cpu) = NULL;
896 }
897 if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
898 hyperv_dmamem_free(
899 VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
900 VMBUS_PCPU_GET(sc, event_flags, cpu));
901 VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
902 }
903 }
904 }
905
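/*
 * Set up the per-cpu event and message taskqueues and interrupt
 * counters, then finish the interrupt setup through
 * vmbus_setup_intr1().
 */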
906 static int
907 vmbus_intr_setup(struct vmbus_softc *sc)
908 {
909 int cpu;
910
911 CPU_FOREACH(cpu) {
912 char buf[MAXCOMLEN + 1];
913 cpuset_t cpu_mask;
914
915 /* Allocate an interrupt counter for Hyper-V interrupt */
916 snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
917 #if !defined(__aarch64__)
918 intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));
919 #endif /* not for aarch64 */
920 /*
921 * Set up a taskqueue to handle events. Tasks will be per-
922 * channel.
923 */
924 VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
925 "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
926 VMBUS_PCPU_PTR(sc, event_tq, cpu));
927 if (vmbus_pin_evttask) {
928 CPU_SETOF(cpu, &cpu_mask);
929 taskqueue_start_threads_cpuset(
930 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
931 &cpu_mask, "hvevent%d", cpu);
932 } else {
933 taskqueue_start_threads(
934 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
935 "hvevent%d", cpu);
936 }
937
938 /*
939 * Setup tasks and taskqueues to handle messages.
940 */
941 VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
942 "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
943 VMBUS_PCPU_PTR(sc, message_tq, cpu));
944 CPU_SETOF(cpu, &cpu_mask);
945 taskqueue_start_threads_cpuset(
946 VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
947 "hvmsg%d", cpu);
948 TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
949 vmbus_msg_task, sc);
950 }
951 return (vmbus_setup_intr1(sc));
952 }

953 static void
954 vmbus_intr_teardown(struct vmbus_softc *sc)
955 {
956 vmbus_intr_teardown1(sc);
957 }
958
959 static int
960 vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
961 {
962 return (ENOENT);
963 }
964
965 static int
966 vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
967 {
968 const struct vmbus_channel *chan;
969 char guidbuf[HYPERV_GUID_STRLEN];
970
971 chan = vmbus_get_channel(child);
972 if (chan == NULL) {
973 /* Event timer device, which does not belong to a channel */
974 return (0);
975 }
976
977 hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
978 sbuf_printf(sb, "classid=%s", guidbuf);
979
980 hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
981 sbuf_printf(sb, " deviceid=%s", guidbuf);
982
983 return (0);
984 }
985
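/*
 * Create and attach a device_t for the given channel; the channel is
 * handed to the child driver through its ivars.
 */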
986 int
987 vmbus_add_child(struct vmbus_channel *chan)
988 {
989 struct vmbus_softc *sc = chan->ch_vmbus;
990 device_t parent = sc->vmbus_dev;
991
992 bus_topo_lock();
993 chan->ch_dev = device_add_child(parent, NULL, -1);
994 if (chan->ch_dev == NULL) {
995 bus_topo_unlock();
996 device_printf(parent, "device_add_child for chan%u failed\n",
997 chan->ch_id);
998 return (ENXIO);
999 }
1000 device_set_ivars(chan->ch_dev, chan);
1001 device_probe_and_attach(chan->ch_dev);
1002 bus_topo_unlock();
1003
1004 return (0);
1005 }
1006
1007 int
1008 vmbus_delete_child(struct vmbus_channel *chan)
1009 {
1010 int error = 0;
1011
1012 bus_topo_lock();
1013 if (chan->ch_dev != NULL) {
1014 error = device_delete_child(chan->ch_vmbus->vmbus_dev,
1015 chan->ch_dev);
1016 chan->ch_dev = NULL;
1017 }
1018 bus_topo_unlock();
1019 return (error);
1020 }
1021
1022 static int
1023 vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
1024 {
1025 struct vmbus_softc *sc = arg1;
1026 char verstr[16];
1027
1028 snprintf(verstr, sizeof(verstr), "%u.%u",
1029 VMBUS_VERSION_MAJOR(sc->vmbus_version),
1030 VMBUS_VERSION_MINOR(sc->vmbus_version));
1031 return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
1032 }
1033
1034 /*
1035 * We need this function to make sure the MMIO resource is allocated from the
1036 * ranges found in _CRS.
1037 *
1038 * For the release function, we can use bus_generic_release_resource().
1039 */
1040 static struct resource *
1041 vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
1042 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1043 {
1044 device_t parent = device_get_parent(dev);
1045 struct resource *res;
1046
1047 #ifdef NEW_PCIB
1048 if (type == SYS_RES_MEMORY) {
1049 struct vmbus_softc *sc = device_get_softc(dev);
1050
1051 res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
1052 rid, start, end, count, flags);
1053 } else
1054 #endif
1055 {
1056 res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
1057 end, count, flags);
1058 }
1059
1060 return (res);
1061 }
1062
1063 static int
1064 vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
1065 {
1066
1067 return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
1068 irqs));
1069 }
1070
1071 static int
1072 vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
1073 {
1074
1075 return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
1076 }
1077
1078 static int
1079 vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
1080 {
1081
1082 return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
1083 }
1084
1085 static int
1086 vmbus_release_msix(device_t bus, device_t dev, int irq)
1087 {
1088
1089 return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
1090 }
1091
1092 static int
1093 vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
1094 uint32_t *data)
1095 {
1096
1097 return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
1098 }
1099
1100 static uint32_t
1101 vmbus_get_version_method(device_t bus, device_t dev)
1102 {
1103 struct vmbus_softc *sc = device_get_softc(bus);
1104
1105 return sc->vmbus_version;
1106 }
1107
1108 static int
1109 vmbus_probe_guid_method(device_t bus, device_t dev,
1110 const struct hyperv_guid *guid)
1111 {
1112 const struct vmbus_channel *chan = vmbus_get_channel(dev);
1113
1114 if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
1115 return 0;
1116 return ENXIO;
1117 }
1118
1119 static uint32_t
1120 vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
1121 {
1122 const struct vmbus_softc *sc = device_get_softc(bus);
1123
1124 return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
1125 }
1126
1127 static struct taskqueue *
1128 vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
1129 {
1130 const struct vmbus_softc *sc = device_get_softc(bus);
1131
1132 KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
1133 return (VMBUS_PCPU_GET(sc, event_tq, cpu));
1134 }
1135
1136 #ifdef NEW_PCIB
1137 #define VTPM_BASE_ADDR 0xfed40000
1138 #define FOUR_GB (1ULL << 32)
1139
1140 enum parse_pass { parse_64, parse_32 };
1141
1142 struct parse_context {
1143 device_t vmbus_dev;
1144 enum parse_pass pass;
1145 };
1146
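/*
 * AcpiWalkResources() callback: record each usable 32-bit or 64-bit
 * memory range found in _CRS through pcib_host_res_decodes(),
 * depending on the current parse pass.
 */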
1147 static ACPI_STATUS
1148 parse_crs(ACPI_RESOURCE *res, void *ctx)
1149 {
1150 const struct parse_context *pc = ctx;
1151 device_t vmbus_dev = pc->vmbus_dev;
1152
1153 struct vmbus_softc *sc = device_get_softc(vmbus_dev);
1154 UINT64 start, end;
1155
1156 switch (res->Type) {
1157 case ACPI_RESOURCE_TYPE_ADDRESS32:
1158 start = res->Data.Address32.Address.Minimum;
1159 end = res->Data.Address32.Address.Maximum;
1160 break;
1161
1162 case ACPI_RESOURCE_TYPE_ADDRESS64:
1163 start = res->Data.Address64.Address.Minimum;
1164 end = res->Data.Address64.Address.Maximum;
1165 break;
1166
1167 default:
1168 /* Unused types. */
1169 return (AE_OK);
1170 }
1171
1172 /*
1173 * We don't use <1MB addresses.
1174 */
1175 if (end < 0x100000)
1176 return (AE_OK);
1177
1178 /* Don't conflict with vTPM. */
1179 if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
1180 end = VTPM_BASE_ADDR - 1;
1181
1182 if ((pc->pass == parse_32 && start < FOUR_GB) ||
1183 (pc->pass == parse_64 && start >= FOUR_GB))
1184 pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
1185 start, end, 0);
1186
1187 return (AE_OK);
1188 }
1189
1190 static void
1191 vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
1192 {
1193 struct parse_context pc;
1194 ACPI_STATUS status;
1195
1196 if (bootverbose)
1197 device_printf(dev, "walking _CRS, pass=%d\n", pass);
1198
1199 pc.vmbus_dev = vmbus_dev;
1200 pc.pass = pass;
1201 status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
1202 parse_crs, &pc);
1203
1204 if (bootverbose && ACPI_FAILURE(status))
1205 device_printf(dev, "_CRS: not found, pass=%d\n", pass);
1206 }
1207
1208 static void
1209 vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
1210 {
1211 device_t acpi0, parent;
1212
1213 parent = device_get_parent(dev);
1214
1215 acpi0 = device_get_parent(parent);
1216 if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
1217 device_t *children;
1218 int count;
1219
1220 /*
1221 * Try to locate VMBUS resources and find _CRS on them.
1222 */
1223 if (device_get_children(acpi0, &children, &count) == 0) {
1224 int i;
1225
1226 for (i = 0; i < count; ++i) {
1227 if (!device_is_attached(children[i]))
1228 continue;
1229
1230 if (strcmp("vmbus_res",
1231 device_get_name(children[i])) == 0)
1232 vmbus_get_crs(children[i], dev, pass);
1233 }
1234 free(children, M_TEMP);
1235 }
1236
1237 /*
1238 * Try to find _CRS on acpi.
1239 */
1240 vmbus_get_crs(acpi0, dev, pass);
1241 } else {
1242 device_printf(dev, "not grandchild of acpi\n");
1243 }
1244
1245 /*
1246 * Try to find _CRS on parent.
1247 */
1248 vmbus_get_crs(parent, dev, pass);
1249 }
1250
1251 static void
1252 vmbus_get_mmio_res(device_t dev)
1253 {
1254 struct vmbus_softc *sc = device_get_softc(dev);
1255 /*
1256 * We walk the resources twice so that, in the resource list, the
1257 * 32-bit resources appear after the 64-bit resources.
1258 * NB: resource_list_add() uses INSERT_TAIL. This way, when we
1259 * iterate through the list to find a range for a 64-bit BAR in
1260 * vmbus_alloc_resource(), we can make sure we try to use >4GB
1261 * ranges first.
1262 */
1263 pcib_host_res_init(dev, &sc->vmbus_mmio_res);
1264
1265 vmbus_get_mmio_res_pass(dev, parse_64);
1266 vmbus_get_mmio_res_pass(dev, parse_32);
1267 }
1268
1269 /*
1270 * On Gen2 VMs, Hyper-V provides MMIO space for the framebuffer.
1271 * This MMIO address range is not usable by other PCI devices.
1272 * Currently only the efifb and vbefb drivers use this range, without
1273 * reserving it from the system.
1274 * Therefore, the vmbus driver reserves it before any other PCI device
1275 * drivers start to request MMIO addresses.
1276 */
1277 static struct resource *hv_fb_res;
1278
1279 static void
1280 vmbus_fb_mmio_res(device_t dev)
1281 {
1282 struct efi_fb *efifb;
1283 #if !defined(__aarch64__)
1284 struct vbe_fb *vbefb;
1285 #endif /* !__aarch64__ */
1286 rman_res_t fb_start, fb_end, fb_count;
1287 int fb_height, fb_width;
1288 caddr_t kmdp;
1289
1290 struct vmbus_softc *sc = device_get_softc(dev);
1291 int rid = 0;
1292
1293 kmdp = preload_search_by_type("elf kernel");
1294 if (kmdp == NULL)
1295 kmdp = preload_search_by_type("elf64 kernel");
1296 efifb = (struct efi_fb *)preload_search_info(kmdp,
1297 MODINFO_METADATA | MODINFOMD_EFI_FB);
1298 #if !defined(__aarch64__)
1299 vbefb = (struct vbe_fb *)preload_search_info(kmdp,
1300 MODINFO_METADATA | MODINFOMD_VBE_FB);
1301 #endif /* !__aarch64__ */
1302 if (efifb != NULL) {
1303 fb_start = efifb->fb_addr;
1304 fb_end = efifb->fb_addr + efifb->fb_size;
1305 fb_count = efifb->fb_size;
1306 fb_height = efifb->fb_height;
1307 fb_width = efifb->fb_width;
1308 }
1309 #if !defined(__aarch64__)
1310 else if (vbefb != NULL) {
1311 fb_start = vbefb->fb_addr;
1312 fb_end = vbefb->fb_addr + vbefb->fb_size;
1313 fb_count = vbefb->fb_size;
1314 fb_height = vbefb->fb_height;
1315 fb_width = vbefb->fb_width;
1316 }
1317 #endif /* !__aarch64__ */
1318 else {
1319 if (bootverbose)
1320 device_printf(dev,
1321 "no preloaded kernel fb information\n");
1322 /* We are on a Gen1 VM; just return. */
1323 return;
1324 }
1325
1326 if (bootverbose)
1327 device_printf(dev,
1328 "fb: fb_addr: %#jx, size: %#jx, "
1329 "actual size needed: 0x%x\n",
1330 fb_start, fb_count, fb_height * fb_width);
1331
1332 hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
1333 SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
1334 RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));
1335
1336 if (hv_fb_res && bootverbose)
1337 device_printf(dev,
1338 "successfully reserved memory for framebuffer "
1339 "starting at %#jx, size %#jx\n",
1340 fb_start, fb_count);
1341 }
1342
1343 static void
1344 vmbus_free_mmio_res(device_t dev)
1345 {
1346 struct vmbus_softc *sc = device_get_softc(dev);
1347
1348 pcib_host_res_free(dev, &sc->vmbus_mmio_res);
1349
1350 if (hv_fb_res)
1351 hv_fb_res = NULL;
1352 }
1353 #endif /* NEW_PCIB */
1354
1355 static void
1356 vmbus_identify(driver_t *driver, device_t parent)
1357 {
1358
1359 if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
1360 (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1361 return;
1362 device_add_child(parent, "vmbus", -1);
1363 }
1364
1365 static int
1366 vmbus_probe(device_t dev)
1367 {
1368
1369 if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
1370 (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1371 return (ENXIO);
1372
1373 device_set_desc(dev, "Hyper-V Vmbus");
1374 return (BUS_PROBE_DEFAULT);
1375 }
1376
1377 /**
1378 * @brief Main vmbus driver initialization routine.
1379 *
1380 * Here, we
1381 * - initialize the vmbus driver context
1382 * - setup various driver entry points
1383 * - invoke the vmbus hv main init routine
1384 * - get the irq resource
1385 * - invoke the vmbus to add the vmbus root device
1386 * - setup the vmbus root device
1387 * - retrieve the channel offers
1388 */
1389 static int
1390 vmbus_doattach(struct vmbus_softc *sc)
1391 {
1392 struct sysctl_oid_list *child;
1393 struct sysctl_ctx_list *ctx;
1394 int ret;
1395
1396 if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
1397 return (0);
1398
1399 #ifdef NEW_PCIB
1400 vmbus_get_mmio_res(sc->vmbus_dev);
1401 vmbus_fb_mmio_res(sc->vmbus_dev);
1402 #endif
1403
1404 sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;
1405
1406 sc->vmbus_gpadl = VMBUS_GPADL_START;
1407 mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
1408 TAILQ_INIT(&sc->vmbus_prichans);
1409 mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
1410 TAILQ_INIT(&sc->vmbus_chans);
1411 sc->vmbus_chmap = malloc(
1412 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
1413 M_WAITOK | M_ZERO);
1414
1415 /*
1416 * Create the context for "post message" Hypercalls.
1417 */
1418 sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
1419 HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
1420 sizeof(struct vmbus_msghc));
1421 if (sc->vmbus_xc == NULL) {
1422 ret = ENXIO;
1423 goto cleanup;
1424 }
1425
1426 /*
1427 * Allocate DMA stuffs.
1428 */
1429 ret = vmbus_dma_alloc(sc);
1430 if (ret != 0)
1431 goto cleanup;
1432
1433 /*
1434 * Setup interrupt.
1435 */
1436 ret = vmbus_intr_setup(sc);
1437 if (ret != 0)
1438 goto cleanup;
1439
1440 /*
1441 * Setup SynIC.
1442 */
1443 if (bootverbose)
1444 device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
1445 smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
1446 sc->vmbus_flags |= VMBUS_FLAG_SYNIC;
1447
1448 /*
1449 * Initialize vmbus, e.g. connect to Hypervisor.
1450 */
1451 ret = vmbus_init(sc);
1452 if (ret != 0)
1453 goto cleanup;
1454
1455 if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
1456 sc->vmbus_version == VMBUS_VERSION_WIN7)
1457 sc->vmbus_event_proc = vmbus_event_proc_compat;
1458 else
1459 sc->vmbus_event_proc = vmbus_event_proc;
1460
1461 ret = vmbus_scan(sc);
1462 if (ret != 0)
1463 goto cleanup;
1464
1465 ctx = device_get_sysctl_ctx(sc->vmbus_dev);
1466 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
1467 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
1468 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1469 vmbus_sysctl_version, "A", "vmbus version");
1470
1471 return (ret);
1472
1473 cleanup:
1474 vmbus_scan_teardown(sc);
1475 vmbus_intr_teardown(sc);
1476 vmbus_dma_free(sc);
1477 if (sc->vmbus_xc != NULL) {
1478 vmbus_xact_ctx_destroy(sc->vmbus_xc);
1479 sc->vmbus_xc = NULL;
1480 }
1481 free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1482 mtx_destroy(&sc->vmbus_prichan_lock);
1483 mtx_destroy(&sc->vmbus_chan_lock);
1484
1485 return (ret);
1486 }
1487
1488 static void
1489 vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
1490 {
1491 }
1492
1493 #if defined(EARLY_AP_STARTUP) || defined(__aarch64__)
1494
1495 static void
1496 vmbus_intrhook(void *xsc)
1497 {
1498 struct vmbus_softc *sc = xsc;
1499
1500 if (bootverbose)
1501 device_printf(sc->vmbus_dev, "intrhook\n");
1502 vmbus_doattach(sc);
1503 config_intrhook_disestablish(&sc->vmbus_intrhook);
1504 }
1505
1506 #endif /* EARLY_AP_STARTUP || __aarch64__ */
1507
1508 static int
1509 vmbus_attach(device_t dev)
1510 {
1511 vmbus_sc = device_get_softc(dev);
1512 vmbus_sc->vmbus_dev = dev;
1513 vmbus_sc->vmbus_idtvec = -1;
1514
1515 /*
1516 * Event processing logic will be configured:
1517 * - After the vmbus protocol version negotiation.
1518 * - Before we request channel offers.
1519 */
1520 vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;
1521
1522 #if defined(EARLY_AP_STARTUP) || defined(__aarch64__)
1523 /*
1524 * Defer the real attach until pause(9) works as expected.
1525 */
1526 vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
1527 vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
1528 config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
1529 #else /* !EARLY_AP_STARTUP && !__aarch64__ */
1530 /*
1531 * If the system has already booted and thread
1532 * scheduling is possible, as indicated by the global
1533 * cold being set to zero, we just call the driver
1534 * initialization directly.
1535 */
1536 if (!cold)
1537 vmbus_doattach(vmbus_sc);
1538 #endif /* EARLY_AP_STARTUP || __aarch64__ */
1539
1540 return (0);
1541 }
1542
1543 static int
1544 vmbus_detach(device_t dev)
1545 {
1546 struct vmbus_softc *sc = device_get_softc(dev);
1547
1548 bus_generic_detach(dev);
1549 vmbus_chan_destroy_all(sc);
1550
1551 vmbus_scan_teardown(sc);
1552
1553 vmbus_disconnect(sc);
1554
1555 if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
1556 sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
1557 smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
1558 }
1559
1560 vmbus_intr_teardown(sc);
1561 vmbus_dma_free(sc);
1562
1563 if (sc->vmbus_xc != NULL) {
1564 vmbus_xact_ctx_destroy(sc->vmbus_xc);
1565 sc->vmbus_xc = NULL;
1566 }
1567
1568 free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1569 mtx_destroy(&sc->vmbus_prichan_lock);
1570 mtx_destroy(&sc->vmbus_chan_lock);
1571
1572 #ifdef NEW_PCIB
1573 vmbus_free_mmio_res(dev);
1574 #endif
1575
1576 #if defined(__aarch64__)
1577 bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector,
1578 sc->ires);
1579 #endif
1580 return (0);
1581 }
1582
1583 #if !defined(EARLY_AP_STARTUP) && !defined(__aarch64__)
1584
1585 static void
1586 vmbus_sysinit(void *arg __unused)
1587 {
1588 struct vmbus_softc *sc = vmbus_get_softc();
1589
1590 if (vm_guest != VM_GUEST_HV || sc == NULL)
1591 return;
1592
1593 /*
1594 * If the system has already booted and thread
1595 * scheduling is possible, as indicated by the
1596 * global cold being set to zero, we just call the driver
1597 * initialization directly.
1598 */
1599 if (!cold)
1600 vmbus_doattach(sc);
1601 }
1602 /*
1603 * NOTE:
1604 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
1605 * initialized.
1606 */
1607 SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
1608 #endif /* !EARLY_AP_STARTUP */