/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* VMCI QueuePair API implementation. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_queue_pair.h"

#define LGPFX	"vmci_queue_pair: "

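/* State common to every queue pair entry tracked by this driver. */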
struct queue_pair_entry {
	vmci_list_item(queue_pair_entry) list_item;
	struct vmci_handle handle;
	vmci_id peer;
	uint32_t flags;
	uint64_t produce_size;
	uint64_t consume_size;
	uint32_t ref_count;
};

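/*
 * A guest endpoint extends the common entry with the queue memory and the
 * set of physical page numbers (PPNs) shared with the hypervisor.
 */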
struct qp_guest_endpoint {
	struct queue_pair_entry qp;
	uint64_t num_ppns;
	void *produce_q;
	void *consume_q;
	bool hibernate_failure;
	struct ppn_set ppn_set;
};

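/* A list of queue pair entries, protected by the embedded mutex. */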
struct queue_pair_list {
	vmci_list(queue_pair_entry) head;
	volatile int hibernate;
	vmci_mutex mutex;
};

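/*
 * Number of pages backing an entry: one data page per PAGE_SIZE chunk of
 * each queue, plus one header page for each of the two queues.
 */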
#define QPE_NUM_PAGES(_QPE) \
    ((uint32_t)(CEILING(_QPE.produce_size, PAGE_SIZE) + \
    CEILING(_QPE.consume_size, PAGE_SIZE) + 2))

static struct queue_pair_list qp_guest_endpoints;

static struct queue_pair_entry *queue_pair_list_find_entry(
    struct queue_pair_list *qp_list, struct vmci_handle handle);
static void queue_pair_list_add_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry);
static void queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry);
static struct queue_pair_entry *queue_pair_list_get_head(
    struct queue_pair_list *qp_list);
static int queue_pair_notify_peer_local(bool attach,
    struct vmci_handle handle);
static struct qp_guest_endpoint *qp_guest_endpoint_create(
    struct vmci_handle handle, vmci_id peer, uint32_t flags,
    uint64_t produce_size, uint64_t consume_size,
    void *produce_q, void *consume_q);
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry);
static int vmci_queue_pair_alloc_hypercall(
    const struct qp_guest_endpoint *entry);
static int vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
    struct vmci_queue **produce_q, uint64_t produce_size,
    struct vmci_queue **consume_q, uint64_t consume_size,
    vmci_id peer, uint32_t flags,
    vmci_privilege_flags priv_flags);
static int vmci_queue_pair_detach_guest_work(struct vmci_handle handle);
static int vmci_queue_pair_detach_hypercall(struct vmci_handle handle);

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc --
 *
 *    Allocates a VMCI QueuePair. Only checks validity of input arguments. The
 *    real work is done in the host or guest specific function.
 *
 * Results:
 *    VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_alloc(struct vmci_handle *handle, struct vmci_queue **produce_q,
    uint64_t produce_size, struct vmci_queue **consume_q, uint64_t consume_size,
    vmci_id peer, uint32_t flags, vmci_privilege_flags priv_flags)
{

	if (!handle || !produce_q || !consume_q ||
	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_alloc_guest_work(handle, produce_q,
	    produce_size, consume_q, consume_size, peer, flags, priv_flags));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach --
 *
 *    Detaches from a VMCI QueuePair. Only checks the validity of the input
 *    argument. The real work is done in the host or guest specific function.
 *
 * Results:
 *    Success or failure.
 *
 * Side effects:
 *    Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach(struct vmci_handle handle)
{

	if (VMCI_HANDLE_INVALID(handle))
		return (VMCI_ERROR_INVALID_ARGS);

	return (vmci_queue_pair_detach_guest_work(handle));
}
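
/*
 * Usage sketch (illustrative only, not part of the driver): a guest-side
 * consumer of the two entry points above could allocate and later detach
 * a queue pair along these lines, assuming the usual VMCI_INVALID_HANDLE
 * and VMCI_INVALID_ID definitions from the VMCI headers:
 *
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	struct vmci_queue *produce_q, *consume_q;
 *
 *	if (vmci_queue_pair_alloc(&handle, &produce_q, PAGE_SIZE,
 *	    &consume_q, PAGE_SIZE, VMCI_INVALID_ID, 0,
 *	    VMCI_NO_PRIVILEGE_FLAGS) == VMCI_SUCCESS)
 *		(void)vmci_queue_pair_detach(handle);
 */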

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_init --
 *
 *    Initializes the list of QueuePairs.
 *
 * Results:
 *    Success or failure.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static inline int
queue_pair_list_init(struct queue_pair_list *qp_list)
{
	int ret;

	vmci_list_init(&qp_list->head);
	atomic_store_int(&qp_list->hibernate, 0);
	ret = vmci_mutex_init(&qp_list->mutex, "VMCI QP List lock");
	return (ret);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_destroy --
 *
 *    Destroys the list's mutex.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
queue_pair_list_destroy(struct queue_pair_list *qp_list)
{

	vmci_mutex_destroy(&qp_list->mutex);
	vmci_list_init(&qp_list->head);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_find_entry --
 *
 *    Finds the entry in the list corresponding to a given handle. Assumes that
 *    the list is locked.
 *
 * Results:
 *    Pointer to entry.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_find_entry(struct queue_pair_list *qp_list,
    struct vmci_handle handle)
{
	struct queue_pair_entry *next;

	if (VMCI_HANDLE_INVALID(handle))
		return (NULL);

	vmci_list_scan(next, &qp_list->head, list_item) {
		if (VMCI_HANDLE_EQUAL(next->handle, handle))
			return (next);
	}

	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_add_entry --
 *
 *    Adds the given entry to the list. Assumes that the list is locked.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_add_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_insert(&qp_list->head, entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_remove_entry --
 *
 *    Removes the given entry from the list. Assumes that the list is locked.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static void
queue_pair_list_remove_entry(struct queue_pair_list *qp_list,
    struct queue_pair_entry *entry)
{

	if (entry)
		vmci_list_remove(entry, list_item);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_list_get_head --
 *
 *    Returns the entry from the head of the list. Assumes that the list is
 *    locked.
 *
 * Results:
 *    Pointer to entry.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static struct queue_pair_entry *
queue_pair_list_get_head(struct queue_pair_list *qp_list)
{

	return (vmci_list_first(&qp_list->head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_init --
 *
 *    Initializes the data structure state that keeps track of queue pair
 *    guest endpoints.
 *
 * Results:
 *    VMCI_SUCCESS on success and appropriate failure code otherwise.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_qp_guest_endpoints_init(void)
{

	return (queue_pair_list_init(&qp_guest_endpoints));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_exit --
 *
 *    Destroys all guest queue pair endpoints. If active guest queue pairs
 *    still exist, hypercalls to attempt detach from these queue pairs will be
 *    made. Any failure to detach is silently ignored.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_exit(void)
{
	struct qp_guest_endpoint *entry;

	if (!vmci_mutex_initialized(&qp_guest_endpoints.mutex))
		return;

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	while ((entry =
	    (struct qp_guest_endpoint *)queue_pair_list_get_head(
	    &qp_guest_endpoints)) != NULL) {
		/*
		 * Don't make a hypercall for local QueuePairs.
		 */
		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL))
			vmci_queue_pair_detach_hypercall(entry->qp.handle);
		/*
		 * We cannot fail the exit, so let's reset ref_count.
		 */
		entry->qp.ref_count = 0;
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);
		qp_guest_endpoint_destroy(entry);
	}

	atomic_store_int(&qp_guest_endpoints.hibernate, 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	queue_pair_list_destroy(&qp_guest_endpoints);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_qp_guest_endpoints_sync --
 *
 *    Use this as a synchronization point when setting globals, for example,
 *    during device shutdown.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_qp_guest_endpoints_sync(void)
{

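	/*
	 * Acquiring and releasing the list mutex back to back lets any
	 * critical section that was in flight drain before we return.
	 */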
	vmci_mutex_acquire(&qp_guest_endpoints.mutex);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_create --
 *
 *    Allocates and initializes a qp_guest_endpoint structure. Allocates a
 *    QueuePair rid (and handle) iff the given entry has an invalid handle.
 *    0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved handles. Assumes
 *    that the QP list mutex is held by the caller.
 *
 * Results:
 *    Pointer to the initialized structure, or NULL on failure.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle, vmci_id peer,
    uint32_t flags, uint64_t produce_size, uint64_t consume_size,
    void *produce_q, void *consume_q)
{
	struct qp_guest_endpoint *entry;
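	/*
	 * Next candidate resource id; static so that successive calls keep
	 * probing forward from the last rid handed out.
	 */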
	static vmci_id queue_pair_rid = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
	const uint64_t num_ppns = CEILING(produce_size, PAGE_SIZE) +
	    CEILING(consume_size, PAGE_SIZE) +
	    2; /* One page each for the queue headers. */

	ASSERT((produce_size || consume_size) && produce_q && consume_q);

	if (VMCI_HANDLE_INVALID(handle)) {
		vmci_id context_id = vmci_get_context_id();
		vmci_id old_rid = queue_pair_rid;

		/*
		 * Generate a unique QueuePair rid. Keep on trying until we
		 * wrap around in the RID space.
		 */
		ASSERT(old_rid > VMCI_RESERVED_RESOURCE_ID_MAX);
		do {
			handle = VMCI_MAKE_HANDLE(context_id, queue_pair_rid);
			entry =
			    (struct qp_guest_endpoint *)
			    queue_pair_list_find_entry(&qp_guest_endpoints,
			    handle);
			queue_pair_rid++;
			if (UNLIKELY(!queue_pair_rid)) {
				/*
				 * Skip the reserved rids.
				 */
				queue_pair_rid =
				    VMCI_RESERVED_RESOURCE_ID_MAX + 1;
			}
		} while (entry && queue_pair_rid != old_rid);

		if (UNLIKELY(entry != NULL)) {
			ASSERT(queue_pair_rid == old_rid);
			/*
			 * We wrapped around --- no rids were free.
			 */
			return (NULL);
		}
	}

	ASSERT(!VMCI_HANDLE_INVALID(handle) &&
	    queue_pair_list_find_entry(&qp_guest_endpoints, handle) == NULL);
	entry = vmci_alloc_kernel_mem(sizeof(*entry), VMCI_MEMORY_NORMAL);
	if (entry) {
		entry->qp.handle = handle;
		entry->qp.peer = peer;
		entry->qp.flags = flags;
		entry->qp.produce_size = produce_size;
		entry->qp.consume_size = consume_size;
		entry->qp.ref_count = 0;
		entry->num_ppns = num_ppns;
		memset(&entry->ppn_set, 0, sizeof(entry->ppn_set));
		entry->produce_q = produce_q;
		entry->consume_q = consume_q;
	}
	return (entry);
}

/*
 *------------------------------------------------------------------------------
 *
 * qp_guest_endpoint_destroy --
 *
 *    Frees a qp_guest_endpoint structure.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

void
qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{

	ASSERT(entry);
	ASSERT(entry->qp.ref_count == 0);

	vmci_free_ppn_set(&entry->ppn_set);
	vmci_free_queue(entry->produce_q, entry->qp.produce_size);
	vmci_free_queue(entry->consume_q, entry->qp.consume_size);
	vmci_free_kernel_mem(entry, sizeof(*entry));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_hypercall --
 *
 *    Helper to make a QueuePairAlloc hypercall when the driver is
 *    supporting a guest device.
 *
 * Results:
 *    Result of the hypercall.
 *
 * Side effects:
 *    Memory is allocated & freed.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
	struct vmci_queue_pair_alloc_msg *alloc_msg;
	size_t msg_size;
	int result;

	if (!entry || entry->num_ppns <= 2)
		return (VMCI_ERROR_INVALID_ARGS);

	ASSERT(!(entry->qp.flags & VMCI_QPFLAG_LOCAL));

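	/*
	 * The allocation message is followed immediately by the PPN list for
	 * both queues, so size the buffer for the header plus num_ppns
	 * entries.
	 */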
	msg_size = sizeof(*alloc_msg) + (size_t)entry->num_ppns * sizeof(PPN);
	alloc_msg = vmci_alloc_kernel_mem(msg_size, VMCI_MEMORY_NORMAL);
	if (!alloc_msg)
		return (VMCI_ERROR_NO_MEM);

	alloc_msg->hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_ALLOC);
	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
	alloc_msg->handle = entry->qp.handle;
	alloc_msg->peer = entry->qp.peer;
	alloc_msg->flags = entry->qp.flags;
	alloc_msg->produce_size = entry->qp.produce_size;
	alloc_msg->consume_size = entry->qp.consume_size;
	alloc_msg->num_ppns = entry->num_ppns;
	result = vmci_populate_ppn_list((uint8_t *)alloc_msg +
	    sizeof(*alloc_msg), &entry->ppn_set);
	if (result == VMCI_SUCCESS)
		result = vmci_send_datagram((struct vmci_datagram *)alloc_msg);
	vmci_free_kernel_mem(alloc_msg, msg_size);

	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_alloc_guest_work --
 *
 *    This function handles the actual allocation of a VMCI queue pair guest
 *    endpoint. It allocates the physical pages for the queue pair, making OS
 *    dependent calls through generic wrappers.
 *
 * Results:
 *    Success or failure.
 *
 * Side effects:
 *    Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_alloc_guest_work(struct vmci_handle *handle,
    struct vmci_queue **produce_q, uint64_t produce_size,
    struct vmci_queue **consume_q, uint64_t consume_size, vmci_id peer,
    uint32_t flags, vmci_privilege_flags priv_flags)
{
	struct qp_guest_endpoint *queue_pair_entry = NULL;
	void *my_consume_q = NULL;
	void *my_produce_q = NULL;
	const uint64_t num_consume_pages = CEILING(consume_size, PAGE_SIZE) + 1;
	const uint64_t num_produce_pages = CEILING(produce_size, PAGE_SIZE) + 1;
	int result;

	ASSERT(handle && produce_q && consume_q &&
	    (produce_size || consume_size));

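	/* Guest queue pairs can only be created with default privileges. */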
	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
		return (VMCI_ERROR_NO_ACCESS);

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	if ((atomic_load_int(&qp_guest_endpoints.hibernate) == 1) &&
	    !(flags & VMCI_QPFLAG_LOCAL)) {
		/*
		 * While the guest OS is in hibernate state, creating non-local
		 * queue pairs is not allowed after the point where the VMCI
		 * guest driver converted the existing queue pairs to local
		 * ones.
		 */

		result = VMCI_ERROR_UNAVAILABLE;
		goto error;
	}

	if ((queue_pair_entry =
	    (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, *handle)) != NULL) {
		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
			/* Local attach case. */
			if (queue_pair_entry->qp.ref_count > 1) {
				VMCI_LOG_DEBUG(LGPFX"Error attempting to "
				    "attach more than once.\n");
				result = VMCI_ERROR_UNAVAILABLE;
				goto error_keep_entry;
			}

			if (queue_pair_entry->qp.produce_size != consume_size ||
			    queue_pair_entry->qp.consume_size != produce_size ||
			    queue_pair_entry->qp.flags !=
			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
				VMCI_LOG_DEBUG(LGPFX"Error mismatched "
				    "queue pair in local attach.\n");
				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
				goto error_keep_entry;
			}

			/*
			 * Do a local attach. We swap the consume and produce
			 * queues for the attacher and deliver an attach event.
			 */
			result = queue_pair_notify_peer_local(true, *handle);
			if (result < VMCI_SUCCESS)
				goto error_keep_entry;
			my_produce_q = queue_pair_entry->consume_q;
			my_consume_q = queue_pair_entry->produce_q;
			goto out;
		}
		result = VMCI_ERROR_ALREADY_EXISTS;
		goto error_keep_entry;
	}

	my_produce_q = vmci_alloc_queue(produce_size, flags);
	if (!my_produce_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for produce "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	my_consume_q = vmci_alloc_queue(consume_size, flags);
	if (!my_consume_q) {
		VMCI_LOG_WARNING(LGPFX"Error allocating pages for consume "
		    "queue.\n");
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
	    produce_size, consume_size, my_produce_q, my_consume_q);
	if (!queue_pair_entry) {
		VMCI_LOG_WARNING(LGPFX"Error allocating memory in %s.\n",
		    __FUNCTION__);
		result = VMCI_ERROR_NO_MEM;
		goto error;
	}

	result = vmci_alloc_ppn_set(my_produce_q, num_produce_pages,
	    my_consume_q, num_consume_pages, &queue_pair_entry->ppn_set);
	if (result < VMCI_SUCCESS) {
		VMCI_LOG_WARNING(LGPFX"vmci_alloc_ppn_set failed.\n");
		goto error;
	}

	/*
	 * It's only necessary to notify the host if this queue pair will be
	 * attached to from another context.
	 */
	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		/* Local create case. */
		vmci_id context_id = vmci_get_context_id();

		/*
		 * Enforce similar checks on local queue pairs as we do for
		 * regular ones. The handle's context must match the creator
		 * or attacher context id (here they are both the current
		 * context id) and the attach-only flag cannot exist during
		 * create. We also ensure the specified peer is this context
		 * or an invalid one.
		 */
		if (queue_pair_entry->qp.handle.context != context_id ||
		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
		    queue_pair_entry->qp.peer != context_id)) {
			result = VMCI_ERROR_NO_ACCESS;
			goto error;
		}

		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
			result = VMCI_ERROR_NOT_FOUND;
			goto error;
		}
	} else {
		result = vmci_queue_pair_alloc_hypercall(queue_pair_entry);
		if (result < VMCI_SUCCESS) {
			VMCI_LOG_WARNING(
			    LGPFX"vmci_queue_pair_alloc_hypercall result = "
			    "%d.\n", result);
			goto error;
		}
	}

	queue_pair_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

out:
	queue_pair_entry->qp.ref_count++;
	*handle = queue_pair_entry->qp.handle;
	*produce_q = (struct vmci_queue *)my_produce_q;
	*consume_q = (struct vmci_queue *)my_consume_q;

	/*
	 * We should initialize the queue pair header pages on a local queue
	 * pair create. For non-local queue pairs, the hypervisor initializes
	 * the header pages in the create step.
	 */
	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
	    queue_pair_entry->qp.ref_count == 1) {
		vmci_queue_header_init((*produce_q)->q_header, *handle);
		vmci_queue_header_init((*consume_q)->q_header, *handle);
	}

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	return (VMCI_SUCCESS);

error:
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	if (queue_pair_entry) {
		/* The queues will be freed inside the destroy routine. */
		qp_guest_endpoint_destroy(queue_pair_entry);
	} else {
		if (my_produce_q)
			vmci_free_queue(my_produce_q, produce_size);
		if (my_consume_q)
			vmci_free_queue(my_consume_q, consume_size);
	}
	return (result);

error_keep_entry:
	/* This path should only be used when an existing entry was found. */
	ASSERT(queue_pair_entry->qp.ref_count > 0);
	vmci_mutex_release(&qp_guest_endpoints.mutex);
	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_hypercall --
 *
 *    Helper to make a QueuePairDetach hypercall when the driver is supporting
 *    a guest device.
 *
 * Results:
 *    Result of the hypercall.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_queue_pair_detach_hypercall(struct vmci_handle handle)
{
	struct vmci_queue_pair_detach_msg detach_msg;

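	/* The detach message payload is just the queue pair handle. */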
	detach_msg.hdr.dst = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_QUEUEPAIR_DETACH);
	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
	detach_msg.hdr.payload_size = sizeof(handle);
	detach_msg.handle = handle;

	return (vmci_send_datagram((struct vmci_datagram *)&detach_msg));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_pair_detach_guest_work --
 *
 *    Helper for the VMCI QueuePair detach interface. Frees the physical pages
 *    for the queue pair.
 *
 * Results:
 *    Success or failure.
 *
 * Side effects:
 *    Memory may be freed.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_queue_pair_detach_guest_work(struct vmci_handle handle)
{
	struct qp_guest_endpoint *entry;
	int result;
	uint32_t ref_count;

	ASSERT(!VMCI_HANDLE_INVALID(handle));

	vmci_mutex_acquire(&qp_guest_endpoints.mutex);

	entry = (struct qp_guest_endpoint *)queue_pair_list_find_entry(
	    &qp_guest_endpoints, handle);
	if (!entry) {
		vmci_mutex_release(&qp_guest_endpoints.mutex);
		return (VMCI_ERROR_NOT_FOUND);
	}

	ASSERT(entry->qp.ref_count >= 1);

	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
		result = VMCI_SUCCESS;

		if (entry->qp.ref_count > 1) {
			result = queue_pair_notify_peer_local(false, handle);

			/*
			 * We can fail to notify a local queuepair because we
			 * can't allocate. We still want to release the entry
			 * if that happens, so don't bail out yet.
			 */
		}
	} else {
		result = vmci_queue_pair_detach_hypercall(handle);
		if (entry->hibernate_failure) {
			if (result == VMCI_ERROR_NOT_FOUND) {
				/*
				 * If a queue pair detach failed when entering
				 * hibernation, the guest driver and the device
				 * may disagree on its existence when coming
				 * out of hibernation. The guest driver will
				 * regard it as a non-local queue pair, but
				 * the device state is gone, since the device
				 * has been powered off. In this case, we
				 * treat the queue pair as a local queue pair
				 * with no peer.
				 */

				ASSERT(entry->qp.ref_count == 1);
				result = VMCI_SUCCESS;
			}
		}
		if (result < VMCI_SUCCESS) {
			/*
			 * We failed to notify a non-local queuepair. That
			 * other queuepair might still be accessing the shared
			 * memory, so don't release the entry yet. It will get
			 * cleaned up by vmci_qp_guest_endpoints_exit() if
			 * necessary (assuming we are going away, otherwise
			 * why did this fail?).
			 */

			vmci_mutex_release(&qp_guest_endpoints.mutex);
			return (result);
		}
	}

	/*
	 * If we get here then we either failed to notify a local queuepair, or
	 * we succeeded in all cases. Release the entry if required.
	 */

	entry->qp.ref_count--;
	if (entry->qp.ref_count == 0)
		queue_pair_list_remove_entry(&qp_guest_endpoints, &entry->qp);

	/* If we didn't remove the entry, this could change once we unlock. */
	ref_count = entry ? entry->qp.ref_count :
	    0xffffffff; /*
			 * Value does not matter, silence the
			 * compiler.
			 */

	vmci_mutex_release(&qp_guest_endpoints.mutex);

	if (ref_count == 0)
		qp_guest_endpoint_destroy(entry);
	return (result);
}

/*
 *------------------------------------------------------------------------------
 *
 * queue_pair_notify_peer_local --
 *
 *    Dispatches a queue pair event message directly into the local event
 *    queue.
 *
 * Results:
 *    VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *    None.
 *
 *------------------------------------------------------------------------------
 */

static int
queue_pair_notify_peer_local(bool attach, struct vmci_handle handle)
{
	struct vmci_event_msg *e_msg;
	struct vmci_event_payload_qp *e_payload;
	/* buf is only 48 bytes. */
	char buf[sizeof(*e_msg) + sizeof(*e_payload)];
	vmci_id context_id;

	context_id = vmci_get_context_id();
	e_msg = (struct vmci_event_msg *)buf;
	e_payload = vmci_event_msg_payload(e_msg);

	e_msg->hdr.dst = VMCI_MAKE_HANDLE(context_id, VMCI_EVENT_HANDLER);
	e_msg->hdr.src = VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
	    VMCI_CONTEXT_RESOURCE_ID);
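	/* The datagram payload size does not include the header itself. */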
	e_msg->hdr.payload_size = sizeof(*e_msg) + sizeof(*e_payload) -
	    sizeof(e_msg->hdr);
	e_msg->event_data.event = attach ? VMCI_EVENT_QP_PEER_ATTACH :
	    VMCI_EVENT_QP_PEER_DETACH;
	e_payload->peer_id = context_id;
	e_payload->handle = handle;

	return (vmci_event_dispatch((struct vmci_datagram *)e_msg));
}