/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* Defines and helper functions for the VMCI kernel interface. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <machine/bus.h>

#include "vmci.h"
#include "vmci_defs.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"
#include "vmci_queue.h"

struct vmci_queue_kernel_if {
        size_t                  num_pages;      /* Num pages incl. header. */
        struct vmci_dma_alloc   *dmas;          /* For dma alloc. */
};
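
/*
 * vmci_alloc_queue() below carves the struct vmci_queue, this kernel_if
 * structure and the dmas array out of one contiguous allocation, so
 * kernel_if and dmas simply point just past the structures that precede
 * them.
 */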

/*
 *------------------------------------------------------------------------------
 *
 * vmci_init_lock
 *
 *      Initializes the lock. Must be called before use.
 *
 * Results:
 *      Always VMCI_SUCCESS.
 *
 * Side effects:
 *      Thread can block.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_init_lock(vmci_lock *lock, char *name)
{

        mtx_init(lock, name, NULL, MTX_DEF | MTX_NOWITNESS);
        return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_cleanup_lock
 *
 *      Cleans up the lock. Must be called before deallocating the lock.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Deletes kernel lock state
 *
 *------------------------------------------------------------------------------
 */

void
vmci_cleanup_lock(vmci_lock *lock)
{

        if (mtx_initialized(lock))
                mtx_destroy(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock
 *
 *      Grabs the given lock.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      Thread can block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock(vmci_lock *lock)
{

        mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock
 *
 *      Releases the given lock.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      A thread blocked on this lock may wake up.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock(vmci_lock *lock)
{

        mtx_unlock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock_bh
 *
 *      Grabs the given lock.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock_bh(vmci_lock *lock)
{

        mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock_bh
 *
 *      Releases the given lock.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock_bh(vmci_lock *lock)
{

        mtx_unlock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_initialized_lock
 *
 *      Returns whether a lock has been initialized.
 *
 * Results:
 *      Return 1 if initialized or 0 if uninitialized.
 *
 * Side effects:
 *      None
 *
 *------------------------------------------------------------------------------
 */

int
vmci_initialized_lock(vmci_lock *lock)
{

        return (mtx_initialized(lock));
}
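
/*
 * Illustrative pairing of the lock helpers (the lock name and the critical
 * section are placeholders):
 *
 *      vmci_lock lock;
 *
 *      vmci_init_lock(&lock, "example lock");
 *      vmci_grab_lock(&lock);
 *      ... critical section ...
 *      vmci_release_lock(&lock);
 *      vmci_cleanup_lock(&lock);
 */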

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_kernel_mem
 *
 *      Allocate physically contiguous memory for the VMCI driver.
 *
 * Results:
 *      The address allocated or NULL on error.
 *
 * Side effects:
 *      Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_kernel_mem(size_t size, int flags)
{
        void *ptr;

        if ((flags & VMCI_MEMORY_ATOMIC) != 0)
                ptr = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xFFFFFFFF,
                    8, 1024 * 1024);
        else
                ptr = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, 0xFFFFFFFF,
                    8, 1024 * 1024);

        return (ptr);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_kernel_mem
 *
 *      Free kernel memory allocated for the VMCI driver.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_kernel_mem(void *ptr, size_t size)
{

        contigfree(ptr, size, M_DEVBUF);
}
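
/*
 * Usage sketch (the size is a placeholder): allocations made without
 * VMCI_MEMORY_ATOMIC use M_WAITOK and may sleep, so they must not be
 * requested while holding a mutex. Each allocation is paired with a free
 * of the same size.
 *
 *      void *buf = vmci_alloc_kernel_mem(PAGE_SIZE, VMCI_MEMORY_NORMAL);
 *      if (buf != NULL)
 *              vmci_free_kernel_mem(buf, PAGE_SIZE);
 */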

/*
 *------------------------------------------------------------------------------
 *
 * vmci_can_schedule_delayed_work --
 *
 *      Checks to see if the given platform supports delayed work callbacks.
 *
 * Results:
 *      true if it does. false otherwise.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_can_schedule_delayed_work(void)
{

        return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_schedule_delayed_work --
 *
 *      Schedule the specified callback.
 *
 * Results:
 *      Zero on success, error code otherwise.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data)
{

        return (vmci_schedule_delayed_work_fn(work_fn, data));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_create_event --
 *
 *      Initializes the event as a semaphore with an initial count of zero.
 *      Must be called before the event is used.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_create_event(vmci_event *event)
{

        sema_init(event, 0, "vmci_event");
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_destroy_event --
 *
 *      Destroys the event.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_destroy_event(vmci_event *event)
{

        if (mtx_owned(&event->sema_mtx))
                sema_destroy(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_signal_event --
 *
 *      Signals the event, waking up a thread blocked on it, if any.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_signal_event(vmci_event *event)
{

        sema_post(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_wait_on_event --
 *
 *      Invokes the release callback and then blocks until the event is
 *      signalled.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
    void *client_data)
{

        release_cb(client_data);
        sema_wait(event);
}
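
/*
 * Usage sketch (the callback and lock names are illustrative placeholders):
 * the release callback runs before the wait blocks, which lets the waiting
 * thread drop a lock it holds until another context calls
 * vmci_signal_event() on the same event.
 *
 *      static void
 *      drop_lock_cb(void *data)
 *      {
 *              vmci_release_lock(data);
 *      }
 *
 *      vmci_create_event(&event);
 *      vmci_grab_lock(&lock);
 *      vmci_wait_on_event(&event, drop_lock_cb, &lock);
 *      ...
 *      vmci_destroy_event(&event);
 */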

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_init --
 *
 *      Initializes the mutex. Must be called before use.
 *
 * Results:
 *      Always VMCI_SUCCESS.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_mutex_init(vmci_mutex *mutex, char *name)
{

        mtx_init(mutex, name, NULL, MTX_DEF | MTX_NOWITNESS);
        return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_destroy --
 *
 *      Destroys the mutex.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_destroy(vmci_mutex *mutex)
{

        mtx_destroy(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_acquire --
 *
 *      Acquires the mutex.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Thread may block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_acquire(vmci_mutex *mutex)
{

        mtx_lock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_release --
 *
 *      Releases the mutex.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      May wake up the thread blocking on this mutex.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_release(vmci_mutex *mutex)
{

        mtx_unlock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_initialized
 *
 *      Returns whether a mutex has been initialized.
 *
 * Results:
 *      Return 1 if initialized or 0 if uninitialized.
 *
 * Side effects:
 *      None
 *
 *------------------------------------------------------------------------------
 */

int
vmci_mutex_initialized(vmci_mutex *mutex)
{

        return (mtx_initialized(mutex));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_queue --
 *
 *      Allocates kernel queue pages of specified size with IOMMU mappings,
 *      plus space for the queue structure/kernel interface and the queue
 *      header.
 *
 * Results:
 *      Pointer to the queue on success, NULL otherwise.
 *
 * Side effects:
 *      Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_queue(uint64_t size, uint32_t flags)
{
        struct vmci_queue *queue;
        size_t i;
        const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
        const size_t dmas_size = num_pages * sizeof(struct vmci_dma_alloc);
        const size_t queue_size =
            sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;

        /* Size should be enforced by vmci_qpair_alloc(), double-check here. */
        if (size > VMCI_MAX_GUEST_QP_MEMORY) {
                ASSERT(false);
                return (NULL);
        }

        queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
        if (!queue)
                return (NULL);

        queue->q_header = NULL;
        queue->saved_header = NULL;
        queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
        queue->kernel_if->num_pages = num_pages;
        queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
            1);
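
        /*
         * dmas[0] will back the queue header and dmas[1 .. num_pages - 1]
         * will back the data pages; if any page allocation fails, the queue
         * is torn down again and NULL is returned.
         */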
        for (i = 0; i < num_pages; i++) {
                vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
                if (!queue->kernel_if->dmas[i].dma_vaddr) {
                        /* Size excl. the header. */
                        vmci_free_queue(queue, i * PAGE_SIZE);
                        return (NULL);
                }
        }

        /* Queue header is the first page. */
        queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;

        return ((void *)queue);
}
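
/*
 * Illustrative pairing (the size is a placeholder): the size passed to both
 * routines excludes the header page, which each routine accounts for itself.
 *
 *      void *q = vmci_alloc_queue(2 * PAGE_SIZE, 0);
 *      if (q != NULL)
 *              vmci_free_queue(q, 2 * PAGE_SIZE);
 */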

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_queue --
 *
 *      Frees kernel VA space for a given queue and its queue header, and frees
 *      physical data pages.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_queue(void *q, uint64_t size)
{
        struct vmci_queue *queue = q;

        if (queue) {
                const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
                uint64_t i;

                /* Given size doesn't include header, so add in a page here. */
                for (i = 0; i < num_pages; i++)
                        vmci_dma_free(&queue->kernel_if->dmas[i]);
                free(queue, M_DEVBUF);
        }
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_ppn_set --
 *
 *      Allocates two lists of PPNs --- one for the pages in the produce
 *      queue, and the other for the pages in the consume queue. Initializes
 *      the lists of PPNs with the page frame numbers of the KVA for the two
 *      queues (and the queue headers).
 *
 * Results:
 *      Success or failure.
 *
 * Side effects:
 *      Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_pages, void *cons_q,
    uint64_t num_consume_pages, struct ppn_set *ppn_set)
{
        struct vmci_queue *consume_q = cons_q;
        struct vmci_queue *produce_q = prod_q;
        vmci_ppn_list consume_ppns;
        vmci_ppn_list produce_ppns;
        uint64_t i;

        if (!produce_q || !num_produce_pages || !consume_q ||
            !num_consume_pages || !ppn_set)
                return (VMCI_ERROR_INVALID_ARGS);

        if (ppn_set->initialized)
                return (VMCI_ERROR_ALREADY_EXISTS);

        produce_ppns =
            vmci_alloc_kernel_mem(num_produce_pages * sizeof(*produce_ppns),
            VMCI_MEMORY_NORMAL);
        if (!produce_ppns)
                return (VMCI_ERROR_NO_MEM);

        consume_ppns =
            vmci_alloc_kernel_mem(num_consume_pages * sizeof(*consume_ppns),
            VMCI_MEMORY_NORMAL);
        if (!consume_ppns) {
                vmci_free_kernel_mem(produce_ppns,
                    num_produce_pages * sizeof(*produce_ppns));
                return (VMCI_ERROR_NO_MEM);
        }

        for (i = 0; i < num_produce_pages; i++) {
                unsigned long pfn;

                produce_ppns[i] =
                    pfn = produce_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

                /*
                 * Fail allocation if the PFN is wider than a PPN list entry
                 * and was truncated above, since the hypervisor would not be
                 * able to reference such a page.
                 */

                if (sizeof(pfn) >
                    sizeof(*produce_ppns) && pfn != produce_ppns[i])
                        goto ppn_error;
        }
        for (i = 0; i < num_consume_pages; i++) {
                unsigned long pfn;

                consume_ppns[i] =
                    pfn = consume_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

                /*
                 * Fail allocation if the PFN is wider than a PPN list entry
                 * and was truncated above, since the hypervisor would not be
                 * able to reference such a page.
                 */

                if (sizeof(pfn) >
                    sizeof(*consume_ppns) && pfn != consume_ppns[i])
                        goto ppn_error;
        }

        ppn_set->num_produce_pages = num_produce_pages;
        ppn_set->num_consume_pages = num_consume_pages;
        ppn_set->produce_ppns = produce_ppns;
        ppn_set->consume_ppns = consume_ppns;
        ppn_set->initialized = true;
        return (VMCI_SUCCESS);

ppn_error:
        vmci_free_kernel_mem(produce_ppns, num_produce_pages *
            sizeof(*produce_ppns));
        vmci_free_kernel_mem(consume_ppns, num_consume_pages *
            sizeof(*consume_ppns));
        return (VMCI_ERROR_INVALID_ARGS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_ppn_set --
 *
 *      Frees the two lists of PPNs for a queue pair.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_ppn_set(struct ppn_set *ppn_set)
{

        ASSERT(ppn_set);
        if (ppn_set->initialized) {
                /* Do not call these functions on NULL inputs. */
                ASSERT(ppn_set->produce_ppns && ppn_set->consume_ppns);
                vmci_free_kernel_mem(ppn_set->produce_ppns,
                    ppn_set->num_produce_pages *
                    sizeof(*ppn_set->produce_ppns));
                vmci_free_kernel_mem(ppn_set->consume_ppns,
                    ppn_set->num_consume_pages *
                    sizeof(*ppn_set->consume_ppns));
        }
        memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_populate_ppn_list --
 *
 *      Populates the list of PPNs in the hypercall structure with the PPNs
 *      of the produce queue and the consume queue.
 *
 * Results:
 *      VMCI_SUCCESS.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppn_set)
{

        ASSERT(call_buf && ppn_set && ppn_set->initialized);
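
        /*
         * The hypercall buffer receives the produce queue PPNs first,
         * immediately followed by the consume queue PPNs.
         */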
        memcpy(call_buf, ppn_set->produce_ppns,
            ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
        memcpy(call_buf + ppn_set->num_produce_pages *
            sizeof(*ppn_set->produce_ppns), ppn_set->consume_ppns,
            ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

        return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_{to,from}iovec --
 *
 *      These helper routines copy the specified bytes to/from memory that is
 *      specified as a struct iovec. The routines cannot verify the correctness
 *      of the struct iovec's contents.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      The iovec's iov_base and iov_len fields are advanced in place as bytes
 *      are copied.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_memcpy_toiovec(struct iovec *iov, uint8_t *src, size_t len)
{

        while (len > 0) {
                if (iov->iov_len) {
                        size_t to_copy = MIN(iov->iov_len, len);
                        memcpy(iov->iov_base, src, to_copy);
                        src += to_copy;
                        len -= to_copy;
                        iov->iov_base = (void *)((uintptr_t) iov->iov_base +
                            to_copy);
                        iov->iov_len -= to_copy;
                }
                iov++;
        }
}

static inline void
vmci_memcpy_fromiovec(uint8_t *dst, struct iovec *iov, size_t len)
{

        while (len > 0) {
                if (iov->iov_len) {
                        size_t to_copy = MIN(iov->iov_len, len);
                        memcpy(dst, iov->iov_base, to_copy);
                        dst += to_copy;
                        len -= to_copy;
                        iov->iov_base = (void *)((uintptr_t) iov->iov_base +
                            to_copy);
                        iov->iov_len -= to_copy;
                }
                iov++;
        }
}
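
/*
 * Illustrative example (hypothetical sizes): copying 6000 bytes through an
 * iovec whose first entry describes 4096 bytes consumes all of entry 0 and
 * 1904 bytes of entry 1, leaving iov[1].iov_base advanced and iov[1].iov_len
 * reduced by 1904.
 */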

/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_to_queue --
 *
 *      Copies from a given buffer or iovector to a VMCI Queue. Assumes that
 *      offset + size does not wrap around in the queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

#pragma GCC diagnostic ignored "-Wcast-qual"
static int
__vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t size, bool is_iovec)
{
        struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const uint64_t page_index =
                    (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                /* Skip header. */
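                /*
                 * Illustrative example (assuming 4 KB pages): queue_offset
                 * 5000 with no bytes copied yet yields page_index 1 and
                 * page_offset 904, so the copy lands 904 bytes into the page
                 * backed by dmas[2].
                 */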
                va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

                ASSERT(va);
                /*
                 * Fill up the page if we have enough payload, or else
                 * copy the remaining bytes.
                 */
                to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)src;

                        /* The iovec will track bytes_copied internally. */
                        vmci_memcpy_fromiovec((uint8_t *)va + page_offset,
                            iov, to_copy);
                } else
                        memcpy((uint8_t *)va + page_offset,
                            (uint8_t *)src + bytes_copied, to_copy);
                bytes_copied += to_copy;
        }

        return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_from_queue --
 *
 *      Copies to a given buffer or iovector from a VMCI Queue. Assumes that
 *      offset + size does not wrap around in the queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

static int
__vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
    uint64_t queue_offset, size_t size, bool is_iovec)
{
        struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const uint64_t page_index =
                    (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                /* Skip header. */
                va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

                ASSERT(va);
                /*
                 * Fill up the page if we have enough payload, or else
                 * copy the remaining bytes.
                 */
                to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

                if (is_iovec) {
                        struct iovec *iov = (struct iovec *)dest;

                        /* The iovec will track bytes_copied internally. */
                        vmci_memcpy_toiovec(iov, (uint8_t *)va +
                            page_offset, to_copy);
                } else
                        memcpy((uint8_t *)dest + bytes_copied,
                            (uint8_t *)va + page_offset, to_copy);

                bytes_copied += to_copy;
        }

        return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue --
 *
 *      Copies from a given buffer to a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

        ASSERT(can_block);

        return (__vmci_memcpy_to_queue(queue, queue_offset,
            (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue --
 *
 *      Copies to a given buffer from a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

        ASSERT(can_block);

        return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
            queue, queue_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_local --
 *
 *      Copies from a given buffer to a local VMCI queue. This is the
 *      same as a regular copy.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

        ASSERT(can_block);

        return (__vmci_memcpy_to_queue(queue, queue_offset,
            (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_local --
 *
 *      Copies to a given buffer from a local VMCI queue. This is the same as
 *      a regular copy.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_local(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

        ASSERT(can_block);

        return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
            queue, queue_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_v --
 *
 *      Copies from a given iovec to a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

        ASSERT(can_block);

        /*
         * We ignore src_offset because src is really a struct iovec * and will
         * maintain offset internally.
         */
        return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
            true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_v --
 *
 *      Copies to a given iovec from a VMCI Queue.
 *
 * Results:
 *      Zero on success, negative error code on failure.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_v(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

        ASSERT(can_block);

        /*
         * We ignore dest_offset because dest is really a struct iovec * and
         * will maintain offset internally.
         */
        return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
            true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_read_port_bytes --
 *
 *      Copy memory from an I/O port to kernel memory.
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port, uint8_t *buffer,
    size_t buffer_length)
{

        insb(port, buffer, buffer_length);
}