xref: /freebsd/sys/dev/vmware/vmci/vmci_kernel_if.c (revision 685dc743)
/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements defines and helper functions. */

#include <sys/cdefs.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/uio.h>

#include <machine/bus.h>

#include "vmci.h"
#include "vmci_defs.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"
#include "vmci_queue.h"

struct vmci_queue_kernel_if {
	size_t			num_pages;	/* Num pages incl. header. */
	struct vmci_dma_alloc	*dmas;		/* For dma alloc. */
};

/*
 *------------------------------------------------------------------------------
 *
 * vmci_init_lock
 *
 *     Initializes the lock. Must be called before use.
 *
 * Results:
 *     Always VMCI_SUCCESS.
 *
 * Side effects:
 *     Thread can block.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_init_lock(vmci_lock *lock, char *name)
{

	mtx_init(lock, name, NULL, MTX_DEF | MTX_NOWITNESS);
	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_cleanup_lock
 *
 *     Cleans up the lock. Must be called before deallocating the lock.
 *
 * Results:
 *     None
 *
 * Side effects:
 *     Deletes kernel lock state
 *
 *------------------------------------------------------------------------------
 */

void
vmci_cleanup_lock(vmci_lock *lock)
{

	if (mtx_initialized(lock))
		mtx_destroy(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock
 *
 *     Grabs the given lock.
 *
 * Results:
 *     None
 *
 * Side effects:
 *     Thread can block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock(vmci_lock *lock)
{

	mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock
 *
 *     Releases the given lock.
 *
 * Results:
 *     None
 *
 * Side effects:
 *     A thread blocked on this lock may wake up.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock(vmci_lock *lock)
{

	mtx_unlock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_grab_lock_bh
 *
 *     Grabs the given lock.
 *
 * Results:
 *     None
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_grab_lock_bh(vmci_lock *lock)
{

	mtx_lock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_release_lock_bh
 *
 *     Releases the given lock.
 *
 * Results:
 *     None
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_release_lock_bh(vmci_lock *lock)
{

	mtx_unlock(lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_initialized_lock
 *
 *     Returns whether a lock has been initialized.
 *
 * Results:
 *     Return 1 if initialized or 0 if uninitialized.
 *
 * Side effects:
 *     None
 *
 *------------------------------------------------------------------------------
 */

int
vmci_initialized_lock(vmci_lock *lock)
{

	return (mtx_initialized(lock));
}

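/*
 * Usage sketch (illustrative only, not part of the driver): the expected
 * lifecycle of a vmci_lock with the wrappers above. The lock name below is
 * hypothetical.
 *
 *	vmci_lock lock;
 *
 *	vmci_init_lock(&lock, "vmci_example_lock");
 *	vmci_grab_lock(&lock);
 *	... touch state protected by the lock ...
 *	vmci_release_lock(&lock);
 *	vmci_cleanup_lock(&lock);
 */
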
/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_kernel_mem
 *
 *     Allocate physically contiguous memory for the VMCI driver.
 *
 * Results:
 *     The address allocated or NULL on error.
 *
 * Side effects:
 *     Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_kernel_mem(size_t size, int flags)
{
	void *ptr;

	if ((flags & VMCI_MEMORY_ATOMIC) != 0)
		ptr = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xFFFFFFFF,
		    8, 1024 * 1024);
	else
		ptr = contigmalloc(size, M_DEVBUF, M_WAITOK, 0, 0xFFFFFFFF,
		    8, 1024 * 1024);

	return (ptr);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_kernel_mem
 *
 *     Free kernel memory allocated for the VMCI driver.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_kernel_mem(void *ptr, size_t size)
{

	contigfree(ptr, size, M_DEVBUF);
}

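/*
 * Usage sketch (illustrative only): vmci_alloc_kernel_mem() hands back
 * physically contiguous memory below 4GB; callers pass VMCI_MEMORY_ATOMIC
 * when sleeping is not allowed and must free with the same size they
 * allocated. The structure name below is hypothetical.
 *
 *	struct foo *buf;
 *
 *	buf = vmci_alloc_kernel_mem(sizeof(*buf), VMCI_MEMORY_NORMAL);
 *	if (buf == NULL)
 *		return (VMCI_ERROR_NO_MEM);
 *	... use buf ...
 *	vmci_free_kernel_mem(buf, sizeof(*buf));
 */
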
/*
 *------------------------------------------------------------------------------
 *
 * vmci_can_schedule_delayed_work --
 *
 *     Checks to see if the given platform supports delayed work callbacks.
 *
 * Results:
 *     true if it does. false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_can_schedule_delayed_work(void)
{

	return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_schedule_delayed_work --
 *
 *     Schedule the specified callback.
 *
 * Results:
 *     Zero on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data)
{

	return (vmci_schedule_delayed_work_fn(work_fn, data));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_create_event --
 *
 *     Initializes the event. Must be called before use.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_create_event(vmci_event *event)
{

	sema_init(event, 0, "vmci_event");
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_destroy_event --
 *
 *     Destroys the event's semaphore.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_destroy_event(vmci_event *event)
{

	if (mtx_owned(&event->sema_mtx))
		sema_destroy(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_signal_event --
 *
 *     Signals the event, waking a thread waiting on it (if any).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_signal_event(vmci_event *event)
{

	sema_post(event);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_wait_on_event --
 *
 *     Calls release_cb to release the caller's resource, then blocks until
 *     the event is signalled.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
    void *client_data)
{

	release_cb(client_data);
	sema_wait(event);
}

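/*
 * Usage sketch (illustrative only): a vmci_event is a counting semaphore.
 * One thread blocks in vmci_wait_on_event() after its release callback has
 * dropped whatever resource it holds; another thread wakes it with
 * vmci_signal_event(). The callback and lock names below are hypothetical.
 *
 *	static void
 *	example_release_cb(void *client_data)
 *	{
 *		vmci_release_lock((vmci_lock *)client_data);
 *	}
 *
 *	vmci_create_event(&event);
 *	vmci_grab_lock(&lock);
 *	vmci_wait_on_event(&event, example_release_cb, &lock);
 *	...
 *	vmci_signal_event(&event);	(called from another thread)
 *	vmci_destroy_event(&event);
 */
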
/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_init --
 *
 *     Initializes the mutex. Must be called before use.
 *
 * Results:
 *     Success.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_mutex_init(vmci_mutex *mutex, char *name)
{

	mtx_init(mutex, name, NULL, MTX_DEF | MTX_NOWITNESS);
	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_destroy --
 *
 *     Destroys the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_destroy(vmci_mutex *mutex)
{

	mtx_destroy(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_acquire --
 *
 *     Acquires the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Thread may block.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_acquire(vmci_mutex *mutex)
{

	mtx_lock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_release --
 *
 *     Releases the mutex.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     May wake up the thread blocking on this mutex.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_mutex_release(vmci_mutex *mutex)
{

	mtx_unlock(mutex);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_mutex_initialized
 *
 *     Returns whether a mutex has been initialized.
 *
 * Results:
 *     Return 1 if initialized or 0 if uninitialized.
 *
 * Side effects:
 *     None
 *
 *------------------------------------------------------------------------------
 */

int
vmci_mutex_initialized(vmci_mutex *mutex)
{

	return (mtx_initialized(mutex));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_queue --
 *
 *     Allocates kernel queue pages of specified size with IOMMU mappings, plus
 *     space for the queue structure/kernel interface and the queue header.
 *
 * Results:
 *     Pointer to the queue on success, NULL otherwise.
 *
 * Side effects:
 *     Memory is allocated.
 *
 *------------------------------------------------------------------------------
 */

void *
vmci_alloc_queue(uint64_t size, uint32_t flags)
{
	struct vmci_queue *queue;
	size_t i;
	const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
	const size_t dmas_size = num_pages * sizeof(struct vmci_dma_alloc);
	const size_t queue_size =
	    sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;

	/* Size should be enforced by vmci_qpair_alloc(), double-check here. */
	if (size > VMCI_MAX_GUEST_QP_MEMORY) {
		ASSERT(false);
		return (NULL);
	}

	queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
	if (!queue)
		return (NULL);

	queue->q_header = NULL;
	queue->saved_header = NULL;
	queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
	queue->kernel_if->num_pages = num_pages;
	queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
	    1);
	for (i = 0; i < num_pages; i++) {
		vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
		if (!queue->kernel_if->dmas[i].dma_vaddr) {
			/* Size excl. the header. */
			vmci_free_queue(queue, i * PAGE_SIZE);
			return (NULL);
		}
	}

	/* Queue header is the first page. */
	queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;

	return ((void *)queue);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_queue --
 *
 *     Frees kernel VA space for a given queue and its queue header, and frees
 *     physical data pages.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Memory is freed.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_queue(void *q, uint64_t size)
{
	struct vmci_queue *queue = q;

	if (queue) {
		const size_t num_pages = CEILING(size, PAGE_SIZE) + 1;
		uint64_t i;

		/* Given size doesn't include header, so add in a page here. */
		for (i = 0; i < num_pages; i++)
			vmci_dma_free(&queue->kernel_if->dmas[i]);
		free(queue, M_DEVBUF);
	}
}

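/*
 * Layout note and usage sketch (illustrative only): vmci_alloc_queue() packs
 * the queue, its kernel_if and the dmas array into one malloc'd block, then
 * backs each page with its own DMA allocation; page 0 holds the queue header
 * and pages 1..n hold data. For a hypothetical queue with two data pages:
 *
 *	size      = 2 * PAGE_SIZE			(data only)
 *	num_pages = CEILING(size, PAGE_SIZE) + 1 = 3	(header + 2 data pages)
 *
 *	queue = vmci_alloc_queue(2 * PAGE_SIZE, 0);
 *	...
 *	vmci_free_queue(queue, 2 * PAGE_SIZE);		(same size; the header
 *							 page is freed implicitly)
 */
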
/*
 *------------------------------------------------------------------------------
 *
 * vmci_alloc_ppn_set --
 *
 *     Allocates two lists of PPNs --- one for the pages in the produce queue,
 *     and the other for the pages in the consume queue. Initializes the lists
 *     of PPNs with the page frame numbers of the KVA for the two queues (and
 *     the queue headers).
 *
 * Results:
 *     Success or failure.
 *
 * Side effects:
 *     Memory may be allocated.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_alloc_ppn_set(void *prod_q, uint64_t num_produce_pages, void *cons_q,
    uint64_t num_consume_pages, struct ppn_set *ppn_set)
{
	struct vmci_queue *consume_q = cons_q;
	struct vmci_queue *produce_q = prod_q;
	vmci_ppn_list consume_ppns;
	vmci_ppn_list produce_ppns;
	uint64_t i;

	if (!produce_q || !num_produce_pages || !consume_q ||
	    !num_consume_pages || !ppn_set)
		return (VMCI_ERROR_INVALID_ARGS);

	if (ppn_set->initialized)
		return (VMCI_ERROR_ALREADY_EXISTS);

	produce_ppns =
	    vmci_alloc_kernel_mem(num_produce_pages * sizeof(*produce_ppns),
	    VMCI_MEMORY_NORMAL);
	if (!produce_ppns)
		return (VMCI_ERROR_NO_MEM);

	consume_ppns =
	    vmci_alloc_kernel_mem(num_consume_pages * sizeof(*consume_ppns),
	    VMCI_MEMORY_NORMAL);
	if (!consume_ppns) {
		vmci_free_kernel_mem(produce_ppns,
		    num_produce_pages * sizeof(*produce_ppns));
		return (VMCI_ERROR_NO_MEM);
	}

	for (i = 0; i < num_produce_pages; i++) {
		unsigned long pfn;

		produce_ppns[i] =
		    pfn = produce_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

		/*
		 * Fail allocation if PFN isn't supported by hypervisor.
		 */

		if (sizeof(pfn) >
		    sizeof(*produce_ppns) && pfn != produce_ppns[i])
			goto ppn_error;
	}
	for (i = 0; i < num_consume_pages; i++) {
		unsigned long pfn;

		consume_ppns[i] =
		    pfn = consume_q->kernel_if->dmas[i].dma_paddr >> PAGE_SHIFT;

		/*
		 * Fail allocation if PFN isn't supported by hypervisor.
		 */

		if (sizeof(pfn) >
		    sizeof(*consume_ppns) && pfn != consume_ppns[i])
			goto ppn_error;
	}

	ppn_set->num_produce_pages = num_produce_pages;
	ppn_set->num_consume_pages = num_consume_pages;
	ppn_set->produce_ppns = produce_ppns;
	ppn_set->consume_ppns = consume_ppns;
	ppn_set->initialized = true;
	return (VMCI_SUCCESS);

ppn_error:
	vmci_free_kernel_mem(produce_ppns, num_produce_pages *
	    sizeof(*produce_ppns));
	vmci_free_kernel_mem(consume_ppns, num_consume_pages *
	    sizeof(*consume_ppns));
	return (VMCI_ERROR_INVALID_ARGS);
}

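/*
 * Note on the overflow check above (a reading of the code, not a statement of
 * the VMCI spec): each PPN is the page's DMA physical address shifted right
 * by PAGE_SHIFT. If the PPN list entries are narrower than an unsigned long
 * (e.g. 32-bit PPNs on a 64-bit kernel), a sufficiently high physical page is
 * silently truncated on assignment; with 4KB pages a 32-bit PPN can only
 * name pages below 16TB (2^32 * 4KB). Comparing the stored entry with the
 * original pfn catches that truncation, and the allocation fails rather than
 * handing the hypervisor a bogus page number.
 */
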
/*
 *------------------------------------------------------------------------------
 *
 * vmci_free_ppn_set --
 *
 *     Frees the two lists of PPNs for a queue pair.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_free_ppn_set(struct ppn_set *ppn_set)
{

	ASSERT(ppn_set);
	if (ppn_set->initialized) {
		/* Do not call these functions on NULL inputs. */
		ASSERT(ppn_set->produce_ppns && ppn_set->consume_ppns);
		vmci_free_kernel_mem(ppn_set->produce_ppns,
		    ppn_set->num_produce_pages *
		    sizeof(*ppn_set->produce_ppns));
		vmci_free_kernel_mem(ppn_set->consume_ppns,
		    ppn_set->num_consume_pages *
		    sizeof(*ppn_set->consume_ppns));
	}
	memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_populate_ppn_list --
 *
 *     Populates the list of PPNs in the hypercall structure with the PPNs
 *     of the produce queue and the consume queue.
 *
 * Results:
 *     VMCI_SUCCESS.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppn_set)
{

	ASSERT(call_buf && ppn_set && ppn_set->initialized);
	memcpy(call_buf, ppn_set->produce_ppns,
	    ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
	memcpy(call_buf + ppn_set->num_produce_pages *
	    sizeof(*ppn_set->produce_ppns), ppn_set->consume_ppns,
	    ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));

	return (VMCI_SUCCESS);
}

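/*
 * Buffer layout sketch (illustrative only): the buffer filled by
 * vmci_populate_ppn_list() carries the produce PPNs first, immediately
 * followed by the consume PPNs, so a hypothetical caller would size it as:
 *
 *	buf_size = (ppn_set->num_produce_pages + ppn_set->num_consume_pages) *
 *	    sizeof(*ppn_set->produce_ppns);
 *
 *	call_buf = vmci_alloc_kernel_mem(buf_size, VMCI_MEMORY_NORMAL);
 *	if (call_buf != NULL)
 *		vmci_populate_ppn_list(call_buf, ppn_set);
 */
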
/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_{to,from}iovec --
 *
 *     These helper routines will copy the specified bytes to/from memory that's
 *     specified as a struct iovec.  The routines cannot verify the correctness
 *     of the struct iovec's contents.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_memcpy_toiovec(struct iovec *iov, uint8_t *src, size_t len)
{

	while (len > 0) {
		if (iov->iov_len) {
			size_t to_copy = MIN(iov->iov_len, len);
			memcpy(iov->iov_base, src, to_copy);
			src += to_copy;
			len -= to_copy;
			iov->iov_base = (void *)((uintptr_t) iov->iov_base +
			    to_copy);
			iov->iov_len -= to_copy;
		}
		iov++;
	}
}

static inline void
vmci_memcpy_fromiovec(uint8_t *dst, struct iovec *iov, size_t len)
{

	while (len > 0) {
		if (iov->iov_len) {
			size_t to_copy = MIN(iov->iov_len, len);
			memcpy(dst, iov->iov_base, to_copy);
			dst += to_copy;
			len -= to_copy;
			iov->iov_base = (void *)((uintptr_t) iov->iov_base +
			    to_copy);
			iov->iov_len -= to_copy;
		}
		iov++;
	}
}

/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_to_queue --
 *
 *     Copies from a given buffer or iovector to a VMCI Queue. Assumes that
 *     offset + size does not wrap around in the queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

#pragma GCC diagnostic ignored "-Wcast-qual"
static int
__vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t size, bool is_iovec)
{
	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const uint64_t page_index =
		    (queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		/* Skip header. */
		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

		ASSERT(va);
		/*
		 * Fill up the page if we have enough payload, or else
		 * copy the remaining bytes.
		 */
		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

		if (is_iovec) {
			struct iovec *iov = (struct iovec *)src;

			/* The iovec will track bytes_copied internally. */
			vmci_memcpy_fromiovec((uint8_t *)va + page_offset,
			    iov, to_copy);
		} else
			memcpy((uint8_t *)va + page_offset,
			    (uint8_t *)src + bytes_copied, to_copy);
		bytes_copied += to_copy;
	}

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * __vmci_memcpy_from_queue --
 *
 *     Copies to a given buffer or iovector from a VMCI Queue. Assumes that
 *     offset + size does not wrap around in the queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
__vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
    uint64_t queue_offset, size_t size, bool is_iovec)
{
	struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
	size_t bytes_copied = 0;

	while (bytes_copied < size) {
		const uint64_t page_index =
		    (queue_offset + bytes_copied) / PAGE_SIZE;
		const size_t page_offset =
		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
		void *va;
		size_t to_copy;

		/* Skip header. */
		va = (void *)kernel_if->dmas[page_index + 1].dma_vaddr;

		ASSERT(va);
		/*
		 * Fill up the page if we have enough payload, or else
		 * copy the remaining bytes.
		 */
		to_copy = MIN(PAGE_SIZE - page_offset, size - bytes_copied);

		if (is_iovec) {
			struct iovec *iov = (struct iovec *)dest;

			/* The iovec will track bytes_copied internally. */
			vmci_memcpy_toiovec(iov, (uint8_t *)va +
			    page_offset, to_copy);
		} else
			memcpy((uint8_t *)dest + bytes_copied,
			    (uint8_t *)va + page_offset, to_copy);

		bytes_copied += to_copy;
	}

	return (VMCI_SUCCESS);
}

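/*
 * Index arithmetic note: the data region starts on the page after the queue
 * header, hence the "page_index + 1" above. For example, with 4KB pages a
 * copy at queue_offset = PAGE_SIZE + 100 resolves to page_index 1, i.e.
 * kernel_if->dmas[2], at byte offset 100 within that page; a copy that
 * straddles a page boundary is split across successive loop iterations.
 */
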
/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue --
 *
 *     Copies from a given buffer to a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_to_queue(queue, queue_offset,
	    (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue --
 *
 *     Copies to a given buffer from a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
	    queue, queue_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_local --
 *
 *     Copies from a given buffer to a local VMCI queue. This is the
 *     same as a regular copy.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_to_queue(queue, queue_offset,
	    (uint8_t *)src + src_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_local --
 *
 *     Copies to a given buffer from a local VMCI queue. This is the
 *     same as a regular copy.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_local(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	return (__vmci_memcpy_from_queue((uint8_t *)dest + dest_offset,
	    queue, queue_offset, size, false));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_to_queue_v --
 *
 *     Copies from a given iovec to a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
    const void *src, size_t src_offset, size_t size, int buf_type,
    bool can_block)
{

	ASSERT(can_block);

	/*
	 * We ignore src_offset because src is really a struct iovec * and will
	 * maintain offset internally.
	 */
	return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
	    true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_memcpy_from_queue_v --
 *
 *     Copies to a given iovec from a VMCI Queue.
 *
 * Results:
 *     Zero on success, negative error code on failure.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_memcpy_from_queue_v(void *dest, size_t dest_offset,
    const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
    int buf_type, bool can_block)
{

	ASSERT(can_block);

	/*
	 * We ignore dest_offset because dest is really a struct iovec * and
	 * will maintain offset internally.
	 */
	return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
	    true));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_read_port_bytes --
 *
 *     Copy memory from an I/O port to kernel memory.
 *
 * Results:
 *     No results.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port, uint8_t *buffer,
    size_t buffer_length)
{

	insb(port, buffer, buffer_length);
}