/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file defines kernel interface helpers (locking, memory, events, work scheduling) used by the VMCI driver. */

#ifndef	_VMCI_KERNEL_IF_H_
#define	_VMCI_KERNEL_IF_H_

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sema.h>

#include "vmci_defs.h"

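/*
 * Allocation flags for vmci_alloc_kernel_mem() below; VMCI_MEMORY_ATOMIC
 * is expected to request a non-sleeping allocation.
 */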
#define VMCI_MEMORY_NORMAL		0x0
#define VMCI_MEMORY_ATOMIC		0x1

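/* Linked-list wrappers over the <sys/queue.h> LIST_* macros. */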
#define vmci_list(_l)			LIST_HEAD(, _l)
#define vmci_list_item(_l)		LIST_ENTRY(_l)
#define vmci_list_init(_l)		LIST_INIT(_l)
#define vmci_list_empty(_l)		LIST_EMPTY(_l)
#define vmci_list_first(_l)		LIST_FIRST(_l)
#define vmci_list_next(e, f)		LIST_NEXT(e, f)
#define vmci_list_insert(_l, _e, n)	LIST_INSERT_HEAD(_l, _e, n)
#define vmci_list_remove(_e, n)		LIST_REMOVE(_e, n)
#define vmci_list_scan(v, _l, n)	LIST_FOREACH(v, _l, n)
#define vmci_list_scan_safe(_e, _l, n, t)				\
	LIST_FOREACH_SAFE(_e, _l, n, t)
#define vmci_list_swap(_l1, _l2, t, f)	LIST_SWAP(_l1, _l2, t, f)

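/* I/O port handle/port types and a byte-wise port read helper. */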
typedef unsigned short int vmci_io_port;
typedef int vmci_io_handle;

void	vmci_read_port_bytes(vmci_io_handle handle, vmci_io_port port,
	    uint8_t *buffer, size_t buffer_length);

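/*
 * Lock primitives layered on a FreeBSD struct mtx.  The _bh variants
 * follow the Linux-style "bottom half" naming used by the VMCI code.
 *
 * Minimal usage sketch (assumes VMCI_SUCCESS from vmci_defs.h; the lock
 * name is a hypothetical example):
 *
 *	vmci_lock lock;
 *
 *	if (vmci_init_lock(&lock, "vmci_example_lock") == VMCI_SUCCESS) {
 *		vmci_grab_lock(&lock);
 *		... touch data protected by the lock ...
 *		vmci_release_lock(&lock);
 *		vmci_cleanup_lock(&lock);
 *	}
 */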
typedef struct mtx vmci_lock;
int	vmci_init_lock(vmci_lock *lock, char *name);
void	vmci_cleanup_lock(vmci_lock *lock);
void	vmci_grab_lock(vmci_lock *lock);
void	vmci_release_lock(vmci_lock *lock);
void	vmci_grab_lock_bh(vmci_lock *lock);
void	vmci_release_lock_bh(vmci_lock *lock);
int	vmci_initialized_lock(vmci_lock *lock);

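/*
 * Kernel memory allocation.  The flags argument takes the VMCI_MEMORY_*
 * values defined above; the original allocation size must be passed back
 * to vmci_free_kernel_mem().
 */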
void	*vmci_alloc_kernel_mem(size_t size, int flags);
void	vmci_free_kernel_mem(void *ptr, size_t size);

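/*
 * Event signaling built on a FreeBSD semaphore (struct sema).  The wait
 * routines take a release callback and client data so the caller can
 * release resources (typically a lock) around the wait.
 */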
typedef struct sema vmci_event;
typedef int (*vmci_event_release_cb)(void *client_data);
void	vmci_create_event(vmci_event *event);
void	vmci_destroy_event(vmci_event *event);
void	vmci_signal_event(vmci_event *event);
void	vmci_wait_on_event(vmci_event *event, vmci_event_release_cb release_cb,
	    void *client_data);
bool	vmci_wait_on_event_interruptible(vmci_event *event,
	    vmci_event_release_cb release_cb, void *client_data);

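/*
 * Deferred ("delayed") work scheduling.  vmci_schedule_delayed_work()
 * queues work_fn to run later with the given data argument;
 * vmci_delayed_work_cb() matches the FreeBSD taskqueue callback
 * signature.
 */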
typedef void (vmci_work_fn)(void *data);
bool	vmci_can_schedule_delayed_work(void);
int	vmci_schedule_delayed_work(vmci_work_fn *work_fn, void *data);
void	vmci_delayed_work_cb(void *context, int data);

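/*
 * Mutex wrappers, also layered on struct mtx, with the same
 * init/destroy/acquire/release lifecycle as the vmci_lock calls above.
 */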
typedef struct mtx vmci_mutex;
int	vmci_mutex_init(vmci_mutex *mutex, char *name);
void	vmci_mutex_destroy(vmci_mutex *mutex);
void	vmci_mutex_acquire(vmci_mutex *mutex);
void	vmci_mutex_release(vmci_mutex *mutex);
int	vmci_mutex_initialized(vmci_mutex *mutex);

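/*
 * Allocation of the kernel memory backing a VMCI queue; the same size
 * must be passed to vmci_free_queue().
 */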
void	*vmci_alloc_queue(uint64_t size, uint32_t flags);
void	vmci_free_queue(void *q, uint64_t size);

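/*
 * Physical page number (PPN) sets describing the pages backing the
 * produce and consume queues of a queue pair.  vmci_populate_ppn_list()
 * writes the PPNs into the caller-provided buffer call_buf.
 */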
typedef PPN *vmci_ppn_list;
struct ppn_set {
	uint64_t	num_produce_pages;
	uint64_t	num_consume_pages;
	vmci_ppn_list	produce_ppns;
	vmci_ppn_list	consume_ppns;
	bool		initialized;
};

int	vmci_alloc_ppn_set(void *produce_q, uint64_t num_produce_pages,
	    void *consume_q, uint64_t num_consume_pages,
	    struct ppn_set *ppn_set);
void	vmci_free_ppn_set(struct ppn_set *ppn_set);
int	vmci_populate_ppn_list(uint8_t *call_buf, const struct ppn_set *ppnset);

#endif /* !_VMCI_KERNEL_IF_H_ */