xref: /freebsd/sys/dev/vmware/vmci/vmci_event.c (revision 685dc743)
163a93856SMark Peek /*-
23eeb7511SMark Peek  * Copyright (c) 2018 VMware, Inc.
363a93856SMark Peek  *
48c302b2eSMark Peek  * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
563a93856SMark Peek  */
663a93856SMark Peek 
763a93856SMark Peek /* This file implements VMCI Event code. */
863a93856SMark Peek 
963a93856SMark Peek #include <sys/cdefs.h>
1063a93856SMark Peek #include "vmci.h"
1163a93856SMark Peek #include "vmci_driver.h"
1263a93856SMark Peek #include "vmci_event.h"
1363a93856SMark Peek #include "vmci_kernel_api.h"
1463a93856SMark Peek #include "vmci_kernel_defs.h"
1563a93856SMark Peek #include "vmci_kernel_if.h"
1663a93856SMark Peek 
1763a93856SMark Peek #define LGPFX		"vmci_event: "
1863a93856SMark Peek #define EVENT_MAGIC	0xEABE0000
1963a93856SMark Peek 
/*
 * A single event subscription: associates a unique subscription id with the
 * callback to invoke when the subscribed event type is delivered.
 */
struct vmci_subscription {
	vmci_id		id;		/* Unique subscription id. */
	int		ref_count;	/* Protected by subscriber_lock. */
	bool		run_delayed;	/* Invoke callback from delayed work
					 * (thread context) instead of inline. */
	vmci_event	destroy_event;	/* Signaled when ref_count drops to 0. */
	vmci_event_type	event;		/* Event type subscribed to. */
	vmci_event_cb	callback;	/* Subscriber's callback. */
	void		*callback_data;	/* Opaque argument for callback. */
	vmci_list_item(vmci_subscription) subscriber_list_item;
};
3063a93856SMark Peek 
3163a93856SMark Peek static struct	vmci_subscription *vmci_event_find(vmci_id sub_id);
3263a93856SMark Peek static int	vmci_event_deliver(struct vmci_event_msg *event_msg);
3363a93856SMark Peek static int	vmci_event_register_subscription(struct vmci_subscription *sub,
3463a93856SMark Peek 		    vmci_event_type event, uint32_t flags,
3563a93856SMark Peek 		    vmci_event_cb callback, void *callback_data);
3663a93856SMark Peek static struct	vmci_subscription *vmci_event_unregister_subscription(
3763a93856SMark Peek 		    vmci_id sub_id);
3863a93856SMark Peek 
3963a93856SMark Peek static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
4063a93856SMark Peek static vmci_lock subscriber_lock;
4163a93856SMark Peek 
/*
 * Work item passed to vmci_schedule_delayed_work(): carries a referenced
 * subscription plus a private copy of the event payload, so the datagram
 * buffer need not outlive the dispatch.
 */
struct vmci_delayed_event_info {
	struct vmci_subscription *sub;	/* Holds a reference (vmci_event_get). */
	uint8_t event_payload[sizeof(struct vmci_event_data_max)];
};
4663a93856SMark Peek 
/*
 * Node for the local list vmci_event_deliver() builds so that non-delayed
 * callbacks can be invoked after subscriber_lock has been dropped.
 */
struct vmci_event_ref {
	struct vmci_subscription	*sub;	/* Referenced subscription. */
	vmci_list_item(vmci_event_ref)	list_item;
};
5163a93856SMark Peek 
5263a93856SMark Peek /*
5363a93856SMark Peek  *------------------------------------------------------------------------------
5463a93856SMark Peek  *
5563a93856SMark Peek  * vmci_event_init --
5663a93856SMark Peek  *
5763a93856SMark Peek  *     General init code.
5863a93856SMark Peek  *
5963a93856SMark Peek  * Results:
6063a93856SMark Peek  *     VMCI_SUCCESS on success, appropriate error code otherwise.
6163a93856SMark Peek  *
6263a93856SMark Peek  * Side effects:
6363a93856SMark Peek  *     None.
6463a93856SMark Peek  *
6563a93856SMark Peek  *------------------------------------------------------------------------------
6663a93856SMark Peek  */
6763a93856SMark Peek 
6863a93856SMark Peek int
vmci_event_init(void)6963a93856SMark Peek vmci_event_init(void)
7063a93856SMark Peek {
7163a93856SMark Peek 	int i;
7263a93856SMark Peek 
7363a93856SMark Peek 	for (i = 0; i < VMCI_EVENT_MAX; i++)
7463a93856SMark Peek 		vmci_list_init(&subscriber_array[i]);
7563a93856SMark Peek 
7663a93856SMark Peek 	return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
7763a93856SMark Peek }
7863a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_exit --
 *
 *     General exit code, run at module unload.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Frees any subscriptions still on the subscriber lists and destroys
 *     the subscriber lock.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_exit(void)
{
	struct vmci_subscription *iter, *iter_2;
	vmci_event_type e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan_safe(iter, &subscriber_array[e],
		    subscriber_list_item, iter_2) {
			/*
			 * We should never get here because all events should
			 * have been unregistered before we try to unload the
			 * driver module. Also, delayed callbacks could still
			 * be firing so this cleanup would not be safe. Still
			 * it is better to free the memory than not ... so we
			 * leave this code in just in case....
			 */
			ASSERT(false);

			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}
	vmci_cleanup_lock(&subscriber_lock);
}
12063a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_sync --
 *
 *     Use this as a synchronization point when setting globals, for example,
 *     during device shutdown. Acquiring and immediately releasing the
 *     subscriber lock acts as a barrier: once it returns, no thread that held
 *     subscriber_lock before the call is still inside a critical section.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_sync(void)
{

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_release_lock_bh(&subscriber_lock);
}
14563a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are
 *     supported by the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_event_check_host_capabilities(void)
{

	/* vmci_event does not require any hypercalls. */
	return (true);
}
17163a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_get --
 *
 *     Gets a reference to the given struct vmci_subscription. Caller must
 *     hold subscriber_lock (ref_count is not atomic).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Increments entry->ref_count.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_get(struct vmci_subscription *entry)
{

	ASSERT(entry);

	entry->ref_count++;
}
19663a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_release --
 *
 *     Releases the given struct vmci_subscription. Caller must hold
 *     subscriber_lock (ref_count is not atomic).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Fires the destroy event if the reference count has gone to zero.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_release(struct vmci_subscription *entry)
{

	ASSERT(entry);
	ASSERT(entry->ref_count > 0);

	entry->ref_count--;
	if (entry->ref_count == 0)
		vmci_signal_event(&entry->destroy_event);
}
22463a93856SMark Peek 
22563a93856SMark Peek  /*
22663a93856SMark Peek  *------------------------------------------------------------------------------
22763a93856SMark Peek  *
22863a93856SMark Peek  * event_release_cb --
22963a93856SMark Peek  *
23063a93856SMark Peek  *     Callback to release the event entry reference. It is called by the
23163a93856SMark Peek  *     vmci_wait_on_event function before it blocks.
23263a93856SMark Peek  *
23363a93856SMark Peek  * Result:
23463a93856SMark Peek  *     None.
23563a93856SMark Peek  *
23663a93856SMark Peek  * Side effects:
23763a93856SMark Peek  *     None.
23863a93856SMark Peek  *
23963a93856SMark Peek  *------------------------------------------------------------------------------
24063a93856SMark Peek  */
24163a93856SMark Peek 
24263a93856SMark Peek static int
event_release_cb(void * client_data)24363a93856SMark Peek event_release_cb(void *client_data)
24463a93856SMark Peek {
24563a93856SMark Peek 	struct vmci_subscription *sub = (struct vmci_subscription *)client_data;
24663a93856SMark Peek 
24763a93856SMark Peek 	ASSERT(sub);
24863a93856SMark Peek 
24963a93856SMark Peek 	vmci_grab_lock_bh(&subscriber_lock);
25063a93856SMark Peek 	vmci_event_release(sub);
25163a93856SMark Peek 	vmci_release_lock_bh(&subscriber_lock);
25263a93856SMark Peek 
25363a93856SMark Peek 	return (0);
25463a93856SMark Peek }
25563a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_find --
 *
 *     Find entry by subscription id. Assumes subscriber_lock is held.
 *     Searches every event type's list, since the id alone does not encode
 *     the event type.
 *
 * Results:
 *     Entry if found, NULL if not.
 *
 * Side effects:
 *     Increments the struct vmci_subscription refcount if an entry is found.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_find(vmci_id sub_id)
{
	struct vmci_subscription *iter;
	vmci_event_type e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan(iter, &subscriber_array[e],
		    subscriber_list_item) {
			if (iter->id == sub_id) {
				vmci_event_get(iter);
				return (iter);
			}
		}
	}
	return (NULL);
}
28963a93856SMark Peek 
29063a93856SMark Peek /*
29163a93856SMark Peek  *------------------------------------------------------------------------------
29263a93856SMark Peek  *
29363a93856SMark Peek  * vmci_event_delayed_dispatch_cb --
29463a93856SMark Peek  *
29563a93856SMark Peek  *     Calls the specified callback in a delayed context.
29663a93856SMark Peek  *
29763a93856SMark Peek  * Results:
29863a93856SMark Peek  *     None.
29963a93856SMark Peek  *
30063a93856SMark Peek  * Side effects:
30163a93856SMark Peek  *     None.
30263a93856SMark Peek  *
30363a93856SMark Peek  *------------------------------------------------------------------------------
30463a93856SMark Peek  */
30563a93856SMark Peek 
30663a93856SMark Peek static void
vmci_event_delayed_dispatch_cb(void * data)30763a93856SMark Peek vmci_event_delayed_dispatch_cb(void *data)
30863a93856SMark Peek {
30963a93856SMark Peek 	struct vmci_delayed_event_info *event_info;
31063a93856SMark Peek 	struct vmci_subscription *sub;
31163a93856SMark Peek 	struct vmci_event_data *ed;
31263a93856SMark Peek 
31363a93856SMark Peek 	event_info = (struct vmci_delayed_event_info *)data;
31463a93856SMark Peek 
31563a93856SMark Peek 	ASSERT(event_info);
31663a93856SMark Peek 	ASSERT(event_info->sub);
31763a93856SMark Peek 
31863a93856SMark Peek 	sub = event_info->sub;
31963a93856SMark Peek 	ed = (struct vmci_event_data *)event_info->event_payload;
32063a93856SMark Peek 
32163a93856SMark Peek 	sub->callback(sub->id, ed, sub->callback_data);
32263a93856SMark Peek 
32363a93856SMark Peek 	vmci_grab_lock_bh(&subscriber_lock);
32463a93856SMark Peek 	vmci_event_release(sub);
32563a93856SMark Peek 	vmci_release_lock_bh(&subscriber_lock);
32663a93856SMark Peek 
32763a93856SMark Peek 	vmci_free_kernel_mem(event_info, sizeof(*event_info));
32863a93856SMark Peek }
32963a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_deliver --
 *
 *     Actually delivers the events to the subscribers. Delayed subscribers
 *     get a scheduled work item with a private payload copy; non-delayed
 *     subscribers are collected on a local list and called back directly
 *     once subscriber_lock has been dropped.
 *
 * Results:
 *     VMCI_SUCCESS, or VMCI_ERROR_NO_MEM / scheduling error on failure.
 *     Note that on error, subscribers already queued or listed before the
 *     failure are still invoked.
 *
 * Side effects:
 *     The callback function for each subscriber is invoked.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *iter;
	int err = VMCI_SUCCESS;

	vmci_list(vmci_event_ref) no_delay_list;
	vmci_list_init(&no_delay_list);

	ASSERT(event_msg);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
	    subscriber_list_item) {
		if (iter->run_delayed) {
			struct vmci_delayed_event_info *event_info;
			/* Atomic allocation: we are inside a bh-locked region. */
			if ((event_info =
			    vmci_alloc_kernel_mem(sizeof(*event_info),
			    VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			/* Reference released by the delayed dispatch cb. */
			vmci_event_get(iter);

			memset(event_info, 0, sizeof(*event_info));
			memcpy(event_info->event_payload,
			    VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			event_info->sub = iter;
			err =
			    vmci_schedule_delayed_work(
			    vmci_event_delayed_dispatch_cb, event_info);
			if (err != VMCI_SUCCESS) {
				/* Undo the reference and allocation above. */
				vmci_event_release(iter);
				vmci_free_kernel_mem(
				    event_info, sizeof(*event_info));
				goto out;
			}

		} else {
			struct vmci_event_ref *event_ref;

			/*
			 * We construct a local list of subscribers and release
			 * subscriber_lock before invoking the callbacks. This
			 * is similar to delayed callbacks, but callbacks are
			 * invoked right away here.
			 */
			if ((event_ref = vmci_alloc_kernel_mem(
			    sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			/* Reference released after the callback runs below. */
			vmci_event_get(iter);
			event_ref->sub = iter;
			vmci_list_insert(&no_delay_list, event_ref, list_item);
		}
	}

out:
	vmci_release_lock_bh(&subscriber_lock);

	if (!vmci_list_empty(&no_delay_list)) {
		struct vmci_event_data *ed;
		struct vmci_event_ref *iter;
		struct vmci_event_ref *iter_2;

		vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
			struct vmci_subscription *cur;
			uint8_t event_payload[sizeof(
			    struct vmci_event_data_max)];

			cur = iter->sub;

			/*
			 * We set event data before each callback to ensure
			 * isolation.
			 */
			memset(event_payload, 0, sizeof(event_payload));
			memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			ed = (struct vmci_event_data *)event_payload;
			/* Callback runs without subscriber_lock held. */
			cur->callback(cur->id, ed, cur->callback_data);

			vmci_grab_lock_bh(&subscriber_lock);
			vmci_event_release(cur);
			vmci_release_lock_bh(&subscriber_lock);
			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}

	return (err);
}
44163a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_dispatch --
 *
 *     Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Validates the payload
 *     size and event type, then calls all subscribers for the given event.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise. Note that delivery
 *     failures inside vmci_event_deliver() are not propagated.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	ASSERT(msg &&
	    msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
	    msg->dst.resource == VMCI_EVENT_HANDLER);

	/* Size check must come first: event_data overlays the payload. */
	if (msg->payload_size < sizeof(vmci_event_type) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return (VMCI_ERROR_INVALID_ARGS);

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return (VMCI_ERROR_EVENT_UNKNOWN);

	vmci_event_deliver(event_msg);

	return (VMCI_SUCCESS);
}
47963a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_register_subscription --
 *
 *     Initialize and add subscription to subscriber list. Allocates a unique
 *     subscription id by retrying a monotonically increasing counter up to
 *     VMCI_EVENT_MAX_ATTEMPTS times.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     On success, sub is linked into subscriber_array[event] and its
 *     destroy_event is created.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags, vmci_event_cb callback,
    void *callback_data)
{
#define VMCI_EVENT_MAX_ATTEMPTS	10
	/* Monotonic id source; protected by subscriber_lock below. */
	static vmci_id subscription_id = 0;
	int result;
	uint32_t attempts = 0;
	bool success;

	ASSERT(sub);

	if (!VMCI_EVENT_VALID(event) || callback == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
		    " (type=%d) (callback=%p) (data=%p).\n",
		    event, callback, callback_data);
		return (VMCI_ERROR_INVALID_ARGS);
	}

	if (!vmci_can_schedule_delayed_work()) {
		/*
		 * If the platform doesn't support delayed work callbacks then
		 * don't allow registration for them.
		 */
		if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
			return (VMCI_ERROR_INVALID_ARGS);
		sub->run_delayed = false;
	} else {
		/*
		 * The platform supports delayed work callbacks. Honor the
		 * requested flags
		 */
		sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?
		    true : false;
	}

	sub->ref_count = 1;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;

	vmci_grab_lock_bh(&subscriber_lock);

	for (success = false, attempts = 0;
	    success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
	    attempts++) {
		struct vmci_subscription *existing_sub = NULL;

		/*
		 * We try to get an id a couple of time before claiming we are
		 * out of resources.
		 * NOTE(review): after the counter wraps, ++subscription_id
		 * could presumably collide with an id still in use or yield a
		 * reserved id value — confirm against vmci_id conventions.
		 */
		sub->id = ++subscription_id;

		/* Test for duplicate id. */
		existing_sub = vmci_event_find(sub->id);
		if (existing_sub == NULL) {
			/* We succeeded if we didn't find a duplicate. */
			success = true;
		} else
			vmci_event_release(existing_sub);
	}

	if (success) {
		vmci_create_event(&sub->destroy_event);
		vmci_list_insert(&subscriber_array[event], sub,
		    subscriber_list_item);
		result = VMCI_SUCCESS;
	} else
		result = VMCI_ERROR_NO_RESOURCES;

	vmci_release_lock_bh(&subscriber_lock);
	return (result);
#undef VMCI_EVENT_MAX_ATTEMPTS
}
57263a93856SMark Peek 
/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unregister_subscription --
 *
 *     Remove subscription from subscriber list, then wait (via destroy_event)
 *     for all outstanding references to be dropped before returning it.
 *
 * Results:
 *     struct vmci_subscription when found, NULL otherwise. On non-NULL
 *     return the caller owns the subscription and may free it.
 *
 * Side effects:
 *     May block in vmci_wait_on_event until the refcount reaches zero.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_unregister_subscription(vmci_id sub_id)
{
	struct vmci_subscription *s;

	/* Bail out if the module was never (or is no longer) initialized. */
	if (!vmci_initialized_lock(&subscriber_lock))
		return NULL;

	vmci_grab_lock_bh(&subscriber_lock);
	s = vmci_event_find(sub_id);
	if (s != NULL) {
		/* Drop the reference vmci_event_find() just took. */
		vmci_event_release(s);
		vmci_list_remove(s, subscriber_list_item);
	}
	vmci_release_lock_bh(&subscriber_lock);

	if (s != NULL) {
		/* event_release_cb drops the registration's own reference. */
		vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
		vmci_destroy_event(&s->destroy_event);
	}

	return (s);
}
61263a93856SMark Peek 
61363a93856SMark Peek /*
61463a93856SMark Peek  *------------------------------------------------------------------------------
61563a93856SMark Peek  *
61663a93856SMark Peek  * vmci_event_subscribe --
61763a93856SMark Peek  *
61863a93856SMark Peek  *     Subscribe to given event. The callback specified can be fired in
61963a93856SMark Peek  *     different contexts depending on what flag is specified while registering.
62063a93856SMark Peek  *     If flags contains VMCI_FLAG_EVENT_NONE then the callback is fired with
62163a93856SMark Peek  *     the subscriber lock held (and BH context on the guest). If flags contain
62263a93856SMark Peek  *     VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no locks held
62363a93856SMark Peek  *     in thread context. This is useful because other vmci_event functions can
62463a93856SMark Peek  *     be called, but it also increases the chances that an event will be
62563a93856SMark Peek  *     dropped.
62663a93856SMark Peek  *
62763a93856SMark Peek  * Results:
62863a93856SMark Peek  *     VMCI_SUCCESS on success, error code otherwise.
62963a93856SMark Peek  *
63063a93856SMark Peek  * Side effects:
63163a93856SMark Peek  *     None.
63263a93856SMark Peek  *
63363a93856SMark Peek  *------------------------------------------------------------------------------
63463a93856SMark Peek  */
63563a93856SMark Peek 
63663a93856SMark Peek int
vmci_event_subscribe(vmci_event_type event,vmci_event_cb callback,void * callback_data,vmci_id * subscription_id)63763a93856SMark Peek vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
63863a93856SMark Peek     void *callback_data, vmci_id *subscription_id)
63963a93856SMark Peek {
64063a93856SMark Peek 	int retval;
64163a93856SMark Peek 	uint32_t flags = VMCI_FLAG_EVENT_NONE;
64263a93856SMark Peek 	struct vmci_subscription *s = NULL;
64363a93856SMark Peek 
64463a93856SMark Peek 	if (subscription_id == NULL) {
64563a93856SMark Peek 		VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
64663a93856SMark Peek 		return (VMCI_ERROR_INVALID_ARGS);
64763a93856SMark Peek 	}
64863a93856SMark Peek 
64963a93856SMark Peek 	s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
65063a93856SMark Peek 	if (s == NULL)
65163a93856SMark Peek 		return (VMCI_ERROR_NO_MEM);
65263a93856SMark Peek 
65363a93856SMark Peek 	retval = vmci_event_register_subscription(s, event, flags,
65463a93856SMark Peek 	    callback, callback_data);
65563a93856SMark Peek 	if (retval < VMCI_SUCCESS) {
65663a93856SMark Peek 		vmci_free_kernel_mem(s, sizeof(*s));
65763a93856SMark Peek 		return (retval);
65863a93856SMark Peek 	}
65963a93856SMark Peek 
66063a93856SMark Peek 	*subscription_id = s->id;
66163a93856SMark Peek 	return (retval);
66263a93856SMark Peek }
66363a93856SMark Peek 
66463a93856SMark Peek /*
66563a93856SMark Peek  *------------------------------------------------------------------------------
66663a93856SMark Peek  *
66763a93856SMark Peek  * vmci_event_unsubscribe --
66863a93856SMark Peek  *
66963a93856SMark Peek  *     Unsubscribe to given event. Removes it from list and frees it.
67063a93856SMark Peek  *     Will return callback_data if requested by caller.
67163a93856SMark Peek  *
67263a93856SMark Peek  * Results:
67363a93856SMark Peek  *     VMCI_SUCCESS on success, error code otherwise.
67463a93856SMark Peek  *
67563a93856SMark Peek  * Side effects:
67663a93856SMark Peek  *     None.
67763a93856SMark Peek  *
67863a93856SMark Peek  *------------------------------------------------------------------------------
67963a93856SMark Peek  */
68063a93856SMark Peek 
68163a93856SMark Peek int
vmci_event_unsubscribe(vmci_id sub_id)68263a93856SMark Peek vmci_event_unsubscribe(vmci_id sub_id)
68363a93856SMark Peek {
68463a93856SMark Peek 	struct vmci_subscription *s;
68563a93856SMark Peek 
68663a93856SMark Peek 	/*
68763a93856SMark Peek 	 * Return subscription. At this point we know noone else is accessing
68863a93856SMark Peek 	 * the subscription so we can free it.
68963a93856SMark Peek 	 */
69063a93856SMark Peek 	s = vmci_event_unregister_subscription(sub_id);
69163a93856SMark Peek 	if (s == NULL)
69263a93856SMark Peek 		return (VMCI_ERROR_NOT_FOUND);
69363a93856SMark Peek 	vmci_free_kernel_mem(s, sizeof(*s));
69463a93856SMark Peek 
69563a93856SMark Peek 	return (VMCI_SUCCESS);
69663a93856SMark Peek }
697