/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 */

/* This file implements VMCI Event code. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "vmci.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_kernel_api.h"
#include "vmci_kernel_defs.h"
#include "vmci_kernel_if.h"

#define LGPFX		"vmci_event: "
#define EVENT_MAGIC	0xEABE0000

struct vmci_subscription {
	vmci_id		id;		/* Subscription ID. */
	int		ref_count;	/* Refcount, protected by subscriber_lock. */
	bool		run_delayed;	/* Deliver via delayed work callback? */
	vmci_event	destroy_event;	/* Signaled when ref_count reaches zero. */
	vmci_event_type	event;		/* Event type subscribed to. */
	vmci_event_cb	callback;	/* Subscriber callback. */
	void		*callback_data;	/* Opaque data passed to callback. */
	vmci_list_item(vmci_subscription) subscriber_list_item;
};

static struct	vmci_subscription *vmci_event_find(vmci_id sub_id);
static int	vmci_event_deliver(struct vmci_event_msg *event_msg);
static int	vmci_event_register_subscription(struct vmci_subscription *sub,
		    vmci_event_type event, uint32_t flags,
		    vmci_event_cb callback, void *callback_data);
static struct	vmci_subscription *vmci_event_unregister_subscription(
		    vmci_id sub_id);

/* One subscriber list per event type, all protected by subscriber_lock. */
static vmci_list(vmci_subscription) subscriber_array[VMCI_EVENT_MAX];
static vmci_lock subscriber_lock;

struct vmci_delayed_event_info {
	struct vmci_subscription *sub;
	uint8_t event_payload[sizeof(struct vmci_event_data_max)];
};

struct vmci_event_ref {
	struct vmci_subscription	*sub;
	vmci_list_item(vmci_event_ref)	list_item;
};

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_init --
 *
 *     General init code.
 *
 * Results:
 *     VMCI_SUCCESS on success, appropriate error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_init(void)
{
	int i;

	for (i = 0; i < VMCI_EVENT_MAX; i++)
		vmci_list_init(&subscriber_array[i]);

	return (vmci_init_lock(&subscriber_lock, "VMCI Event subscriber lock"));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_exit --
 *
 *     General exit code.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_exit(void)
{
	struct vmci_subscription *iter, *iter_2;
	vmci_event_type e;

	/* We free all memory at exit. */
	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan_safe(iter, &subscriber_array[e],
		    subscriber_list_item, iter_2) {

			/*
			 * We should never get here because all events should
			 * have been unregistered before we try to unload the
			 * driver module. Also, delayed callbacks could still
			 * be firing, so this cleanup would not be safe. Still,
			 * it is better to free the memory than not, so we
			 * leave this code in just in case.
			 */
			ASSERT(false);

			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}
	vmci_cleanup_lock(&subscriber_lock);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_sync --
 *
 *     Use this as a synchronization point when setting globals, for example,
 *     during device shutdown.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

void
vmci_event_sync(void)
{

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_release_lock_bh(&subscriber_lock);
}
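
/*
 * Illustrative sketch (hypothetical code, compiled out): a teardown path can
 * publish a global flag and then call vmci_event_sync() to guarantee that any
 * dispatcher that acquired subscriber_lock before the sync point has finished
 * its critical section before teardown proceeds.
 */
#if 0
static volatile bool example_shutting_down;

static void
example_shutdown(void)
{

	example_shutting_down = true;
	vmci_event_sync();
	/* No dispatcher is still inside a pre-sync critical section here. */
}
#endif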

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_check_host_capabilities --
 *
 *     Verify that the host supports the hypercalls we need. If it does not,
 *     try to find fallback hypercalls and use those instead.
 *
 * Results:
 *     true if required hypercalls (or fallback hypercalls) are
 *     supported by the host, false otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

bool
vmci_event_check_host_capabilities(void)
{

	/* vmci_event does not require any hypercalls. */
	return (true);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_get --
 *
 *     Gets a reference to the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_get(struct vmci_subscription *entry)
{

	ASSERT(entry);

	entry->ref_count++;
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_release --
 *
 *     Releases the given struct vmci_subscription.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     Fires the destroy event if the reference count has gone to zero.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_release(struct vmci_subscription *entry)
{

	ASSERT(entry);
	ASSERT(entry->ref_count > 0);

	entry->ref_count--;
	if (entry->ref_count == 0)
		vmci_signal_event(&entry->destroy_event);
}

/*
 *------------------------------------------------------------------------------
 *
 * event_release_cb --
 *
 *     Callback to release the event entry reference. It is called by the
 *     vmci_wait_on_event function before it blocks.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
event_release_cb(void *client_data)
{
	struct vmci_subscription *sub = (struct vmci_subscription *)client_data;

	ASSERT(sub);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

	return (0);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_find --
 *
 *     Find entry. Assumes lock is held.
 *
 * Results:
 *     Entry if found, NULL if not.
 *
 * Side effects:
 *     Increments the struct vmci_subscription refcount if an entry is found.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_find(vmci_id sub_id)
{
	struct vmci_subscription *iter;
	vmci_event_type e;

	for (e = 0; e < VMCI_EVENT_MAX; e++) {
		vmci_list_scan(iter, &subscriber_array[e],
		    subscriber_list_item) {
			if (iter->id == sub_id) {
				vmci_event_get(iter);
				return (iter);
			}
		}
	}
	return (NULL);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_delayed_dispatch_cb --
 *
 *     Calls the specified callback in a delayed context.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static void
vmci_event_delayed_dispatch_cb(void *data)
{
	struct vmci_delayed_event_info *event_info;
	struct vmci_subscription *sub;
	struct vmci_event_data *ed;

	event_info = (struct vmci_delayed_event_info *)data;

	ASSERT(event_info);
	ASSERT(event_info->sub);

	sub = event_info->sub;
	ed = (struct vmci_event_data *)event_info->event_payload;

	sub->callback(sub->id, ed, sub->callback_data);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_event_release(sub);
	vmci_release_lock_bh(&subscriber_lock);

	vmci_free_kernel_mem(event_info, sizeof(*event_info));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_deliver --
 *
 *     Actually delivers the events to the subscribers.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     The callback function for each subscriber is invoked.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_deliver(struct vmci_event_msg *event_msg)
{
	struct vmci_subscription *iter;
	int err = VMCI_SUCCESS;

	vmci_list(vmci_event_ref) no_delay_list;
	vmci_list_init(&no_delay_list);

	ASSERT(event_msg);

	vmci_grab_lock_bh(&subscriber_lock);
	vmci_list_scan(iter, &subscriber_array[event_msg->event_data.event],
	    subscriber_list_item) {
		if (iter->run_delayed) {
			struct vmci_delayed_event_info *event_info;

			if ((event_info =
			    vmci_alloc_kernel_mem(sizeof(*event_info),
			    VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);

			memset(event_info, 0, sizeof(*event_info));
			memcpy(event_info->event_payload,
			    VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			event_info->sub = iter;
			err = vmci_schedule_delayed_work(
			    vmci_event_delayed_dispatch_cb, event_info);
			if (err != VMCI_SUCCESS) {
				vmci_event_release(iter);
				vmci_free_kernel_mem(
				    event_info, sizeof(*event_info));
				goto out;
			}
		} else {
			struct vmci_event_ref *event_ref;

			/*
			 * We construct a local list of subscribers and release
			 * subscriber_lock before invoking the callbacks. This
			 * is similar to delayed callbacks, but callbacks are
			 * invoked right away here.
			 */
			if ((event_ref = vmci_alloc_kernel_mem(
			    sizeof(*event_ref), VMCI_MEMORY_ATOMIC)) == NULL) {
				err = VMCI_ERROR_NO_MEM;
				goto out;
			}

			vmci_event_get(iter);
			event_ref->sub = iter;
			vmci_list_insert(&no_delay_list, event_ref, list_item);
		}
	}

out:
	vmci_release_lock_bh(&subscriber_lock);

	if (!vmci_list_empty(&no_delay_list)) {
		struct vmci_event_data *ed;
		struct vmci_event_ref *iter;
		struct vmci_event_ref *iter_2;

		vmci_list_scan_safe(iter, &no_delay_list, list_item, iter_2) {
			struct vmci_subscription *cur;
			uint8_t event_payload[sizeof(
			    struct vmci_event_data_max)];

			cur = iter->sub;

			/*
			 * We set event data before each callback to ensure
			 * isolation.
			 */
			memset(event_payload, 0, sizeof(event_payload));
			memcpy(event_payload, VMCI_DG_PAYLOAD(event_msg),
			    (size_t)event_msg->hdr.payload_size);
			ed = (struct vmci_event_data *)event_payload;
			cur->callback(cur->id, ed, cur->callback_data);

			vmci_grab_lock_bh(&subscriber_lock);
			vmci_event_release(cur);
			vmci_release_lock_bh(&subscriber_lock);
			vmci_free_kernel_mem(iter, sizeof(*iter));
		}
	}

	return (err);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_dispatch --
 *
 *     Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 *     subscribers for the given event.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_dispatch(struct vmci_datagram *msg)
{
	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

	ASSERT(msg &&
	    msg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
	    msg->dst.resource == VMCI_EVENT_HANDLER);

	if (msg->payload_size < sizeof(vmci_event_type) ||
	    msg->payload_size > sizeof(struct vmci_event_data_max))
		return (VMCI_ERROR_INVALID_ARGS);

	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
		return (VMCI_ERROR_EVENT_UNKNOWN);

	vmci_event_deliver(event_msg);

	return (VMCI_SUCCESS);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_register_subscription --
 *
 *     Initialize and add subscription to subscriber list.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static int
vmci_event_register_subscription(struct vmci_subscription *sub,
    vmci_event_type event, uint32_t flags, vmci_event_cb callback,
    void *callback_data)
{
#define VMCI_EVENT_MAX_ATTEMPTS	10
	static vmci_id subscription_id = 0;
	int result;
	uint32_t attempts = 0;
	bool success;

	ASSERT(sub);

	if (!VMCI_EVENT_VALID(event) || callback == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Failed to subscribe to event"
		    " (type=%d) (callback=%p) (data=%p).\n",
		    event, callback, callback_data);
		return (VMCI_ERROR_INVALID_ARGS);
	}

	if (!vmci_can_schedule_delayed_work()) {
		/*
		 * If the platform doesn't support delayed work callbacks then
		 * don't allow registration for them.
		 */
		if (flags & VMCI_FLAG_EVENT_DELAYED_CB)
			return (VMCI_ERROR_INVALID_ARGS);
		sub->run_delayed = false;
	} else {
		/*
		 * The platform supports delayed work callbacks. Honor the
		 * requested flags.
		 */
		sub->run_delayed = (flags & VMCI_FLAG_EVENT_DELAYED_CB) ?
		    true : false;
	}

	sub->ref_count = 1;
	sub->event = event;
	sub->callback = callback;
	sub->callback_data = callback_data;

	vmci_grab_lock_bh(&subscriber_lock);

	for (success = false, attempts = 0;
	    success == false && attempts < VMCI_EVENT_MAX_ATTEMPTS;
	    attempts++) {
		struct vmci_subscription *existing_sub = NULL;

		/*
		 * We try to get an id a couple of times before claiming we are
		 * out of resources.
		 */
		sub->id = ++subscription_id;

		/* Test for duplicate id. */
		existing_sub = vmci_event_find(sub->id);
		if (existing_sub == NULL) {
			/* We succeeded if we didn't find a duplicate. */
			success = true;
		} else
			vmci_event_release(existing_sub);
	}

	if (success) {
		vmci_create_event(&sub->destroy_event);
		vmci_list_insert(&subscriber_array[event], sub,
		    subscriber_list_item);
		result = VMCI_SUCCESS;
	} else
		result = VMCI_ERROR_NO_RESOURCES;

	vmci_release_lock_bh(&subscriber_lock);
	return (result);
#undef VMCI_EVENT_MAX_ATTEMPTS
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unregister_subscription --
 *
 *     Remove subscription from subscriber list.
 *
 * Results:
 *     struct vmci_subscription when found, NULL otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static struct vmci_subscription *
vmci_event_unregister_subscription(vmci_id sub_id)
{
	struct vmci_subscription *s;

	vmci_grab_lock_bh(&subscriber_lock);
	s = vmci_event_find(sub_id);
	if (s != NULL) {
		vmci_event_release(s);
		vmci_list_remove(s, subscriber_list_item);
	}
	vmci_release_lock_bh(&subscriber_lock);

	if (s != NULL) {
		vmci_wait_on_event(&s->destroy_event, event_release_cb, s);
		vmci_destroy_event(&s->destroy_event);
	}

	return (s);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_subscribe --
 *
 *     Subscribe to the given event. The callback specified can be fired in
 *     different contexts depending on what flag is specified while registering.
 *     If flags contains VMCI_FLAG_EVENT_NONE then the callback is fired with
 *     the subscriber lock held (and BH context on the guest). If flags contains
 *     VMCI_FLAG_EVENT_DELAYED_CB then the callback is fired with no locks held
 *     in thread context. This is useful because other vmci_event functions can
 *     be called, but it also increases the chances that an event will be
 *     dropped.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_subscribe(vmci_event_type event, vmci_event_cb callback,
    void *callback_data, vmci_id *subscription_id)
{
	int retval;
	uint32_t flags = VMCI_FLAG_EVENT_NONE;
	struct vmci_subscription *s = NULL;

	if (subscription_id == NULL) {
		VMCI_LOG_DEBUG(LGPFX"Invalid subscription (NULL).\n");
		return (VMCI_ERROR_INVALID_ARGS);
	}

	s = vmci_alloc_kernel_mem(sizeof(*s), VMCI_MEMORY_NORMAL);
	if (s == NULL)
		return (VMCI_ERROR_NO_MEM);

	retval = vmci_event_register_subscription(s, event, flags,
	    callback, callback_data);
	if (retval < VMCI_SUCCESS) {
		vmci_free_kernel_mem(s, sizeof(*s));
		return (retval);
	}

	*subscription_id = s->id;
	return (retval);
}
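
/*
 * Illustrative usage sketch (hypothetical client code, compiled out): the
 * callback signature matches how vmci_event_deliver() invokes it above. With
 * the default VMCI_FLAG_EVENT_NONE, the callback runs with the subscriber
 * lock held, so it must not block. VMCI_EVENT_CTX_ID_UPDATE is used here only
 * as an example event type.
 */
#if 0
static vmci_id example_sub_id;

static void
example_event_cb(vmci_id sub_id, struct vmci_event_data *ed,
    void *client_data)
{

	VMCI_LOG_DEBUG(LGPFX"Event %d fired for subscription %u.\n",
	    ed->event, sub_id);
}

static int
example_subscribe(void)
{

	return (vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
	    example_event_cb, NULL, &example_sub_id));
}
#endif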

/*
 *------------------------------------------------------------------------------
 *
 * vmci_event_unsubscribe --
 *
 *     Unsubscribe from the given event. Removes the subscription from the
 *     list and frees it.
 *
 * Results:
 *     VMCI_SUCCESS on success, error code otherwise.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

int
vmci_event_unsubscribe(vmci_id sub_id)
{
	struct vmci_subscription *s;

	/*
	 * Return subscription. At this point we know no one else is accessing
	 * the subscription, so we can free it.
	 */
	s = vmci_event_unregister_subscription(sub_id);
	if (s == NULL)
		return (VMCI_ERROR_NOT_FOUND);
	vmci_free_kernel_mem(s, sizeof(*s));

	return (VMCI_SUCCESS);
}
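
/*
 * Illustrative teardown sketch (hypothetical, compiled out), paired with the
 * subscription example above: vmci_event_unsubscribe() waits until in-flight
 * callback references have drained before the entry is freed.
 */
#if 0
static void
example_unsubscribe(void)
{

	if (vmci_event_unsubscribe(example_sub_id) != VMCI_SUCCESS)
		VMCI_LOG_DEBUG(LGPFX"Failed to unsubscribe.\n");
}
#endif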