/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <aws/io/event_loop.h>

#include <aws/common/clock.h>
#include <aws/common/device_random.h>
#include <aws/common/system_info.h>
#include <aws/common/thread.h>

struct aws_event_loop *aws_event_loop_new_default(struct aws_allocator *alloc, aws_io_clock_fn *clock) {
    struct aws_event_loop_options options = {
        .thread_options = NULL,
        .clock = clock,
    };

    return aws_event_loop_new_default_with_options(alloc, &options);
}

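/* Final stage of group shutdown: runs via aws_thread_current_at_exit() on the cleanup thread.
 * The shutdown callback is copied out of the group first, because the group's memory is
 * released before the callback is invoked. */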
static void s_event_loop_group_thread_exit(void *user_data) {
    struct aws_event_loop_group *el_group = user_data;

    aws_simple_completion_callback *completion_callback = el_group->shutdown_options.shutdown_callback_fn;
    void *completion_user_data = el_group->shutdown_options.shutdown_callback_user_data;

    aws_mem_release(el_group->allocator, el_group);

    if (completion_callback != NULL) {
        completion_callback(completion_user_data);
    }
}

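/* Destroys every event loop in the group, back to front, then frees the list itself.
 * aws_event_loop_destroy() blocks until the loop's thread has shut down, so this must
 * never run on one of the group's own event-loop threads. */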
static void s_aws_event_loop_group_shutdown_sync(struct aws_event_loop_group *el_group) {
    while (aws_array_list_length(&el_group->event_loops) > 0) {
        struct aws_event_loop *loop = NULL;

        if (!aws_array_list_back(&el_group->event_loops, &loop)) {
            aws_event_loop_destroy(loop);
        }

        aws_array_list_pop_back(&el_group->event_loops);
    }

    aws_array_list_clean_up(&el_group->event_loops);
}

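/* Entry point of the cleanup thread spawned by s_aws_event_loop_group_shutdown_async().
 * After the loops are destroyed, the at-exit hook defers the final free (and the user's
 * shutdown callback) until this thread has fully wound down. */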
static void s_event_loop_destroy_async_thread_fn(void *thread_data) {
    struct aws_event_loop_group *el_group = thread_data;

    s_aws_event_loop_group_shutdown_sync(el_group);

    aws_thread_current_at_exit(s_event_loop_group_thread_exit, el_group);
}

static void s_aws_event_loop_group_shutdown_async(struct aws_event_loop_group *el_group) {

    /* It's possible that the last refcount was released on an event-loop thread,
     * so we would deadlock if we waited here for all the event-loop threads to shut down.
     * Therefore, we spawn a NEW thread and have it wait for all the event-loop threads to shut down. */
    struct aws_thread cleanup_thread;
    AWS_ZERO_STRUCT(cleanup_thread);

    AWS_FATAL_ASSERT(aws_thread_init(&cleanup_thread, el_group->allocator) == AWS_OP_SUCCESS);

    struct aws_thread_options thread_options;
    AWS_ZERO_STRUCT(thread_options);
    thread_options.join_strategy = AWS_TJS_MANAGED;

    AWS_FATAL_ASSERT(
        aws_thread_launch(&cleanup_thread, s_event_loop_destroy_async_thread_fn, el_group, &thread_options) ==
        AWS_OP_SUCCESS);
}

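/* Common constructor behind all the public group constructors. When pin_threads is true,
 * the CPU ids of the requested NUMA node (cpu_group) are fetched and each loop's thread is
 * pinned to one of them, skipping suspected hyper-threads. */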
static struct aws_event_loop_group *s_event_loop_group_new(
    struct aws_allocator *alloc,
    aws_io_clock_fn *clock,
    uint16_t el_count,
    uint16_t cpu_group,
    bool pin_threads,
    aws_new_event_loop_fn *new_loop_fn,
    void *new_loop_user_data,
    const struct aws_shutdown_callback_options *shutdown_options) {
    AWS_ASSERT(new_loop_fn);

    size_t group_cpu_count = 0;
    struct aws_cpu_info *usable_cpus = NULL;

    if (pin_threads) {
        group_cpu_count = aws_get_cpu_count_for_group(cpu_group);

        if (!group_cpu_count) {
            aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
            return NULL;
        }

        usable_cpus = aws_mem_calloc(alloc, group_cpu_count, sizeof(struct aws_cpu_info));

        if (usable_cpus == NULL) {
            return NULL;
        }

        aws_get_cpu_ids_for_group(cpu_group, usable_cpus, group_cpu_count);
    }

    struct aws_event_loop_group *el_group = aws_mem_calloc(alloc, 1, sizeof(struct aws_event_loop_group));
    if (el_group == NULL) {
        return NULL;
    }

    el_group->allocator = alloc;
    aws_ref_count_init(
        &el_group->ref_count, el_group, (aws_simple_completion_callback *)s_aws_event_loop_group_shutdown_async);

    if (aws_array_list_init_dynamic(&el_group->event_loops, alloc, el_count, sizeof(struct aws_event_loop *))) {
        goto on_error;
    }

    for (uint16_t i = 0; i < el_count; ++i) {
        /* Don't pin to hyper-threads if a user cared enough to specify a NUMA node */
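        /* (A pinned group can therefore end up with fewer than el_count loops: indices past the
         * group's CPU count, or those landing on a suspected hyper-thread, are skipped.) */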
        if (!pin_threads || (i < group_cpu_count && !usable_cpus[i].suspected_hyper_thread)) {
            struct aws_thread_options thread_options = *aws_default_thread_options();

            struct aws_event_loop_options options = {
                .clock = clock,
            };

            if (pin_threads) {
                thread_options.cpu_id = usable_cpus[i].cpu_id;
                options.thread_options = &thread_options;
            }

            struct aws_event_loop *loop = new_loop_fn(alloc, &options, new_loop_user_data);

            if (!loop) {
                goto on_error;
            }

            if (aws_array_list_push_back(&el_group->event_loops, (const void *)&loop)) {
                aws_event_loop_destroy(loop);
                goto on_error;
            }

            if (aws_event_loop_run(loop)) {
                goto on_error;
            }
        }
    }

    if (shutdown_options != NULL) {
        el_group->shutdown_options = *shutdown_options;
    }

    if (pin_threads) {
        aws_mem_release(alloc, usable_cpus);
    }

    return el_group;

on_error:

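    /* Tear down synchronously: no external references exist yet and we are still on the
     * caller's thread (not an event-loop thread), so waiting on the loop threads here
     * cannot deadlock. */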
    aws_mem_release(alloc, usable_cpus);
    s_aws_event_loop_group_shutdown_sync(el_group);
    s_event_loop_group_thread_exit(el_group);

    return NULL;
}

struct aws_event_loop_group *aws_event_loop_group_new(
    struct aws_allocator *alloc,
    aws_io_clock_fn *clock,
    uint16_t el_count,
    aws_new_event_loop_fn *new_loop_fn,
    void *new_loop_user_data,
    const struct aws_shutdown_callback_options *shutdown_options) {

    AWS_ASSERT(new_loop_fn);
    AWS_ASSERT(el_count);

    return s_event_loop_group_new(alloc, clock, el_count, 0, false, new_loop_fn, new_loop_user_data, shutdown_options);
}

static struct aws_event_loop *s_default_new_event_loop(
    struct aws_allocator *allocator,
    const struct aws_event_loop_options *options,
    void *user_data) {

    (void)user_data;
    return aws_event_loop_new_default_with_options(allocator, options);
}

struct aws_event_loop_group *aws_event_loop_group_new_default(
    struct aws_allocator *alloc,
    uint16_t max_threads,
    const struct aws_shutdown_callback_options *shutdown_options) {
    if (!max_threads) {
        uint16_t processor_count = (uint16_t)aws_system_info_processor_count();
        /* cut them in half to avoid using hyper-threads for the IO work. */
        max_threads = processor_count > 1 ? processor_count / 2 : processor_count;
    }

    return aws_event_loop_group_new(
        alloc, aws_high_res_clock_get_ticks, max_threads, s_default_new_event_loop, NULL, shutdown_options);
}

struct aws_event_loop_group *aws_event_loop_group_new_pinned_to_cpu_group(
    struct aws_allocator *alloc,
    aws_io_clock_fn *clock,
    uint16_t el_count,
    uint16_t cpu_group,
    aws_new_event_loop_fn *new_loop_fn,
    void *new_loop_user_data,
    const struct aws_shutdown_callback_options *shutdown_options) {
    AWS_ASSERT(new_loop_fn);
    AWS_ASSERT(el_count);

    return s_event_loop_group_new(
        alloc, clock, el_count, cpu_group, true, new_loop_fn, new_loop_user_data, shutdown_options);
}

struct aws_event_loop_group *aws_event_loop_group_new_default_pinned_to_cpu_group(
    struct aws_allocator *alloc,
    uint16_t max_threads,
    uint16_t cpu_group,
    const struct aws_shutdown_callback_options *shutdown_options) {

    if (!max_threads) {
        uint16_t processor_count = (uint16_t)aws_system_info_processor_count();
        /* cut them in half to avoid using hyper-threads for the IO work. */
        max_threads = processor_count > 1 ? processor_count / 2 : processor_count;
    }

    return aws_event_loop_group_new_pinned_to_cpu_group(
        alloc, aws_high_res_clock_get_ticks, max_threads, cpu_group, s_default_new_event_loop, NULL, shutdown_options);
}

struct aws_event_loop_group *aws_event_loop_group_acquire(struct aws_event_loop_group *el_group) {
    if (el_group != NULL) {
        aws_ref_count_acquire(&el_group->ref_count);
    }

    return el_group;
}

void aws_event_loop_group_release(struct aws_event_loop_group *el_group) {
    if (el_group != NULL) {
        aws_ref_count_release(&el_group->ref_count);
    }
}

size_t aws_event_loop_group_get_loop_count(struct aws_event_loop_group *el_group) {
    return aws_array_list_length(&el_group->event_loops);
}

struct aws_event_loop *aws_event_loop_group_get_loop_at(struct aws_event_loop_group *el_group, size_t index) {
    struct aws_event_loop *el = NULL;
    aws_array_list_get_at(&el_group->event_loops, &el, index);
    return el;
}

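/* Picks the loop for new work via a "power of two choices" strategy: sample two loops at
 * random and take the one with the lower load factor. This approximates least-loaded
 * selection without any shared iteration state. */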
struct aws_event_loop *aws_event_loop_group_get_next_loop(struct aws_event_loop_group *el_group) {
    size_t loop_count = aws_array_list_length(&el_group->event_loops);
    AWS_ASSERT(loop_count > 0);
    if (loop_count == 0) {
        return NULL;
    }

    /* do one call to get 32 random bits, because this hits an actual entropy source and it's not cheap */
    uint32_t random_32_bit_num = 0;
    aws_device_random_u32(&random_32_bit_num);

    /* use the best-of-two algorithm to select the loop with the lowest load.
     * If we find device random is too hard on the kernel, we can seed it and use another random
     * number generator. */

    /* this is fine and intentional: the cast chops off the top 16 bits, and that's what we want. */
    uint16_t random_num_a = (uint16_t)random_32_bit_num;
    random_num_a = random_num_a % loop_count;

    uint16_t random_num_b = (uint16_t)(random_32_bit_num >> 16);
    random_num_b = random_num_b % loop_count;

    struct aws_event_loop *random_loop_a = NULL;
    struct aws_event_loop *random_loop_b = NULL;
    aws_array_list_get_at(&el_group->event_loops, &random_loop_a, random_num_a);
    aws_array_list_get_at(&el_group->event_loops, &random_loop_b, random_num_b);

    /* there's no logical reason why this should ever be possible. It's just best to die if it happens. */
    AWS_FATAL_ASSERT((random_loop_a && random_loop_b) && "random_loop_a or random_loop_b is NULL.");

    size_t load_a = aws_event_loop_get_load_factor(random_loop_a);
    size_t load_b = aws_event_loop_get_load_factor(random_loop_b);

    return load_a < load_b ? random_loop_a : random_loop_b;
}

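/* Hash-table value destructor for the loop's local-data table: gives the owner of a
 * loop-local object a chance to clean up when the object is removed or the table is
 * cleaned up. */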
static void s_object_removed(void *value) {
    struct aws_event_loop_local_object *object = (struct aws_event_loop_local_object *)value;
    if (object->on_object_removed) {
        object->on_object_removed(object);
    }
}

int aws_event_loop_init_base(struct aws_event_loop *event_loop, struct aws_allocator *alloc, aws_io_clock_fn *clock) {
    AWS_ZERO_STRUCT(*event_loop);

    event_loop->alloc = alloc;
    event_loop->clock = clock;
    aws_atomic_init_int(&event_loop->current_load_factor, 0u);
    aws_atomic_init_int(&event_loop->next_flush_time, 0u);

    if (aws_hash_table_init(&event_loop->local_data, alloc, 20, aws_hash_ptr, aws_ptr_eq, NULL, s_object_removed)) {
        return AWS_OP_ERR;
    }

    return AWS_OP_SUCCESS;
}

void aws_event_loop_clean_up_base(struct aws_event_loop *event_loop) {
    aws_hash_table_clean_up(&event_loop->local_data);
}

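/* Load-factor bookkeeping: the loop implementation brackets each tick of work with
 * register_tick_start()/register_tick_end(). Elapsed tick time accumulates in a sum owned
 * by the loop's thread, and is flushed to the atomic load factor roughly once per second. */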
void aws_event_loop_register_tick_start(struct aws_event_loop *event_loop) {
    aws_high_res_clock_get_ticks(&event_loop->latest_tick_start);
}

void aws_event_loop_register_tick_end(struct aws_event_loop *event_loop) {
    /* increment the timestamp diff counter (this should always be called from the same thread);
     * the concurrency work happens during the flush. */
    uint64_t end_tick = 0;
    aws_high_res_clock_get_ticks(&end_tick);

    size_t elapsed = (size_t)aws_min_u64(end_tick - event_loop->latest_tick_start, SIZE_MAX);
    event_loop->current_tick_latency_sum = aws_add_size_saturating(event_loop->current_tick_latency_sum, elapsed);
    event_loop->latest_tick_start = 0;

    size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time);
    /* store as seconds because we can't make a 64-bit integer reliably atomic across platforms. */
    uint64_t end_tick_secs = aws_timestamp_convert(end_tick, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);

    /* if a second has passed, flush the load-factor. */
    if (end_tick_secs > next_flush_time_secs) {
        aws_atomic_store_int(&event_loop->current_load_factor, event_loop->current_tick_latency_sum);
        event_loop->current_tick_latency_sum = 0;
        /* run again in a second. */
        aws_atomic_store_int(&event_loop->next_flush_time, (size_t)(end_tick_secs + 1));
    }
}

size_t aws_event_loop_get_load_factor(struct aws_event_loop *event_loop) {
    uint64_t current_time = 0;
    aws_high_res_clock_get_ticks(&current_time);

    uint64_t current_time_secs = aws_timestamp_convert(current_time, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
    size_t next_flush_time_secs = aws_atomic_load_int(&event_loop->next_flush_time);

    /* safety valve just in case an event-loop had heavy load and then went completely idle. If we haven't
     * had an update from the event-loop in 10 seconds, just assume it's idle. Yes, this is racy, but it should
     * be good enough: an active loop updates its counter frequently (certainly more than once per 10 seconds),
     * and in the case where we do hit the technical race condition, we don't care anyway; returning 0 is the
     * desired behavior. */
    if (current_time_secs > next_flush_time_secs + 10) {
        return 0;
    }

    return aws_atomic_load_int(&event_loop->current_load_factor);
}

void aws_event_loop_destroy(struct aws_event_loop *event_loop) {
    if (!event_loop) {
        return;
    }

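    /* Implementations wait in destroy() for the loop's thread to finish, so calling this from
     * the loop's own thread would deadlock; hence the assert below. */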
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->destroy);
    AWS_ASSERT(!aws_event_loop_thread_is_callers_thread(event_loop));

    event_loop->vtable->destroy(event_loop);
}

int aws_event_loop_fetch_local_object(
    struct aws_event_loop *event_loop,
    void *key,
    struct aws_event_loop_local_object *obj) {

    AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop));

    struct aws_hash_element *object = NULL;
    if (!aws_hash_table_find(&event_loop->local_data, key, &object) && object) {
        *obj = *(struct aws_event_loop_local_object *)object->value;
        return AWS_OP_SUCCESS;
    }

    return AWS_OP_ERR;
}

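/* Note: the table stores the caller's pointer directly (object->value = obj), so the object
 * must stay valid until it is removed or the loop's local data is cleaned up. */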
int aws_event_loop_put_local_object(struct aws_event_loop *event_loop, struct aws_event_loop_local_object *obj) {
    AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop));

    struct aws_hash_element *object = NULL;
    int was_created = 0;

    if (!aws_hash_table_create(&event_loop->local_data, obj->key, &object, &was_created)) {
        object->key = obj->key;
        object->value = obj;
        return AWS_OP_SUCCESS;
    }

    return AWS_OP_ERR;
}

int aws_event_loop_remove_local_object(
    struct aws_event_loop *event_loop,
    void *key,
    struct aws_event_loop_local_object *removed_obj) {

    AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop));

    struct aws_hash_element existing_object;
    AWS_ZERO_STRUCT(existing_object);

    int was_present = 0;

    struct aws_hash_element *remove_candidate = removed_obj ? &existing_object : NULL;

    if (!aws_hash_table_remove(&event_loop->local_data, key, remove_candidate, &was_present)) {
        if (remove_candidate && was_present) {
            *removed_obj = *(struct aws_event_loop_local_object *)existing_object.value;
        }

        return AWS_OP_SUCCESS;
    }

    return AWS_OP_ERR;
}

int aws_event_loop_run(struct aws_event_loop *event_loop) {
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->run);
    return event_loop->vtable->run(event_loop);
}

int aws_event_loop_stop(struct aws_event_loop *event_loop) {
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->stop);
    return event_loop->vtable->stop(event_loop);
}

int aws_event_loop_wait_for_stop_completion(struct aws_event_loop *event_loop) {
    AWS_ASSERT(!aws_event_loop_thread_is_callers_thread(event_loop));
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->wait_for_stop_completion);
    return event_loop->vtable->wait_for_stop_completion(event_loop);
}

void aws_event_loop_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task) {
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->schedule_task_now);
    AWS_ASSERT(task);
    event_loop->vtable->schedule_task_now(event_loop, task);
}

void aws_event_loop_schedule_task_future(
    struct aws_event_loop *event_loop,
    struct aws_task *task,
    uint64_t run_at_nanos) {

    AWS_ASSERT(event_loop->vtable && event_loop->vtable->schedule_task_future);
    AWS_ASSERT(task);
    event_loop->vtable->schedule_task_future(event_loop, task, run_at_nanos);
}

void aws_event_loop_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task) {
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->cancel_task);
    AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop));
    AWS_ASSERT(task);
    event_loop->vtable->cancel_task(event_loop, task);
}

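/* The I/O registration API differs by platform: completion-port style registration
 * (Windows IOCP) vs. readiness-style subscriptions (e.g. epoll/kqueue) elsewhere. */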
#if AWS_USE_IO_COMPLETION_PORTS

int aws_event_loop_connect_handle_to_io_completion_port(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle) {

    AWS_ASSERT(event_loop->vtable && event_loop->vtable->connect_to_io_completion_port);
    return event_loop->vtable->connect_to_io_completion_port(event_loop, handle);
}

#else /* !AWS_USE_IO_COMPLETION_PORTS */

int aws_event_loop_subscribe_to_io_events(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    int events,
    aws_event_loop_on_event_fn *on_event,
    void *user_data) {

    AWS_ASSERT(event_loop->vtable && event_loop->vtable->subscribe_to_io_events);
    return event_loop->vtable->subscribe_to_io_events(event_loop, handle, events, on_event, user_data);
}
#endif /* AWS_USE_IO_COMPLETION_PORTS */

int aws_event_loop_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
    AWS_ASSERT(aws_event_loop_thread_is_callers_thread(event_loop));
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->unsubscribe_from_io_events);
    return event_loop->vtable->unsubscribe_from_io_events(event_loop, handle);
}

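/* Frees the per-handle state that the loop implementation stashed in handle->additional_data
 * when the handle was subscribed/connected. */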
void aws_event_loop_free_io_event_resources(struct aws_event_loop *event_loop, struct aws_io_handle *handle) {
    AWS_ASSERT(event_loop && event_loop->vtable->free_io_event_resources);
    event_loop->vtable->free_io_event_resources(handle->additional_data);
}

bool aws_event_loop_thread_is_callers_thread(struct aws_event_loop *event_loop) {
    AWS_ASSERT(event_loop->vtable && event_loop->vtable->is_on_callers_thread);
    return event_loop->vtable->is_on_callers_thread(event_loop);
}

int aws_event_loop_current_clock_time(struct aws_event_loop *event_loop, uint64_t *time_nanos) {
    AWS_ASSERT(event_loop->clock);
    return event_loop->clock(time_nanos);
}