#ifndef PREAMBLE_H
#define PREAMBLE_H
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

/* clang-format off */
#include <vcc.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>

/* Fake-up sys/types.h */
typedef signed int ssize_t;

/* Fake-up stdbool.h */
typedef int bool;
const int true = 1;
const int false = 0;

/* Definitions from epoll.h */
typedef union epoll_data
{
  _(backing_member) void *ptr;
  int fd;
  uint32_t u32;
  uint64_t u64;
} epoll_data_t;

struct epoll_event
{
  uint32_t events;        /* Epoll events */
  _(inline) epoll_data_t data;        /* User data variable */
};

enum EPOLL_EVENTS
  {
    EPOLLIN = 0x001,
#define EPOLLIN EPOLLIN
    EPOLLPRI = 0x002,
#define EPOLLPRI EPOLLPRI
    EPOLLOUT = 0x004,
#define EPOLLOUT EPOLLOUT
    EPOLLRDNORM = 0x040,
#define EPOLLRDNORM EPOLLRDNORM
    EPOLLRDBAND = 0x080,
#define EPOLLRDBAND EPOLLRDBAND
    EPOLLWRNORM = 0x100,
#define EPOLLWRNORM EPOLLWRNORM
    EPOLLWRBAND = 0x200,
#define EPOLLWRBAND EPOLLWRBAND
    EPOLLMSG = 0x400,
#define EPOLLMSG EPOLLMSG
    EPOLLERR = 0x008,
#define EPOLLERR EPOLLERR
    EPOLLHUP = 0x010,
#define EPOLLHUP EPOLLHUP
    EPOLLRDHUP = 0x2000,
#define EPOLLRDHUP EPOLLRDHUP
    EPOLLONESHOT = (1 << 30),
#define EPOLLONESHOT EPOLLONESHOT
    EPOLLET = (1 << 31)
#define EPOLLET EPOLLET
  };

#define EPOLL_CTL_ADD 1        /* Add a file descriptor to the interface.  */
#define EPOLL_CTL_DEL 2        /* Remove a file descriptor from the interface.  */
#define EPOLL_CTL_MOD 3        /* Change file descriptor epoll_event structure.  */

struct abstract_os_t {
  int unused;
  _(ghost \bool watched[int])
} abstract_os_data;
struct abstract_os_t *abstract_os = &abstract_os_data;

int epoll_ctl (int __epfd, int __op, int __fd, struct epoll_event *__event)
  _(ensures \result == 0 && __op == EPOLL_CTL_ADD ==>  abstract_os->watched[__fd])
  _(ensures \result == 0 && __op == EPOLL_CTL_DEL ==> !abstract_os->watched[__fd])
;
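
/* Illustrative sketch (not part of the original proof; guarded out of
 * compilation): how a verified caller observes the ghost `watched` map.
 * The fd and event arguments are hypothetical. */
#if 0
void watched_map_example(int epfd, int fd, struct epoll_event *ev)
{
    if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, ev) == 0) {
        _(assert abstract_os->watched[fd])      /* successful ADD => fd watched */
        if (epoll_ctl(epfd, EPOLL_CTL_DEL, fd, ev) == 0) {
            _(assert !abstract_os->watched[fd]) /* successful DEL => fd unwatched */
        }
    }
}
#endif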

struct epoll_event_data;
int epoll_wait(int __epfd, struct epoll_event *__events, int maxevents, int timeout)
  _(ensures \result <= maxevents)
  /*TODO: use explicit triggers for quantifier instantiation*/
  _(ensures \forall int i; {:level 2} 0 <= i && i < \result ==> \wrapped((struct epoll_event_data *)__events[i].data.ptr))
  _(writes \extent((struct epoll_event[(unsigned)maxevents]) __events))
  _(ensures \extent_mutable((struct epoll_event[(unsigned)maxevents]) __events))
;

int epoll_create(int size)
  _(requires 0 < size)
;

/* Definitions from aws/common/macros.h */
#define AWS_CONTAINER_OF(ptr, type, member) ((type *)((uint8_t *)(ptr)-offsetof(type, member)))
#define AWS_UNLIKELY(x) x
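
/* Illustrative sketch (hypothetical helper, guarded out of compilation):
 * AWS_CONTAINER_OF recovers the enclosing struct from a pointer to one of
 * its members by subtracting the member's offset, e.g. with the aws_task
 * type declared further below. */
#if 0
static struct aws_task *task_of_node_example(struct aws_linked_list_node *node)
{
    /* yields the aws_task whose `node` member is *node */
    return AWS_CONTAINER_OF(node, struct aws_task, node);
}
#endif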

/* Definitions from aws/common/assert.h */
/* Convert into VCC assertions */
#define AWS_ASSERT(x) _(assert x)
#define AWS_PRECONDITION(x) _(assert x)

/* Definitions from aws/common/logging.h (no-ops for proof) */
#define AWS_LOGF_INFO(...)
#define AWS_LOGF_TRACE(...)
#define AWS_LOGF_DEBUG(...)
#define AWS_LOGF_ERROR(...)
#define AWS_LOGF_FATAL(...)

/* Definitions from aws/common/clock.h */
enum aws_timestamp_unit {
    AWS_TIMESTAMP_SECS = 1,
    AWS_TIMESTAMP_MILLIS = 1000,
    AWS_TIMESTAMP_MICROS = 1000000,
    AWS_TIMESTAMP_NANOS = 1000000000,
};

uint64_t aws_timestamp_convert(
    uint64_t timestamp,
    enum aws_timestamp_unit convert_from,
    enum aws_timestamp_unit convert_to,
    uint64_t *remainder);
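
/* Illustrative usage (hypothetical values, guarded out of compilation):
 * the enum values are units-per-second, so converting 1500 ms to seconds
 * yields 1, with the leftover reported through `remainder` (presumably in
 * the source unit, i.e., 500 ms here). */
#if 0
static void timestamp_convert_example(void)
{
    uint64_t rem = 0;
    uint64_t secs = aws_timestamp_convert(1500, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_SECS, &rem);
    /* expected: secs == 1 && rem == 500 */
}
#endif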

/* Definitions from aws/common/error.h */
#define AWS_OP_SUCCESS (0)
#define AWS_OP_ERR (-1)

enum aws_common_error {
    AWS_ERROR_SUCCESS = 0,
    AWS_ERROR_OOM,
    AWS_ERROR_UNKNOWN,
    AWS_ERROR_SHORT_BUFFER,
    AWS_ERROR_OVERFLOW_DETECTED,
    AWS_ERROR_UNSUPPORTED_OPERATION,
    AWS_ERROR_INVALID_BUFFER_SIZE,
    AWS_ERROR_INVALID_HEX_STR,
    AWS_ERROR_INVALID_BASE64_STR,
    AWS_ERROR_INVALID_INDEX,
    AWS_ERROR_THREAD_INVALID_SETTINGS,
    AWS_ERROR_THREAD_INSUFFICIENT_RESOURCE,
    AWS_ERROR_THREAD_NO_PERMISSIONS,
    AWS_ERROR_THREAD_NOT_JOINABLE,
    AWS_ERROR_THREAD_NO_SUCH_THREAD_ID,
    AWS_ERROR_THREAD_DEADLOCK_DETECTED,
    AWS_ERROR_MUTEX_NOT_INIT,
    AWS_ERROR_MUTEX_TIMEOUT,
    AWS_ERROR_MUTEX_CALLER_NOT_OWNER,
    AWS_ERROR_MUTEX_FAILED,
    AWS_ERROR_COND_VARIABLE_INIT_FAILED,
    AWS_ERROR_COND_VARIABLE_TIMED_OUT,
    AWS_ERROR_COND_VARIABLE_ERROR_UNKNOWN,
    AWS_ERROR_CLOCK_FAILURE,
    AWS_ERROR_LIST_EMPTY,
    AWS_ERROR_DEST_COPY_TOO_SMALL,
    AWS_ERROR_LIST_EXCEEDS_MAX_SIZE,
    AWS_ERROR_LIST_STATIC_MODE_CANT_SHRINK,
    AWS_ERROR_PRIORITY_QUEUE_FULL,
    AWS_ERROR_PRIORITY_QUEUE_EMPTY,
    AWS_ERROR_PRIORITY_QUEUE_BAD_NODE,
    AWS_ERROR_HASHTBL_ITEM_NOT_FOUND,
    AWS_ERROR_INVALID_DATE_STR,
    AWS_ERROR_INVALID_ARGUMENT,
    AWS_ERROR_RANDOM_GEN_FAILED,
    AWS_ERROR_MALFORMED_INPUT_STRING,
    AWS_ERROR_UNIMPLEMENTED,
    AWS_ERROR_INVALID_STATE,
    AWS_ERROR_ENVIRONMENT_GET,
    AWS_ERROR_ENVIRONMENT_SET,
    AWS_ERROR_ENVIRONMENT_UNSET,
    AWS_ERROR_STREAM_UNSEEKABLE,
    AWS_ERROR_NO_PERMISSION,
    AWS_ERROR_FILE_INVALID_PATH,
    AWS_ERROR_MAX_FDS_EXCEEDED,
    AWS_ERROR_SYS_CALL_FAILURE,
    AWS_ERROR_C_STRING_BUFFER_NOT_NULL_TERMINATED,
    AWS_ERROR_STRING_MATCH_NOT_FOUND,

    AWS_ERROR_END_COMMON_RANGE = 0x03FF
};

int aws_raise_error(int err)
    _(ensures \result == AWS_OP_ERR)
;

/* Forward declarations */
struct epoll_loop;
struct aws_allocator;
struct aws_linked_list_node;
struct aws_mutex;
struct aws_io_handle;

/* Definitions from aws/common/allocator.h */
struct aws_allocator {
    void *(*mem_acquire)(struct aws_allocator *allocator, size_t size);
    void (*mem_release)(struct aws_allocator *allocator, void *ptr);
    /* Optional method; if not supported, this pointer must be NULL */
    void *(*mem_realloc)(struct aws_allocator *allocator, void *oldptr, size_t oldsize, size_t newsize);
    /* Optional method; if not supported, this pointer must be NULL */
    void *(*mem_calloc)(struct aws_allocator *allocator, size_t num, size_t size);
    void *impl;
    _(invariant mem_acquire->\valid)
    _(invariant mem_release->\valid)
    _(invariant mem_realloc == NULL || mem_realloc->\valid)
    _(invariant mem_calloc == NULL || mem_calloc->\valid)
};

/* VCC proof stubs: the allocator argument is ignored and zero-initialization is not modeled */
#define aws_mem_calloc(a, n, s) malloc((n) * (s))
#define aws_mem_release(a, o) free(o)

/* Definitions from aws/common/array_list.h */
struct aws_array_list {
    struct aws_allocator *alloc;
    size_t current_size;
    size_t length;
    size_t item_size;
    void *data;
};

/* Definitions from aws/common/priority_queue.h */
struct aws_priority_queue_node {
    size_t current_index;
};

/* VCC change: fnptr declaration.
   VCC cannot parse the original aws-c-io function-pointer typedef
     typedef int(aws_priority_queue_compare_fn)(const void *a, const void *b);
   so it is replaced with: */
typedef int(* aws_priority_queue_compare_fn_ptr)(const void *a, const void *b);

struct aws_priority_queue {
    aws_priority_queue_compare_fn_ptr pred; /*< VCC change: fnptr */
    _(inline) struct aws_array_list container;
    _(inline) struct aws_array_list backpointers;
    _(invariant pred->\valid)
};

/* Definitions from aws/common/task_scheduler.h */
struct aws_task;

typedef enum aws_task_status {
    AWS_TASK_STATUS_RUN_READY,
    AWS_TASK_STATUS_CANCELED,
} aws_task_status;

/* VCC change: fnptr declaration */
typedef void(* aws_task_fn_ptr)(struct aws_task *task, void *arg, enum aws_task_status)
#ifdef UNSUB_TASK_FN_PTR
  _(requires \malloc_root((struct epoll_event_data *)arg))
  _(writes \extent((struct epoll_event_data *)arg))
#elif defined(STOP_TASK_FN_PTR)
  _(updates epoll_loop_of(event_loop_of(arg))::status)
  _(updates &epoll_loop_of(event_loop_of(arg))->stop_task_ptr)
  _(requires \thread_local(event_loop_of(arg)))
#endif
;

struct aws_task {
    aws_task_fn_ptr fn; /*< VCC change: fnptr */
    void *arg;
    uint64_t timestamp;
    struct aws_linked_list_node node;
    struct aws_priority_queue_node priority_queue_node;
    const char *type_tag;
    size_t reserved;
    _(invariant \mine(&node))
    _(invariant \mine(&priority_queue_node))
};

void aws_task_init(struct aws_task *task, aws_task_fn_ptr aws_task_fn, void *arg, const char *type_tag)
  _(requires \thread_local(task))
  _(writes task)
  _(ensures \wrapped(task))
  _(ensures task->fn == aws_task_fn)
  _(ensures task->arg == arg)
  _(ensures task->type_tag == type_tag)
;

struct aws_task_scheduler {
    struct aws_allocator *alloc;
    struct aws_priority_queue timed_queue; /* Tasks scheduled to run at specific times */
    struct aws_linked_list timed_list;     /* If timed_queue runs out of memory, further timed tasks are stored here */
    struct aws_linked_list asap_list;      /* Tasks scheduled to run as soon as possible */
    _(invariant \mine(&timed_queue))
    _(invariant \mine(&timed_list))
    _(invariant \mine(&asap_list))
};

int aws_task_scheduler_init(struct aws_task_scheduler *scheduler, struct aws_allocator *allocator)
    _(writes \extent(&scheduler->timed_list))
    _(writes \extent(&scheduler->asap_list))
    _(ensures \extent_mutable(&scheduler->timed_list))
    _(ensures \extent_mutable(&scheduler->asap_list))
    _(ensures \result == AWS_OP_SUCCESS <==>
        scheduler->timed_queue.pred->\valid &&
        (scheduler->timed_list.head.next == &scheduler->timed_list.tail && scheduler->timed_list.length == 0) &&
        (scheduler->asap_list.head.next == &scheduler->asap_list.tail && scheduler->asap_list.length == 0)
     )
;

void aws_task_scheduler_schedule_now(struct aws_task_scheduler *scheduler, struct aws_task *task)
    _(updates scheduler)
;

void aws_task_scheduler_schedule_future(struct aws_task_scheduler *scheduler, struct aws_task *task, uint64_t time_to_run)
    _(updates scheduler)
;

void aws_task_scheduler_run_all(struct aws_task_scheduler *scheduler, uint64_t current_time)
    _(updates scheduler)
;

void aws_task_scheduler_clean_up(struct aws_task_scheduler *scheduler)
    _(updates scheduler)
;

void aws_task_scheduler_cancel_task(struct aws_task_scheduler *scheduler, struct aws_task *task)
    _(updates scheduler)
;

bool aws_task_scheduler_has_tasks(const struct aws_task_scheduler *scheduler, uint64_t *next_task_time);
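
/* Illustrative sketch (hypothetical, guarded out of compilation): the
 * intended call sequence is aws_task_init followed by one of the schedule
 * functions. Init's postcondition establishes `task->fn == fn`, so a caller
 * holding a valid `fn` gets the `task->fn->\valid` facts that the
 * list/scheduler specs below rely on. */
#if 0
static void schedule_example(struct aws_task_scheduler *scheduler,
                             struct aws_task *task, aws_task_fn_ptr fn, void *arg)
{
    aws_task_init(task, fn, arg, "example");
    _(assert task->fn == fn && task->arg == arg)
    aws_task_scheduler_schedule_now(scheduler, task);
}
#endif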

/* Definitions from aws/common/atomics.h */
struct aws_atomic_var {
  void *value;
};

/* VCC change: remove volatile annotation */
void aws_atomic_init_int(/*volatile*/ struct aws_atomic_var *var, size_t n)
  _(writes &(var->value))
;

void aws_atomic_init_ptr(/*volatile*/ struct aws_atomic_var *var, void *p)
  _(writes &(var->value))
;

void *aws_atomic_load_ptr(/*volatile*/ struct aws_atomic_var *var)
;

uint64_t aws_atomic_load_int(/*volatile const*/ struct aws_atomic_var *var)
;

void aws_atomic_store_int(/*volatile*/ struct aws_atomic_var *var, size_t n)
  _(writes &(var->value))
;

bool aws_atomic_compare_exchange_ptr(/*volatile*/ struct aws_atomic_var *var, void **expected, void *desired)
  _(writes &(var->value))
;

void aws_atomic_store_ptr(/*volatile*/ struct aws_atomic_var *var, void *val)
  _(writes &(var->value))
;

/* Fake-up pthread.h */
typedef uint64_t pthread_t;

/* Definitions from aws/common/thread.h */
enum aws_thread_detach_state {
    AWS_THREAD_NOT_CREATED = 1,
    AWS_THREAD_JOINABLE,
    AWS_THREAD_JOIN_COMPLETED,
};

typedef pthread_t aws_thread_id_t;

struct aws_thread {
    struct aws_allocator *allocator;
    enum aws_thread_detach_state detach_state;
    aws_thread_id_t thread_id;
};

struct aws_thread_options {
    size_t stack_size;
    int32_t cpu_id;
};

_(pure) aws_thread_id_t aws_thread_current_thread_id(void)
    _(ensures \result == \addr(\me))
;

int aws_thread_init(struct aws_thread *thread, struct aws_allocator *allocator)
  _(requires allocator->\valid)
  _(writes \span(thread))
  _(ensures \extent_mutable(thread))
;

void aws_thread_clean_up(struct aws_thread *thread)
  _(writes \span(thread))
  _(ensures \extent_mutable(thread))
;

_(pure) bool aws_thread_thread_id_equal(aws_thread_id_t t1, aws_thread_id_t t2)
  _(ensures \result == (t1 == t2))
;

/* Marked pure from the event loop's point of view: the following functions only lock and mutate private aws-c-common state */
_(pure) void aws_thread_increment_unjoined_count();
_(pure) void aws_thread_decrement_unjoined_count();

/* Definitions from aws/io/io.h */
enum aws_io_event_type {
    AWS_IO_EVENT_TYPE_READABLE = 1,
    AWS_IO_EVENT_TYPE_WRITABLE = 2,
    AWS_IO_EVENT_TYPE_REMOTE_HANG_UP = 4,
    AWS_IO_EVENT_TYPE_CLOSED = 8,
    AWS_IO_EVENT_TYPE_ERROR = 16,
};

struct aws_io_handle {
    _(inline) struct {
        int fd;
        void *handle;
    } data;
    void *additional_data;
    _(invariant valid_fd(data.fd))
};

/* VCC change: fnptr declaration */
typedef int(* aws_io_clock_fn_ptr)(uint64_t *timestamp)
  _(writes timestamp)
;

/* Definitions from aws/io/event_loop.h */
struct aws_event_loop_vtable;

struct aws_event_loop_options {
    aws_io_clock_fn_ptr clock; /*< VCC change: fnptr */
    struct aws_thread_options *thread_options;
    _(invariant clock->\valid)
    _(invariant thread_options != NULL <==> \mine(thread_options))
};

_(claimable) struct aws_event_loop {
    struct aws_event_loop_vtable *vtable;
    struct aws_allocator *alloc;
    aws_io_clock_fn_ptr clock;           /*< VCC change: fnptr */
/*  struct aws_hash_table local_data; */ /*< VCC change: not modeled */
    void *impl_data;
    /*TODO: allocator is shared and closed*/
    _(invariant clock->\valid)
    _(invariant \mine((struct epoll_loop *)impl_data))
};

/* VCC change: fnptr declaration */
typedef void(* aws_event_loop_on_event_fn_ptr)(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    int events,
    void *user_data _(ghost \claim(c)))
    _(requires \wrapped(c) && \claims(c, event_loop->\closed))
    _(requires \nested(handle))
    _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue)
;

/* Definitions from aws/common/linked_list.h */
struct aws_linked_list_node {
    struct aws_linked_list_node *next;
    struct aws_linked_list_node *prev;
};

struct aws_linked_list {
    struct aws_linked_list_node head;
    struct aws_linked_list_node tail;
    _(ghost \natural length)
    _(invariant \mine(&head))
    _(invariant \mine(&tail))
    _(invariant (0 == length) <==> (head.next == &tail))
};

void aws_linked_list_init(struct aws_linked_list *list)
  _(requires \thread_local(list))
  _(writes \extent(list))
  _(ensures \extent_mutable(list))
  _(ensures list->head.next == &list->tail)
  _(ensures list->tail.next == NULL)
  _(ensures list->tail.prev == &list->head)
  _(ensures list->head.prev == NULL)
  _(ensures list->length == 0)
;

_(pure) bool aws_linked_list_empty(const struct aws_linked_list *list)
    _(requires \wrapped(list))
    _(reads &list->length)
    _(ensures \result == (list->length == 0))
    _(decreases 0)
{
    return list->head.next == &list->tail;
}

/* Specialized for linked lists containing tasks */
struct aws_linked_list_node *aws_linked_list_pop_front(struct aws_linked_list *list _(out struct aws_task * task))
    /* general: */
    _(maintains \wrapped(list))
    _(requires 0 < list->length)
    _(ensures (\old(list->length) - 1) == list->length)
    _(writes list)
    _(decreases 0)
    /* specialized: */
    _(ensures task == AWS_CONTAINER_OF(\result, struct aws_task, node))
    _(ensures \thread_local(task))
    _(ensures task->fn->\valid)
;

/* We omit `task == AWS_CONTAINER_OF(node, struct aws_task, node)`
because VCC's memory model can't prove this when this occurs in
`s_schedule_task_common` */
void aws_linked_list_push_back(struct aws_linked_list *list, struct aws_linked_list_node *node _(ghost struct aws_task *task))
    /* general: */
    _(updates list)
    /* specialized: */
    _(requires task->fn->\valid)
;

void aws_linked_list_swap_contents(struct aws_linked_list *a, struct aws_linked_list *b)
    _(updates a)
    _(updates b)
;
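
/* Illustrative sketch (hypothetical, guarded out of compilation): draining
 * a task list with the specialized pop_front, whose ghost out-parameter
 * names the enclosing task of the returned node. */
#if 0
static void drain_example(struct aws_linked_list *list)
{
    while (!aws_linked_list_empty(list)) {
        _(ghost struct aws_task *task)
        struct aws_linked_list_node *node = aws_linked_list_pop_front(list _(out task));
        struct aws_task *t = AWS_CONTAINER_OF(node, struct aws_task, node);
        _(assert t == task) /* matches pop_front's specialized ensures */
        t->fn(t, t->arg, AWS_TASK_STATUS_RUN_READY);
    }
}
#endif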

/* Definitions from source/linux/epoll_event_loop.c */
struct epoll_loop {
    _(group scheduler)
    _(:scheduler) struct aws_task_scheduler scheduler;
    struct aws_thread thread_created_on;
    struct aws_thread_options thread_options;
    aws_thread_id_t thread_joined_to;
    struct aws_atomic_var running_thread_id;
    _(group read_handle)
    _(:read_handle) struct aws_io_handle read_task_handle;
    struct aws_io_handle write_task_handle;
    struct aws_mutex task_pre_queue_mutex;
    _(group queue)
    _(:queue) struct aws_linked_list task_pre_queue;
    _(group stop_task)
    _(:stop_task) struct aws_task stop_task;
    _(:stop_task) struct aws_atomic_var stop_task_ptr;
    int epoll_fd;
    _(group status)
    _(:status) bool should_process_task_pre_queue;
    _(:status) bool should_continue;
    _(invariant valid_fd(epoll_fd))
    /* scheduler */
    _(invariant \mine(&thread_created_on))
    _(invariant \mine(&running_thread_id))
    /* read_handle */
    _(invariant \mine(&write_task_handle))
    _(invariant \mine(&task_pre_queue_mutex))
    /* task_pre_queue */
    /* stop_task */
    /* stop_task_ptr */
    _(invariant task_pre_queue_mutex.protected_obj == &task_pre_queue)
    _(invariant task_pre_queue_mutex.\claim_count == 1)
};

struct epoll_event_data {
    struct aws_allocator *alloc;
    struct aws_io_handle *handle;
    aws_event_loop_on_event_fn_ptr on_event; /*< VCC change: fnptr */
    void *user_data;
    struct aws_task cleanup_task;
    bool is_subscribed; /* false when handle is unsubscribed, but this struct hasn't been cleaned up yet */
    _(invariant \mine(handle))
    _(invariant ((struct epoll_event_data *)handle->additional_data) == \this)
    _(invariant on_event->\valid)
    _(invariant \mine(&cleanup_task))
};

/* VCC mutex contract */
/* VCC change: replace mutex implementation with VCC contract */
#define AWS_MUTEX_INIT { .locked = 0 }

_(claimable) _(volatile_owns) struct aws_mutex {
    volatile int locked; /* 0=>unlocked / 1=>locked */
    _(ghost \object protected_obj)
    _(invariant locked == 0 ==> \mine(protected_obj))
};

void aws_mutex_lock(struct aws_mutex *l _(ghost \claim c))
    _(always c, l->\closed)
    _(ensures \wrapped(l->protected_obj) && \fresh(l->protected_obj))
    _(ensures l->locked == 1)
;

void aws_mutex_unlock(struct aws_mutex *l _(ghost \claim c))
    _(always c, l->\closed)
    _(requires l->protected_obj != c)
    _(writes l->protected_obj)
    _(requires \wrapped(l->protected_obj))
    _(ensures l->locked == 0)
;
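
/* Illustrative sketch (hypothetical, guarded out of compilation) of the
 * locking protocol encoded above: a thread holding a claim `c` that the
 * mutex stays closed may lock it, which transfers ownership of
 * `protected_obj` to the thread; unlock takes ownership back, restoring
 * the mutex invariant. */
#if 0
static void mutex_protocol_example(struct aws_mutex *m _(ghost \claim c))
    _(always c, m->\closed)
{
    aws_mutex_lock(m _(ghost c));
    /* here: \wrapped(m->protected_obj), so the thread may operate on it */
    aws_mutex_unlock(m _(ghost c));
}
#endif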

/* Useful definitions */
_(def bool valid_fd(int fd) {
  return 0 <= fd;
})

_(def struct aws_event_loop *event_loop_of(void *arg) {
  return (struct aws_event_loop *)arg;
})

_(def struct epoll_loop *epoll_loop_of(struct aws_event_loop *event_loop) {
  return (struct epoll_loop *)event_loop->impl_data;
})

/*
 * This predicate (which implies the object invariant properties of
 * epoll_event_data) means the heap looks like this:
 *
 * handle --.additional_data--> epoll_event_data --.on_event--> valid fn
 *     ^                         /            \
 *      `--------------.handle--'              '--.user_data--> (can be NULL)
 */
_(def \bool wf_cio_handle(struct aws_io_handle *handle) {
  return \nested(handle) && handle->\closed &&
         \malloc_root((struct epoll_event_data *)handle->additional_data) &&
         \wrapped((struct epoll_event_data *)handle->additional_data) &&
         (handle->\owner == (struct epoll_event_data *)handle->additional_data);
})
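
/* Illustrative sketch (hypothetical, guarded out of compilation): subscribe
 * establishes wf_cio_handle by wiring the handle and its epoll_event_data
 * to point at each other, then wrapping the data object so that it owns
 * the handle. */
#if 0
static void wire_handle_example(struct epoll_event_data *data, struct aws_io_handle *handle)
{
    data->handle = handle;
    handle->additional_data = data;
    _(wrap data) /* data now owns handle; wf_cio_handle(handle) holds */
}
#endif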

#define current_thread_owns_event_loop(event_loop) \
  \addr(\me) == \addr(event_loop->\owner)

#define ownership_of_epoll_loop_objects(loop) \
    (\wrapped(loop::scheduler)                       \
        && \wrapped(&loop->scheduler)                \
        && \wrapped(loop::read_handle)               \
        && \wrapped(&loop->read_task_handle)         \
        && \wrapped(loop::stop_task)                 \
        && \wrapped(&loop->stop_task)                \
        && \wrapped(&loop->stop_task_ptr)            \
        && \wrapped(loop::queue)                     \
        && \wrapped(loop::status)                    \
        && \fresh(loop::scheduler)                   \
        && \fresh(&loop->scheduler)                  \
        && \fresh(loop::read_handle)                 \
        && \fresh(&loop->read_task_handle)           \
        && \fresh(loop::stop_task)                   \
        && \fresh(&loop->stop_task)                  \
        && \fresh(&loop->stop_task_ptr)              \
        && \fresh(loop::queue)                       \
        && \fresh(&loop->task_pre_queue)             \
        && \fresh(loop::status))

/* Specifications for epoll_loop functions */
static int s_subscribe_to_io_events(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    int events,
    aws_event_loop_on_event_fn_ptr on_event, /*< VCC change: fnptr */
    void *user_data
    _(ghost \claim(c_event_loop))
)
    _(always c_event_loop, event_loop->\closed) /*< the event_loop won't be changed or destroyed underneath us */
    _(requires \wrapped(handle))                /*< wrapped means closed (the handle is valid) and owned by the current thread */
    _(requires on_event->\valid)                /*< valid function pointer */
    /* user_data may be NULL */
    _(ensures \result == AWS_OP_SUCCESS <==> wf_cio_handle(handle))
    _(ensures \result == AWS_OP_SUCCESS  ==> \fresh((struct epoll_event_data *)handle->additional_data))
    _(writes handle)
;

static int s_unsubscribe_from_io_events(struct aws_event_loop *event_loop, struct aws_io_handle *handle
    _(ghost \claim(c_event_loop))
    _(ghost \claim(c_mutex))
)
    _(maintains \wrapped(event_loop))           /*< current thread owns event loop (i.e., current thread is the event loop thread) */
    _(always c_event_loop, event_loop->\closed) /*< required for schedule call */
    _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(requires wf_cio_handle(handle))
    _(ensures \result == AWS_OP_SUCCESS <==> !\nested(handle))
    _(ensures \result != AWS_OP_SUCCESS <==> wf_cio_handle(handle))
    _(writes ((struct epoll_event_data *)handle->additional_data))
    _(updates &epoll_loop_of(event_loop)->scheduler)
;

static void s_schedule_task_now(struct aws_event_loop *event_loop, struct aws_task *task
    _(ghost \claim(c_event_loop))
    _(ghost \claim(c_mutex))
)
    _(always c_event_loop, event_loop->\closed)
    _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(requires \thread_local(task))
    _(requires \wrapped(task))
    _(requires task->fn->\valid)
    _(writes task)
    _(updates &epoll_loop_of(event_loop)->scheduler)
;

static void s_schedule_task_future(struct aws_event_loop *event_loop, struct aws_task *task, uint64_t run_at_nanos
    _(ghost \claim(c_event_loop))
    _(ghost \claim(c_mutex))
)
    _(always c_event_loop, event_loop->\closed)
    _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(requires \thread_local(task))
    _(requires \wrapped(task))
    _(requires task->fn->\valid)
    _(writes task)
    _(updates &epoll_loop_of(event_loop)->scheduler)
;

static bool s_is_on_callers_thread(struct aws_event_loop *event_loop
    _(ghost \claim(c_event_loop))
)
    _(always c_event_loop, event_loop->\closed)
    _(ensures \result ==> current_thread_owns_event_loop(event_loop))
;

static void s_cancel_task(struct aws_event_loop *event_loop, struct aws_task *task)
    _(requires \wrapped(event_loop))
    _(updates &epoll_loop_of(event_loop)->scheduler)
;

static void s_process_task_pre_queue(struct aws_event_loop *event_loop _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex)))
    _(always c_event_loop, event_loop->\closed)
    _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(requires \thread_local(&epoll_loop_of(event_loop)->read_task_handle))
    _(requires (&epoll_loop_of(event_loop)->read_task_handle)->\closed)
    _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue)
    _(updates &epoll_loop_of(event_loop)->scheduler)
;

static void s_stop_task(struct aws_task *task, void *args, enum aws_task_status status)
    _(requires \thread_local(event_loop_of(args)))
    _(updates epoll_loop_of(event_loop_of(args))::status)
    _(updates &epoll_loop_of(event_loop_of(args))->stop_task_ptr)
;

static int s_stop(struct aws_event_loop *event_loop
    _(ghost \claim(c_event_loop))
    _(ghost \claim(c_mutex))
)
    /* wrapped0 means the claim_count of c_event_loop is 0 (i.e., notionally,
    all of the claims handed to client threads have been destroyed), so
    client threads may no longer call any further event loop API calls. */
    _(maintains \wrapped0(c_event_loop) && \claims(c_event_loop, event_loop->\closed))
    _(maintains \wrapped0(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(requires \wrapped(&epoll_loop_of(event_loop)->stop_task))
    _(writes (&epoll_loop_of(event_loop)->stop_task))
    _(updates epoll_loop_of(event_loop)::status)
    _(updates &epoll_loop_of(event_loop)->scheduler)
    _(updates &epoll_loop_of(event_loop)->stop_task_ptr)
;

static int s_wait_for_stop_completion(struct aws_event_loop *event_loop
    _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex))
)
    _(requires c_event_loop != c_mutex)
    _(requires \wrapped0(c_event_loop) && \claims(c_event_loop, event_loop->\closed) && \claims_object(c_event_loop, event_loop))
    _(writes c_event_loop, event_loop)
    _(ensures !c_event_loop->\closed)
    _(ensures \wrapped0(event_loop) && \nested(epoll_loop_of(event_loop)))
    _(ensures ownership_of_epoll_loop_objects(epoll_loop_of(event_loop)))
    _(ensures epoll_loop_of(event_loop)->task_pre_queue_mutex.locked == 0)
    _(maintains \malloc_root(epoll_loop_of(event_loop)))
    _(maintains \wrapped0(c_mutex) && \claims_object(c_mutex, &epoll_loop_of(event_loop)->task_pre_queue_mutex))
;

static int s_run(struct aws_event_loop *event_loop _(ghost \claim(c_mutex)))
    _(requires
        \wrapped0(event_loop) &&
        \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop)->task_pre_queue_mutex)))
    _(writes &epoll_loop_of(event_loop)->should_continue)
;

static void s_on_tasks_to_schedule(
    struct aws_event_loop *event_loop,
    struct aws_io_handle *handle,
    int events,
    void *user_data _(ghost \claim(c_event_loop))
)
    _(always c_event_loop, event_loop->\closed)
    _(requires \nested(handle))
    _(writes &epoll_loop_of(event_loop)->should_process_task_pre_queue)
;

static void s_main_loop(void *args _(ghost \claim(c_mutex)))
    _(requires \wrapped(event_loop_of(args)))
    _(requires \not_shared(event_loop_of(args)))
    _(requires \wrapped(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(event_loop_of(args))->task_pre_queue_mutex)))
    _(requires \wrapped(&epoll_loop_of(event_loop_of(args))->read_task_handle))
    _(updates &epoll_loop_of(event_loop_of(args))->scheduler)
    _(updates &epoll_loop_of(event_loop_of(args))->running_thread_id)
    _(writes event_loop_of(args))
    _(writes &epoll_loop_of(event_loop_of(args))->read_task_handle)
    _(writes (struct epoll_event_data *)(epoll_loop_of(event_loop_of(args))->read_task_handle.additional_data))
    _(writes &epoll_loop_of(event_loop_of(args))->should_process_task_pre_queue)
;

struct aws_event_loop *aws_event_loop_new_default(
    struct aws_allocator *alloc,
    aws_io_clock_fn_ptr clock /*< VCC change: fnptr */
    _(out \claim(c_mutex))
)
    _(requires \wrapped(alloc))
    _(requires clock->\valid)
    _(ensures \result == NULL ||
       (\wrapped0(\result) &&
        \fresh(\result) && \malloc_root(\result) &&
        \fresh(epoll_loop_of(\result)) && \malloc_root(epoll_loop_of(\result)) &&
        ownership_of_epoll_loop_objects(epoll_loop_of(\result)) &&
        \fresh(c_mutex) && \wrapped0(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(\result)->task_pre_queue_mutex))))
;

struct aws_event_loop *aws_event_loop_new_default_with_options(
    struct aws_allocator *alloc,
    const struct aws_event_loop_options *options
    _(out \claim(c_mutex))
)
    _(requires \wrapped(alloc))
    _(maintains \wrapped(options))
    _(ensures \result == NULL ||
       (\wrapped0(\result) &&
        \fresh(\result) && \malloc_root(\result) &&
        \fresh(epoll_loop_of(\result)) && \malloc_root(epoll_loop_of(\result)) &&
        ownership_of_epoll_loop_objects(epoll_loop_of(\result)) &&
        \fresh(c_mutex) && \wrapped0(c_mutex) && \claims_object(c_mutex, &(epoll_loop_of(\result)->task_pre_queue_mutex))))
;

static void s_destroy(struct aws_event_loop *event_loop
    _(ghost \claim(c_event_loop)) _(ghost \claim(c_mutex))
)
    _(requires \malloc_root(event_loop))
    _(requires \malloc_root(epoll_loop_of(event_loop)))
    _(requires c_event_loop != c_mutex)
    _(requires \wrapped0(c_event_loop) && \claims_object(c_event_loop, event_loop))
    _(requires \wrapped0(c_mutex) && \claims_object(c_mutex, &epoll_loop_of(event_loop)->task_pre_queue_mutex))
    _(requires \wrapped(&epoll_loop_of(event_loop)->scheduler))
    _(requires \wrapped(epoll_loop_of(event_loop)::status))
    _(requires \wrapped(&epoll_loop_of(event_loop)->stop_task))
    _(writes &epoll_loop_of(event_loop)->scheduler)
    _(writes epoll_loop_of(event_loop)::status)
    _(writes &epoll_loop_of(event_loop)->stop_task)
    _(writes event_loop, c_event_loop, c_mutex)
    _(updates &epoll_loop_of(event_loop)->stop_task_ptr)
;

/* clang-format on */
#endif /* PREAMBLE_H */