/**
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0.
 */

#include <assert.h>
#include <aws/common/common.h>
#include <limits.h>
#include <proof_helpers/make_common_data_structures.h>
#include <stdint.h>
#include <stdlib.h>

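/**
 * Returns true if the byte_buf's capacity is at most max_size.
 */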
bool aws_byte_buf_is_bounded(const struct aws_byte_buf *const buf, const size_t max_size) {
    return (buf->capacity <= max_size);
}

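/**
 * Returns true if the byte_buf's allocator is the default allocator.
 */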
bool aws_byte_buf_has_allocator(const struct aws_byte_buf *const buf) {
    return (buf->allocator == aws_default_allocator());
}

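/**
 * Nondeterministically assigns either a NULL or the default allocator, and allocates the
 * backing storage for a byte_buf whose capacity was left nondeterministic by the harness.
 * A typical harness pairs this with validity assumptions, e.g. (illustrative sketch only;
 * MAX_BUFFER_SIZE stands in for whatever bound the proof uses):
 *
 *     struct aws_byte_buf buf;
 *     __CPROVER_assume(aws_byte_buf_is_bounded(&buf, MAX_BUFFER_SIZE));
 *     ensure_byte_buf_has_allocated_buffer_member(&buf);
 *     __CPROVER_assume(aws_byte_buf_is_valid(&buf));
 */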
void ensure_byte_buf_has_allocated_buffer_member(struct aws_byte_buf *const buf) {
    if (buf) {
        buf->allocator = (nondet_bool()) ? NULL : aws_default_allocator();
        buf->buffer = malloc(sizeof(*(buf->buffer)) * buf->capacity);
    }
}

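/**
 * Allocates the ring_buffer's backing storage and places the head and tail pointers at
 * nondeterministic positions within it. If the allocation fails, the pointer members are NULL.
 */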
void ensure_ring_buffer_has_allocated_members(struct aws_ring_buffer *ring_buf, const size_t size) {
    ring_buf->allocator = aws_default_allocator();
    /* The `aws_ring_buffer_init` function requires size > 0. */
    __CPROVER_assume(size > 0 && size <= MAX_MALLOC);
    ring_buf->allocation = malloc(sizeof(*(ring_buf->allocation)) * size);
    /* Left uninitialized so CBMC treats the head/tail positions as nondeterministic. */
    size_t position_head;
    size_t position_tail;
    __CPROVER_assume(position_head < size);
    __CPROVER_assume(position_tail < size);
    if (ring_buf->allocation != NULL) {
        aws_atomic_store_ptr(&ring_buf->head, (ring_buf->allocation + position_head));
        aws_atomic_store_ptr(&ring_buf->tail, (ring_buf->allocation + position_tail));
        ring_buf->allocation_end = ring_buf->allocation + size;
    } else {
        aws_atomic_store_ptr(&ring_buf->head, NULL);
        aws_atomic_store_ptr(&ring_buf->tail, NULL);
        ring_buf->allocation_end = NULL;
    }
}

/**
 * Constrain a buffer to point into, and be contained within, the range [lo,hi]
 */
void ensure_byte_buf_has_allocated_buffer_member_in_range(struct aws_byte_buf *buf, uint8_t *lo, uint8_t *hi) {
    assert(lo < hi);
    size_t space = hi - lo;
    size_t pos;
    __CPROVER_assume(pos < space);
    buf->buffer = lo + pos;
    size_t max_capacity = hi - buf->buffer;
    assert(0 < max_capacity);
    __CPROVER_assume(0 < buf->capacity && buf->capacity <= max_capacity);
}

/**
 * Constrain a buffer to point into the valid elements of a ring_buffer
 */
void ensure_byte_buf_has_allocated_buffer_member_in_ring_buf(
    struct aws_byte_buf *buf,
    struct aws_ring_buffer *ring_buf) {
    buf->allocator = (nondet_bool()) ? NULL : aws_default_allocator();
    uint8_t *head = aws_atomic_load_ptr(&ring_buf->head);
    uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail);
    if (head < tail) { /* [....H    T....] */
        if (nondet_bool()) {
            __CPROVER_assume(tail < ring_buf->allocation_end);
            ensure_byte_buf_has_allocated_buffer_member_in_range(buf, tail, ring_buf->allocation_end);
        } else {
            __CPROVER_assume(ring_buf->allocation < head);
            ensure_byte_buf_has_allocated_buffer_member_in_range(buf, ring_buf->allocation, head);
        }
    } else { /* [    T....H    ] */
        ensure_byte_buf_has_allocated_buffer_member_in_range(buf, tail, head);
    }
}

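/**
 * Returns true if the cursor's length is at most max_size.
 */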
bool aws_byte_cursor_is_bounded(const struct aws_byte_cursor *const cursor, const size_t max_size) {
    return cursor->len <= max_size;
}

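/**
 * Allocates cursor->len bytes of backing storage for the cursor; ptr is NULL if the
 * allocation fails.
 */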
void ensure_byte_cursor_has_allocated_buffer_member(struct aws_byte_cursor *const cursor) {
    if (cursor != NULL) {
        cursor->ptr = malloc(cursor->len);
    }
}

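/**
 * Returns true if both the item size and the length of the array_list are within the given bounds.
 */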
bool aws_array_list_is_bounded(
    const struct aws_array_list *const list,
    const size_t max_initial_item_allocation,
    const size_t max_item_size) {
    bool item_size_is_bounded = list->item_size <= max_item_size;
    bool length_is_bounded = list->length <= max_initial_item_allocation;
    return item_size_is_bounded && length_is_bounded;
}

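/**
 * Allocates the array_list's data member (current_size bytes) and nondeterministically assigns
 * either a NULL or the default allocator.
 */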
void ensure_array_list_has_allocated_data_member(struct aws_array_list *const list) {
    list->data = malloc(list->current_size);
    list->alloc = nondet_bool() ? NULL : aws_default_allocator();
}

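/**
 * Builds a doubly linked list with up to max_length interior nodes between the sentinel head and
 * tail nodes. Construction stops early if a node allocation fails, so the resulting length is
 * nondeterministic but bounded by max_length.
 */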
void ensure_linked_list_is_allocated(struct aws_linked_list *const list, size_t max_length) {
    list->head.prev = NULL;
    list->tail.next = NULL;

    struct aws_linked_list_node *curr = &list->head;

    for (size_t i = 0; i < max_length; i++) {
        struct aws_linked_list_node *node = malloc(sizeof(struct aws_linked_list_node));
        if (!node) {
            break;
        }

        curr->next = node;
        node->prev = curr;
        curr = node;
    }

    curr->next = &list->tail;
    list->tail.prev = curr;
}

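/**
 * Returns true if both the priority queue's container and its backpointer list are bounded.
 */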
bool aws_priority_queue_is_bounded(
    const struct aws_priority_queue *const queue,
    const size_t max_initial_item_allocation,
    const size_t max_item_size) {
    bool container_is_bounded =
        aws_array_list_is_bounded(&queue->container, max_initial_item_allocation, max_item_size);

    /* The backpointer list holds pointers to [struct aws_priority_queue_node],
     * so its item size is bounded by the size of that pointer rather than by
     * max_item_size. */
    bool backpointers_list_is_bounded = aws_array_list_is_bounded(
        &queue->backpointers, max_initial_item_allocation, sizeof(struct aws_priority_queue_node *));
    return container_is_bounded && backpointers_list_is_bounded;
}

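/**
 * Allocates the data members of the priority queue's container and backpointer lists and installs
 * the nondet_compare stub as the comparison predicate.
 */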
void ensure_priority_queue_has_allocated_members(struct aws_priority_queue *const queue) {
    ensure_array_list_has_allocated_data_member(&queue->container);
    ensure_array_list_has_allocated_data_member(&queue->backpointers);
    queue->pred = nondet_compare;
}

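/**
 * Allocates a hash_table_state implementation for the map with a nondeterministic,
 * power-of-two number of entries bounded by max_table_entries. p_impl is NULL if the
 * allocation fails.
 */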
void ensure_allocated_hash_table(struct aws_hash_table *map, size_t max_table_entries) {
    if (map == NULL) {
        return;
    }
    size_t num_entries;
    __CPROVER_assume(num_entries <= max_table_entries);
    __CPROVER_assume(aws_is_power_of_two(num_entries));

    size_t required_bytes;
    /* hash_table_state_required_bytes returns AWS_OP_SUCCESS (0) when the size
     * computation does not overflow, so assume that the computation succeeds. */
    __CPROVER_assume(!hash_table_state_required_bytes(num_entries, &required_bytes));
    struct hash_table_state *impl = malloc(required_bytes);
    if (impl) {
        impl->size = num_entries;
        map->p_impl = impl;
    } else {
        map->p_impl = NULL;
    }
}

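/**
 * Nondeterministically sets the map's key and value destructors to either NULL or the no-op
 * destructor defined below.
 */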
void ensure_hash_table_has_valid_destroy_functions(struct aws_hash_table *map) {
    map->p_impl->destroy_key_fn = nondet_bool() ? NULL : hash_proof_destroy_noop;
    map->p_impl->destroy_value_fn = nondet_bool() ? NULL : hash_proof_destroy_noop;
}

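/**
 * Nondeterministically chooses a slot index, writes it to rval, and reports whether that slot
 * is empty (hash_code == 0). aws_hash_table_has_an_empty_slot simply delegates to
 * hash_table_state_has_an_empty_slot on the map's implementation.
 */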
bool aws_hash_table_has_an_empty_slot(const struct aws_hash_table *const map, size_t *const rval) {
    return hash_table_state_has_an_empty_slot(map->p_impl, rval);
}

bool hash_table_state_has_an_empty_slot(const struct hash_table_state *const state, size_t *const rval) {
    __CPROVER_assume(state->entry_count > 0);
    size_t empty_slot_idx;
    __CPROVER_assume(empty_slot_idx < state->size);
    *rval = empty_slot_idx;
    return state->slots[empty_slot_idx].hash_code == 0;
}

/**
 * A correct implementation of the hash_destroy function should never have a memory
 * error on valid input. There is still the question of whether the destroy functions
 * themselves are called correctly (i.e. only on valid input, no double free, etc.);
 * this will be tested in future proofs.
 */
void hash_proof_destroy_noop(void *p) {}

struct aws_string *ensure_string_is_allocated_nondet_length() {
    /* Considers any size up to the maximum possible size for the array [bytes] in aws_string. */
    return nondet_allocate_string_bounded_length(SIZE_MAX - 1 - sizeof(struct aws_string));
}

struct aws_string *nondet_allocate_string_bounded_length(size_t max_size) {
    size_t len;
    __CPROVER_assume(len < max_size);
    return ensure_string_is_allocated(len);
}

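/**
 * Allocates an aws_string of exactly len bytes (plus the header and trailing NUL) with a
 * nondeterministic (possibly NULL) allocator, or returns NULL if the allocation fails.
 */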
struct aws_string *ensure_string_is_allocated(size_t len) {
    struct aws_string *str = malloc(sizeof(struct aws_string) + len + 1);
    if (str == NULL) {
        return NULL;
    }

    /* Fields are declared const, so we need to copy them in like this */
    *(struct aws_allocator **)(&str->allocator) = nondet_bool() ? aws_default_allocator() : NULL;
    *(size_t *)(&str->len) = len;
    *(uint8_t *)&str->bytes[len] = '\0';
    return str;
}

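/**
 * Allocates a buffer of nondeterministic capacity in (0, max_size]; if the allocation succeeds,
 * the last byte is NUL, so the result is a valid C string of nondeterministic length.
 */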
const char *ensure_c_str_is_allocated(size_t max_size) {
    size_t cap;
    __CPROVER_assume(cap > 0 && cap <= max_size);
    const char *str = malloc(cap);
    /* Ensure that it is a valid C string. Since all bytes are nondeterministic, the actual
     * string length is anywhere in the range 0..cap - 1.
     */
    __CPROVER_assume(IMPLIES(str != NULL, str[cap - 1] == '\0'));
    return str;
}