1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29 
30 #include "anv_private.h"
31 #include "anv_measure.h"
32 
33 #include "genxml/gen8_pack.h"
34 #include "genxml/genX_bits.h"
35 #include "perf/intel_perf.h"
36 
37 #include "util/debug.h"
38 
39 /** \file anv_batch_chain.c
40  *
41  * This file contains functions related to anv_cmd_buffer as a data
42  * structure.  This involves everything required to create and destroy
43  * the actual batch buffers as well as link them together and handle
44  * relocations and surface state.  It specifically does *not* contain any
45  * handling of actual vkCmd calls beyond vkCmdExecuteCommands.
46  */
47 
48 /*-----------------------------------------------------------------------*
49  * Functions related to anv_reloc_list
50  *-----------------------------------------------------------------------*/
51 
52 VkResult
anv_reloc_list_init(struct anv_reloc_list *list,
54                     const VkAllocationCallbacks *alloc)
55 {
56    memset(list, 0, sizeof(*list));
57    return VK_SUCCESS;
58 }
59 
60 static VkResult
anv_reloc_list_init_clone(struct anv_reloc_list *list,
62                           const VkAllocationCallbacks *alloc,
63                           const struct anv_reloc_list *other_list)
64 {
65    list->num_relocs = other_list->num_relocs;
66    list->array_length = other_list->array_length;
67 
68    if (list->num_relocs > 0) {
69       list->relocs =
70          vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
71                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
72       if (list->relocs == NULL)
73          return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
74 
75       list->reloc_bos =
76          vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
77                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
78       if (list->reloc_bos == NULL) {
79          vk_free(alloc, list->relocs);
80          return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
81       }
82 
83       memcpy(list->relocs, other_list->relocs,
84              list->array_length * sizeof(*list->relocs));
85       memcpy(list->reloc_bos, other_list->reloc_bos,
86              list->array_length * sizeof(*list->reloc_bos));
87    } else {
88       list->relocs = NULL;
89       list->reloc_bos = NULL;
90    }
91 
92    list->dep_words = other_list->dep_words;
93 
94    if (list->dep_words > 0) {
      list->deps =
         vk_alloc(alloc, list->dep_words * sizeof(BITSET_WORD), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (list->deps == NULL) {
         vk_free(alloc, list->reloc_bos);
         vk_free(alloc, list->relocs);
         return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      memcpy(list->deps, other_list->deps,
             list->dep_words * sizeof(BITSET_WORD));
100    } else {
101       list->deps = NULL;
102    }
103 
104    return VK_SUCCESS;
105 }
106 
107 void
anv_reloc_list_finish(struct anv_reloc_list *list,
109                       const VkAllocationCallbacks *alloc)
110 {
111    vk_free(alloc, list->relocs);
112    vk_free(alloc, list->reloc_bos);
113    vk_free(alloc, list->deps);
114 }
115 
116 static VkResult
anv_reloc_list_grow(struct anv_reloc_list *list,
118                     const VkAllocationCallbacks *alloc,
119                     size_t num_additional_relocs)
120 {
121    if (list->num_relocs + num_additional_relocs <= list->array_length)
122       return VK_SUCCESS;
123 
124    size_t new_length = MAX2(16, list->array_length * 2);
125    while (new_length < list->num_relocs + num_additional_relocs)
126       new_length *= 2;
127 
128    struct drm_i915_gem_relocation_entry *new_relocs =
129       vk_realloc(alloc, list->relocs,
130                  new_length * sizeof(*list->relocs), 8,
131                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
132    if (new_relocs == NULL)
133       return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
134    list->relocs = new_relocs;
135 
136    struct anv_bo **new_reloc_bos =
137       vk_realloc(alloc, list->reloc_bos,
138                  new_length * sizeof(*list->reloc_bos), 8,
139                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
140    if (new_reloc_bos == NULL)
141       return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
142    list->reloc_bos = new_reloc_bos;
143 
144    list->array_length = new_length;
145 
146    return VK_SUCCESS;
147 }
148 
149 static VkResult
anv_reloc_list_grow_deps(struct anv_reloc_list *list,
151                          const VkAllocationCallbacks *alloc,
152                          uint32_t min_num_words)
153 {
154    if (min_num_words <= list->dep_words)
155       return VK_SUCCESS;
156 
157    uint32_t new_length = MAX2(32, list->dep_words * 2);
158    while (new_length < min_num_words)
159       new_length *= 2;
160 
161    BITSET_WORD *new_deps =
162       vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
163                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
164    if (new_deps == NULL)
165       return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
166    list->deps = new_deps;
167 
168    /* Zero out the new data */
169    memset(list->deps + list->dep_words, 0,
170           (new_length - list->dep_words) * sizeof(BITSET_WORD));
171    list->dep_words = new_length;
172 
173    return VK_SUCCESS;
174 }
175 
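/* Read a value exactly once through a volatile access.  BO offsets read
 * below may be updated concurrently by other threads between submissions,
 * so we snapshot them once instead of letting the compiler re-load the
 * field.
 */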
176 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
177 
178 VkResult
anv_reloc_list_add_bo(struct anv_reloc_list *list,
180                       const VkAllocationCallbacks *alloc,
181                       struct anv_bo *target_bo)
182 {
183    assert(!target_bo->is_wrapper);
184    assert(target_bo->flags & EXEC_OBJECT_PINNED);
185 
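   /* Pinned (softpin) BOs never need an actual relocation entry; we only
    * record a dependency on the target BO, keyed by its GEM handle, in the
    * deps bitset so it still gets added to the execbuf later.
    */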
186    uint32_t idx = target_bo->gem_handle;
187    VkResult result = anv_reloc_list_grow_deps(list, alloc,
188                                               (idx / BITSET_WORDBITS) + 1);
189    if (unlikely(result != VK_SUCCESS))
190       return result;
191 
192    BITSET_SET(list->deps, idx);
193 
194    return VK_SUCCESS;
195 }
196 
197 VkResult
anv_reloc_list_add(struct anv_reloc_list *list,
199                    const VkAllocationCallbacks *alloc,
200                    uint32_t offset, struct anv_bo *target_bo, uint32_t delta,
201                    uint64_t *address_u64_out)
202 {
203    struct drm_i915_gem_relocation_entry *entry;
204    int index;
205 
206    struct anv_bo *unwrapped_target_bo = anv_bo_unwrap(target_bo);
207    uint64_t target_bo_offset = READ_ONCE(unwrapped_target_bo->offset);
208    if (address_u64_out)
209       *address_u64_out = target_bo_offset + delta;
210 
211    assert(unwrapped_target_bo->gem_handle > 0);
212    assert(unwrapped_target_bo->refcount > 0);
213 
214    if (unwrapped_target_bo->flags & EXEC_OBJECT_PINNED)
215       return anv_reloc_list_add_bo(list, alloc, unwrapped_target_bo);
216 
217    VkResult result = anv_reloc_list_grow(list, alloc, 1);
218    if (result != VK_SUCCESS)
219       return result;
220 
221    /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
222    index = list->num_relocs++;
223    list->reloc_bos[index] = target_bo;
224    entry = &list->relocs[index];
225    entry->target_handle = -1; /* See also anv_cmd_buffer_process_relocs() */
226    entry->delta = delta;
227    entry->offset = offset;
228    entry->presumed_offset = target_bo_offset;
229    entry->read_domains = 0;
230    entry->write_domain = 0;
231    VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
232 
233    return VK_SUCCESS;
234 }
235 
236 static void
anv_reloc_list_clear(struct anv_reloc_list *list)
238 {
239    list->num_relocs = 0;
240    if (list->dep_words > 0)
241       memset(list->deps, 0, list->dep_words * sizeof(BITSET_WORD));
242 }
243 
244 static VkResult
anv_reloc_list_append(struct anv_reloc_list *list,
246                       const VkAllocationCallbacks *alloc,
247                       struct anv_reloc_list *other, uint32_t offset)
248 {
249    VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
250    if (result != VK_SUCCESS)
251       return result;
252 
253    if (other->num_relocs > 0) {
254       memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
255              other->num_relocs * sizeof(other->relocs[0]));
256       memcpy(&list->reloc_bos[list->num_relocs], &other->reloc_bos[0],
257              other->num_relocs * sizeof(other->reloc_bos[0]));
258 
259       for (uint32_t i = 0; i < other->num_relocs; i++)
260          list->relocs[i + list->num_relocs].offset += offset;
261 
262       list->num_relocs += other->num_relocs;
263    }
264 
265    anv_reloc_list_grow_deps(list, alloc, other->dep_words);
266    for (uint32_t w = 0; w < other->dep_words; w++)
267       list->deps[w] |= other->deps[w];
268 
269    return VK_SUCCESS;
270 }
271 
272 /*-----------------------------------------------------------------------*
273  * Functions related to anv_batch
274  *-----------------------------------------------------------------------*/
275 
276 void *
anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords)
278 {
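   /* If the requested dwords don't fit, ask the extend callback for more
    * space (it either chains to a fresh batch BO or grows the current one).
    */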
279    if (batch->next + num_dwords * 4 > batch->end) {
280       VkResult result = batch->extend_cb(batch, batch->user_data);
281       if (result != VK_SUCCESS) {
282          anv_batch_set_error(batch, result);
283          return NULL;
284       }
285    }
286 
287    void *p = batch->next;
288 
289    batch->next += num_dwords * 4;
290    assert(batch->next <= batch->end);
291 
292    return p;
293 }
294 
295 struct anv_address
anv_batch_address(struct anv_batch *batch, void *batch_location)
297 {
298    assert(batch->start < batch_location);
299 
300    /* Allow a jump at the current location of the batch. */
301    assert(batch->next >= batch_location);
302 
303    return anv_address_add(batch->start_addr, batch_location - batch->start);
304 }
305 
306 void
anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
308 {
309    uint32_t size, offset;
310 
311    size = other->next - other->start;
312    assert(size % 4 == 0);
313 
314    if (batch->next + size > batch->end) {
315       VkResult result = batch->extend_cb(batch, batch->user_data);
316       if (result != VK_SUCCESS) {
317          anv_batch_set_error(batch, result);
318          return;
319       }
320    }
321 
322    assert(batch->next + size <= batch->end);
323 
324    VG(VALGRIND_CHECK_MEM_IS_DEFINED(other->start, size));
325    memcpy(batch->next, other->start, size);
326 
327    offset = batch->next - batch->start;
328    VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
329                                            other->relocs, offset);
330    if (result != VK_SUCCESS) {
331       anv_batch_set_error(batch, result);
332       return;
333    }
334 
335    batch->next += size;
336 }
337 
338 /*-----------------------------------------------------------------------*
339  * Functions related to anv_batch_bo
340  *-----------------------------------------------------------------------*/
341 
342 static VkResult
anv_batch_bo_create(struct anv_cmd_buffer *cmd_buffer,
344                     uint32_t size,
345                     struct anv_batch_bo **bbo_out)
346 {
347    VkResult result;
348 
349    struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
350                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
351    if (bbo == NULL)
352       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
353 
354    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
355                               size, &bbo->bo);
356    if (result != VK_SUCCESS)
357       goto fail_alloc;
358 
359    result = anv_reloc_list_init(&bbo->relocs, &cmd_buffer->pool->alloc);
360    if (result != VK_SUCCESS)
361       goto fail_bo_alloc;
362 
363    *bbo_out = bbo;
364 
365    return VK_SUCCESS;
366 
367  fail_bo_alloc:
368    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
369  fail_alloc:
370    vk_free(&cmd_buffer->pool->alloc, bbo);
371 
372    return result;
373 }
374 
375 static VkResult
anv_batch_bo_clone(struct anv_cmd_buffer *cmd_buffer,
377                    const struct anv_batch_bo *other_bbo,
378                    struct anv_batch_bo **bbo_out)
379 {
380    VkResult result;
381 
382    struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
383                                         8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
384    if (bbo == NULL)
385       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
386 
387    result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
388                               other_bbo->bo->size, &bbo->bo);
389    if (result != VK_SUCCESS)
390       goto fail_alloc;
391 
392    result = anv_reloc_list_init_clone(&bbo->relocs, &cmd_buffer->pool->alloc,
393                                       &other_bbo->relocs);
394    if (result != VK_SUCCESS)
395       goto fail_bo_alloc;
396 
397    bbo->length = other_bbo->length;
398    memcpy(bbo->bo->map, other_bbo->bo->map, other_bbo->length);
399    *bbo_out = bbo;
400 
401    return VK_SUCCESS;
402 
403  fail_bo_alloc:
404    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
405  fail_alloc:
406    vk_free(&cmd_buffer->pool->alloc, bbo);
407 
408    return result;
409 }
410 
411 static void
anv_batch_bo_start(struct anv_batch_bo *bbo, struct anv_batch *batch,
413                    size_t batch_padding)
414 {
415    anv_batch_set_storage(batch, (struct anv_address) { .bo = bbo->bo, },
416                          bbo->bo->map, bbo->bo->size - batch_padding);
417    batch->relocs = &bbo->relocs;
418    anv_reloc_list_clear(&bbo->relocs);
419 }
420 
421 static void
anv_batch_bo_continue(struct anv_batch_bo *bbo, struct anv_batch *batch,
423                       size_t batch_padding)
424 {
425    batch->start_addr = (struct anv_address) { .bo = bbo->bo, };
426    batch->start = bbo->bo->map;
427    batch->next = bbo->bo->map + bbo->length;
428    batch->end = bbo->bo->map + bbo->bo->size - batch_padding;
429    batch->relocs = &bbo->relocs;
430 }
431 
432 static void
anv_batch_bo_finish(struct anv_batch_bo *bbo, struct anv_batch *batch)
434 {
435    assert(batch->start == bbo->bo->map);
436    bbo->length = batch->next - batch->start;
437    VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->start, bbo->length));
438 }
439 
440 static VkResult
anv_batch_bo_grow(struct anv_cmd_buffer *cmd_buffer, struct anv_batch_bo *bbo,
                  struct anv_batch *batch, size_t additional,
443                   size_t batch_padding)
444 {
445    assert(batch->start == bbo->bo->map);
446    bbo->length = batch->next - batch->start;
447 
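   /* Double the BO size until the existing contents plus the additional
    * space and the reserved padding all fit.
    */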
448    size_t new_size = bbo->bo->size;
   while (new_size <= bbo->length + additional + batch_padding)
450       new_size *= 2;
451 
452    if (new_size == bbo->bo->size)
453       return VK_SUCCESS;
454 
455    struct anv_bo *new_bo;
456    VkResult result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
457                                        new_size, &new_bo);
458    if (result != VK_SUCCESS)
459       return result;
460 
461    memcpy(new_bo->map, bbo->bo->map, bbo->length);
462 
463    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
464 
465    bbo->bo = new_bo;
466    anv_batch_bo_continue(bbo, batch, batch_padding);
467 
468    return VK_SUCCESS;
469 }
470 
471 static void
anv_batch_bo_link(struct anv_cmd_buffer *cmd_buffer,
473                   struct anv_batch_bo *prev_bbo,
474                   struct anv_batch_bo *next_bbo,
475                   uint32_t next_bbo_offset)
476 {
477    const uint32_t bb_start_offset =
478       prev_bbo->length - GFX8_MI_BATCH_BUFFER_START_length * 4;
479    ASSERTED const uint32_t *bb_start = prev_bbo->bo->map + bb_start_offset;
480 
481    /* Make sure we're looking at a MI_BATCH_BUFFER_START */
482    assert(((*bb_start >> 29) & 0x07) == 0);
483    assert(((*bb_start >> 23) & 0x3f) == 49);
484 
485    if (cmd_buffer->device->physical->use_softpin) {
486       assert(prev_bbo->bo->flags & EXEC_OBJECT_PINNED);
487       assert(next_bbo->bo->flags & EXEC_OBJECT_PINNED);
488 
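      /* With softpin, BO addresses are assigned up front and never move, so
       * we can patch the 48-bit address directly into the previous batch
       * instead of recording a relocation.
       */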
489       write_reloc(cmd_buffer->device,
490                   prev_bbo->bo->map + bb_start_offset + 4,
491                   next_bbo->bo->offset + next_bbo_offset, true);
492    } else {
493       uint32_t reloc_idx = prev_bbo->relocs.num_relocs - 1;
494       assert(prev_bbo->relocs.relocs[reloc_idx].offset == bb_start_offset + 4);
495 
496       prev_bbo->relocs.reloc_bos[reloc_idx] = next_bbo->bo;
497       prev_bbo->relocs.relocs[reloc_idx].delta = next_bbo_offset;
498 
499       /* Use a bogus presumed offset to force a relocation */
500       prev_bbo->relocs.relocs[reloc_idx].presumed_offset = -1;
501    }
502 }
503 
504 static void
anv_batch_bo_destroy(struct anv_batch_bo *bbo,
506                      struct anv_cmd_buffer *cmd_buffer)
507 {
508    anv_reloc_list_finish(&bbo->relocs, &cmd_buffer->pool->alloc);
509    anv_bo_pool_free(&cmd_buffer->device->batch_bo_pool, bbo->bo);
510    vk_free(&cmd_buffer->pool->alloc, bbo);
511 }
512 
513 static VkResult
anv_batch_bo_list_clone(const struct list_head *list,
515                         struct anv_cmd_buffer *cmd_buffer,
516                         struct list_head *new_list)
517 {
518    VkResult result = VK_SUCCESS;
519 
520    list_inithead(new_list);
521 
522    struct anv_batch_bo *prev_bbo = NULL;
523    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
524       struct anv_batch_bo *new_bbo = NULL;
525       result = anv_batch_bo_clone(cmd_buffer, bbo, &new_bbo);
526       if (result != VK_SUCCESS)
527          break;
528       list_addtail(&new_bbo->link, new_list);
529 
530       if (prev_bbo)
531          anv_batch_bo_link(cmd_buffer, prev_bbo, new_bbo, 0);
532 
533       prev_bbo = new_bbo;
534    }
535 
536    if (result != VK_SUCCESS) {
537       list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
538          list_del(&bbo->link);
539          anv_batch_bo_destroy(bbo, cmd_buffer);
540       }
541    }
542 
543    return result;
544 }
545 
546 /*-----------------------------------------------------------------------*
 * Functions related to anv_cmd_buffer
548  *-----------------------------------------------------------------------*/
549 
550 static struct anv_batch_bo *
anv_cmd_buffer_current_batch_bo(struct anv_cmd_buffer *cmd_buffer)
552 {
553    return LIST_ENTRY(struct anv_batch_bo, cmd_buffer->batch_bos.prev, link);
554 }
555 
556 struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer)
558 {
559    struct anv_state_pool *pool = anv_binding_table_pool(cmd_buffer->device);
560    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
561    return (struct anv_address) {
562       .bo = pool->block_pool.bo,
563       .offset = bt_block->offset - pool->start_offset,
564    };
565 }
566 
567 static void
emit_batch_buffer_start(struct anv_cmd_buffer *cmd_buffer,
569                         struct anv_bo *bo, uint32_t offset)
570 {
   /* In gfx8+ the address field grew to two dwords to accommodate 48 bit
572     * offsets. The high 16 bits are in the last dword, so we can use the gfx8
573     * version in either case, as long as we set the instruction length in the
574     * header accordingly.  This means that we always emit three dwords here
575     * and all the padding and adjustment we do in this file works for all
576     * gens.
577     */
578 
579 #define GFX7_MI_BATCH_BUFFER_START_length      2
580 #define GFX7_MI_BATCH_BUFFER_START_length_bias      2
581 
582    const uint32_t gfx7_length =
583       GFX7_MI_BATCH_BUFFER_START_length - GFX7_MI_BATCH_BUFFER_START_length_bias;
584    const uint32_t gfx8_length =
585       GFX8_MI_BATCH_BUFFER_START_length - GFX8_MI_BATCH_BUFFER_START_length_bias;
586 
587    anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_START, bbs) {
588       bbs.DWordLength               = cmd_buffer->device->info.ver < 8 ?
589                                       gfx7_length : gfx8_length;
590       bbs.SecondLevelBatchBuffer    = Firstlevelbatch;
591       bbs.AddressSpaceIndicator     = ASI_PPGTT;
592       bbs.BatchBufferStartAddress   = (struct anv_address) { bo, offset };
593    }
594 }
595 
596 static void
cmd_buffer_chain_to_batch_bo(struct anv_cmd_buffer *cmd_buffer,
598                              struct anv_batch_bo *bbo)
599 {
600    struct anv_batch *batch = &cmd_buffer->batch;
601    struct anv_batch_bo *current_bbo =
602       anv_cmd_buffer_current_batch_bo(cmd_buffer);
603 
   /* We set the end of the batch a little short so that we are sure to have
    * room for the chaining command.  Since we're about to emit the chaining
    * command, set the end back where it should go.
    */
608    batch->end += GFX8_MI_BATCH_BUFFER_START_length * 4;
609    assert(batch->end == current_bbo->bo->map + current_bbo->bo->size);
610 
611    emit_batch_buffer_start(cmd_buffer, bbo->bo, 0);
612 
613    anv_batch_bo_finish(current_bbo, batch);
614 }
615 
616 static void
anv_cmd_buffer_record_chain_submit(struct anv_cmd_buffer *cmd_buffer_from,
618                                    struct anv_cmd_buffer *cmd_buffer_to)
619 {
620    assert(cmd_buffer_from->device->physical->use_softpin);
621 
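   /* Overwrite the reserved end-of-batch location (batch_end) with an
    * MI_BATCH_BUFFER_START that jumps into the first batch BO of the next
    * command buffer.
    */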
622    uint32_t *bb_start = cmd_buffer_from->batch_end;
623 
624    struct anv_batch_bo *last_bbo =
625       list_last_entry(&cmd_buffer_from->batch_bos, struct anv_batch_bo, link);
626    struct anv_batch_bo *first_bbo =
627       list_first_entry(&cmd_buffer_to->batch_bos, struct anv_batch_bo, link);
628 
629    struct GFX8_MI_BATCH_BUFFER_START gen_bb_start = {
630       __anv_cmd_header(GFX8_MI_BATCH_BUFFER_START),
631       .SecondLevelBatchBuffer    = Firstlevelbatch,
632       .AddressSpaceIndicator     = ASI_PPGTT,
633       .BatchBufferStartAddress   = (struct anv_address) { first_bbo->bo, 0 },
634    };
635    struct anv_batch local_batch = {
636       .start  = last_bbo->bo->map,
637       .end    = last_bbo->bo->map + last_bbo->bo->size,
638       .relocs = &last_bbo->relocs,
639       .alloc  = &cmd_buffer_from->pool->alloc,
640    };
641 
642    __anv_cmd_pack(GFX8_MI_BATCH_BUFFER_START)(&local_batch, bb_start, &gen_bb_start);
643 
644    last_bbo->chained = true;
645 }
646 
647 static void
anv_cmd_buffer_record_end_submit(struct anv_cmd_buffer *cmd_buffer)
649 {
650    assert(cmd_buffer->device->physical->use_softpin);
651 
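   /* Undo any previously recorded chaining by writing MI_BATCH_BUFFER_END
    * back at the reserved end-of-batch location.
    */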
652    struct anv_batch_bo *last_bbo =
653       list_last_entry(&cmd_buffer->batch_bos, struct anv_batch_bo, link);
654    last_bbo->chained = false;
655 
656    uint32_t *batch = cmd_buffer->batch_end;
657    anv_pack_struct(batch, GFX8_MI_BATCH_BUFFER_END,
658                    __anv_cmd_header(GFX8_MI_BATCH_BUFFER_END));
659 }
660 
661 static VkResult
anv_cmd_buffer_chain_batch(struct anv_batch *batch, void *_data)
663 {
664    struct anv_cmd_buffer *cmd_buffer = _data;
665    struct anv_batch_bo *new_bbo;
666    /* Cap reallocation to chunk. */
667    uint32_t alloc_size = MIN2(cmd_buffer->total_batch_size,
668                               ANV_MAX_CMD_BUFFER_BATCH_SIZE);
669 
670    VkResult result = anv_batch_bo_create(cmd_buffer, alloc_size, &new_bbo);
671    if (result != VK_SUCCESS)
672       return result;
673 
674    cmd_buffer->total_batch_size += alloc_size;
675 
676    struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
677    if (seen_bbo == NULL) {
678       anv_batch_bo_destroy(new_bbo, cmd_buffer);
679       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
680    }
681    *seen_bbo = new_bbo;
682 
683    cmd_buffer_chain_to_batch_bo(cmd_buffer, new_bbo);
684 
685    list_addtail(&new_bbo->link, &cmd_buffer->batch_bos);
686 
687    anv_batch_bo_start(new_bbo, batch, GFX8_MI_BATCH_BUFFER_START_length * 4);
688 
689    return VK_SUCCESS;
690 }
691 
692 static VkResult
anv_cmd_buffer_grow_batch(struct anv_batch *batch, void *_data)
694 {
695    struct anv_cmd_buffer *cmd_buffer = _data;
696    struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
697 
698    anv_batch_bo_grow(cmd_buffer, bbo, &cmd_buffer->batch, 4096,
699                      GFX8_MI_BATCH_BUFFER_START_length * 4);
700 
701    return VK_SUCCESS;
702 }
703 
704 /** Allocate a binding table
705  *
706  * This function allocates a binding table.  This is a bit more complicated
707  * than one would think due to a combination of Vulkan driver design and some
708  * unfortunate hardware restrictions.
709  *
710  * The 3DSTATE_BINDING_TABLE_POINTERS_* packets only have a 16-bit field for
711  * the binding table pointer which means that all binding tables need to live
712  * in the bottom 64k of surface state base address.  The way the GL driver has
713  * classically dealt with this restriction is to emit all surface states
714  * on-the-fly into the batch and have a batch buffer smaller than 64k.  This
715  * isn't really an option in Vulkan for a couple of reasons:
716  *
717  *  1) In Vulkan, we have growing (or chaining) batches so surface states have
718  *     to live in their own buffer and we have to be able to re-emit
719  *     STATE_BASE_ADDRESS as needed which requires a full pipeline stall.  In
720  *     order to avoid emitting STATE_BASE_ADDRESS any more often than needed
721  *     (it's not that hard to hit 64k of just binding tables), we allocate
722  *     surface state objects up-front when VkImageView is created.  In order
723  *     for this to work, surface state objects need to be allocated from a
724  *     global buffer.
725  *
726  *  2) We tried to design the surface state system in such a way that it's
727  *     already ready for bindless texturing.  The way bindless texturing works
728  *     on our hardware is that you have a big pool of surface state objects
729  *     (with its own state base address) and the bindless handles are simply
730  *     offsets into that pool.  With the architecture we chose, we already
731  *     have that pool and it's exactly the same pool that we use for regular
732  *     surface states so we should already be ready for bindless.
733  *
734  *  3) For render targets, we need to be able to fill out the surface states
735  *     later in vkBeginRenderPass so that we can assign clear colors
736  *     correctly.  One way to do this would be to just create the surface
737  *     state data and then repeatedly copy it into the surface state BO every
 *     time we have to re-emit STATE_BASE_ADDRESS.  While this works, it's
 *     rather annoying; it is much simpler to allocate them up-front and
 *     re-use them for the entire render pass.
741  *
742  * While none of these are technically blockers for emitting state on the fly
 * like we do in GL, the ability to have a single surface state pool
 * simplifies things greatly.  Unfortunately, it comes at a cost...
745  *
746  * Because of the 64k limitation of 3DSTATE_BINDING_TABLE_POINTERS_*, we can't
747  * place the binding tables just anywhere in surface state base address.
 * Because 64k isn't a whole lot of space, we can't simply restrict the
 * surface state buffer to 64k; we have to be more clever.  The solution we've
750  * chosen is to have a block pool with a maximum size of 2G that starts at
751  * zero and grows in both directions.  All surface states are allocated from
752  * the top of the pool (positive offsets) and we allocate blocks (< 64k) of
753  * binding tables from the bottom of the pool (negative offsets).  Every time
754  * we allocate a new binding table block, we set surface state base address to
755  * point to the bottom of the binding table block.  This way all of the
756  * binding tables in the block are in the bottom 64k of surface state base
757  * address.  When we fill out the binding table, we add the distance between
758  * the bottom of our binding table block and zero of the block pool to the
 * surface state offsets so that they are correct relative to our new surface
760  * state base address at the bottom of the binding table block.
761  *
 * \see adjust_relocations_from_state_pool()
 * \see adjust_relocations_to_state_pool()
764  *
765  * \param[in]  entries        The number of surface state entries the binding
766  *                            table should be able to hold.
767  *
 * \param[out] state_offset   The offset from surface state base address
769  *                            where the surface states live.  This must be
770  *                            added to the surface state offset when it is
771  *                            written into the binding table entry.
772  *
773  * \return                    An anv_state representing the binding table
774  */
775 struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
777                                    uint32_t entries, uint32_t *state_offset)
778 {
779    struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
780 
781    uint32_t bt_size = align_u32(entries * 4, 32);
782 
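   /* Suballocate from the current binding table block.  If the requested
    * table doesn't fit in what's left, return an empty state so the caller
    * can grab a new block and try again.
    *
    * Illustrative numbers for the scheme described above: if the current
    * block was allocated at offset -4096 in the block pool, then
    * *state_offset is 4096, and a surface state at pool offset +256 is
    * written into the table as 256 + 4096 = 4352, which is correct relative
    * to a surface state base address pointing at -4096.
    */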
783    struct anv_state state = cmd_buffer->bt_next;
784    if (bt_size > state.alloc_size)
785       return (struct anv_state) { 0 };
786 
787    state.alloc_size = bt_size;
788    cmd_buffer->bt_next.offset += bt_size;
789    cmd_buffer->bt_next.map += bt_size;
790    cmd_buffer->bt_next.alloc_size -= bt_size;
791 
792    assert(bt_block->offset < 0);
793    *state_offset = -bt_block->offset;
794 
795    return state;
796 }
797 
798 struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer)
800 {
801    struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
802    return anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
803                                  isl_dev->ss.size, isl_dev->ss.align);
804 }
805 
806 struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
808                                    uint32_t size, uint32_t alignment)
809 {
810    return anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
811                                  size, alignment);
812 }
813 
814 VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer)
816 {
817    struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
818    if (bt_block == NULL) {
819       anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
820       return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
821    }
822 
823    *bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
824 
825    /* The bt_next state is a rolling state (we update it as we suballocate
826     * from it) which is relative to the start of the binding table block.
827     */
828    cmd_buffer->bt_next = *bt_block;
829    cmd_buffer->bt_next.offset = 0;
830 
831    return VK_SUCCESS;
832 }
833 
834 VkResult
anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
836 {
837    struct anv_batch_bo *batch_bo;
838    VkResult result;
839 
840    list_inithead(&cmd_buffer->batch_bos);
841 
842    cmd_buffer->total_batch_size = ANV_MIN_CMD_BUFFER_BATCH_SIZE;
843 
844    result = anv_batch_bo_create(cmd_buffer,
845                                 cmd_buffer->total_batch_size,
846                                 &batch_bo);
847    if (result != VK_SUCCESS)
848       return result;
849 
850    list_addtail(&batch_bo->link, &cmd_buffer->batch_bos);
851 
852    cmd_buffer->batch.alloc = &cmd_buffer->pool->alloc;
853    cmd_buffer->batch.user_data = cmd_buffer;
854 
855    if (cmd_buffer->device->can_chain_batches) {
856       cmd_buffer->batch.extend_cb = anv_cmd_buffer_chain_batch;
857    } else {
858       cmd_buffer->batch.extend_cb = anv_cmd_buffer_grow_batch;
859    }
860 
861    anv_batch_bo_start(batch_bo, &cmd_buffer->batch,
862                       GFX8_MI_BATCH_BUFFER_START_length * 4);
863 
864    int success = u_vector_init_pow2(&cmd_buffer->seen_bbos, 8,
865                                     sizeof(struct anv_bo *));
866    if (!success)
867       goto fail_batch_bo;
868 
869    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = batch_bo;
870 
871    success = u_vector_init(&cmd_buffer->bt_block_states, 8,
872                            sizeof(struct anv_state));
873    if (!success)
874       goto fail_seen_bbos;
875 
876    result = anv_reloc_list_init(&cmd_buffer->surface_relocs,
877                                 &cmd_buffer->pool->alloc);
878    if (result != VK_SUCCESS)
879       goto fail_bt_blocks;
880    cmd_buffer->last_ss_pool_center = 0;
881 
882    result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
883    if (result != VK_SUCCESS)
884       goto fail_bt_blocks;
885 
886    return VK_SUCCESS;
887 
888  fail_bt_blocks:
889    u_vector_finish(&cmd_buffer->bt_block_states);
890  fail_seen_bbos:
891    u_vector_finish(&cmd_buffer->seen_bbos);
892  fail_batch_bo:
893    anv_batch_bo_destroy(batch_bo, cmd_buffer);
894 
895    return result;
896 }
897 
898 void
anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
900 {
901    struct anv_state *bt_block;
902    u_vector_foreach(bt_block, &cmd_buffer->bt_block_states)
903       anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
904    u_vector_finish(&cmd_buffer->bt_block_states);
905 
906    anv_reloc_list_finish(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc);
907 
908    u_vector_finish(&cmd_buffer->seen_bbos);
909 
910    /* Destroy all of the batch buffers */
911    list_for_each_entry_safe(struct anv_batch_bo, bbo,
912                             &cmd_buffer->batch_bos, link) {
913       list_del(&bbo->link);
914       anv_batch_bo_destroy(bbo, cmd_buffer);
915    }
916 }
917 
918 void
anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
920 {
921    /* Delete all but the first batch bo */
922    assert(!list_is_empty(&cmd_buffer->batch_bos));
923    while (cmd_buffer->batch_bos.next != cmd_buffer->batch_bos.prev) {
924       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
925       list_del(&bbo->link);
926       anv_batch_bo_destroy(bbo, cmd_buffer);
927    }
928    assert(!list_is_empty(&cmd_buffer->batch_bos));
929 
930    anv_batch_bo_start(anv_cmd_buffer_current_batch_bo(cmd_buffer),
931                       &cmd_buffer->batch,
932                       GFX8_MI_BATCH_BUFFER_START_length * 4);
933 
934    while (u_vector_length(&cmd_buffer->bt_block_states) > 1) {
935       struct anv_state *bt_block = u_vector_remove(&cmd_buffer->bt_block_states);
936       anv_binding_table_pool_free(cmd_buffer->device, *bt_block);
937    }
938    assert(u_vector_length(&cmd_buffer->bt_block_states) == 1);
939    cmd_buffer->bt_next = *(struct anv_state *)u_vector_head(&cmd_buffer->bt_block_states);
940    cmd_buffer->bt_next.offset = 0;
941 
942    anv_reloc_list_clear(&cmd_buffer->surface_relocs);
943    cmd_buffer->last_ss_pool_center = 0;
944 
945    /* Reset the list of seen buffers */
946    cmd_buffer->seen_bbos.head = 0;
947    cmd_buffer->seen_bbos.tail = 0;
948 
949    struct anv_batch_bo *first_bbo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
950 
951    *(struct anv_batch_bo **)u_vector_add(&cmd_buffer->seen_bbos) = first_bbo;
952 
953 
954    assert(!cmd_buffer->device->can_chain_batches ||
955           first_bbo->bo->size == ANV_MIN_CMD_BUFFER_BATCH_SIZE);
956    cmd_buffer->total_batch_size = first_bbo->bo->size;
957 }
958 
959 void
anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer)
961 {
962    struct anv_batch_bo *batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
963 
964    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
965       /* When we start a batch buffer, we subtract a certain amount of
966        * padding from the end to ensure that we always have room to emit a
967        * BATCH_BUFFER_START to chain to the next BO.  We need to remove
968        * that padding before we end the batch; otherwise, we may end up
969        * with our BATCH_BUFFER_END in another BO.
970        */
971       cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
972       assert(cmd_buffer->batch.start == batch_bo->bo->map);
973       assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
974 
975       /* Save end instruction location to override it later. */
976       cmd_buffer->batch_end = cmd_buffer->batch.next;
977 
978       /* If we can chain this command buffer to another one, leave some place
979        * for the jump instruction.
980        */
981       batch_bo->chained = anv_cmd_buffer_is_chainable(cmd_buffer);
982       if (batch_bo->chained)
983          emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
984       else
985          anv_batch_emit(&cmd_buffer->batch, GFX8_MI_BATCH_BUFFER_END, bbe);
986 
987       /* Round batch up to an even number of dwords. */
988       if ((cmd_buffer->batch.next - cmd_buffer->batch.start) & 4)
989          anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
990 
991       cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_PRIMARY;
992    } else {
993       assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
994       /* If this is a secondary command buffer, we need to determine the
995        * mode in which it will be executed with vkExecuteCommands.  We
996        * determine this statically here so that this stays in sync with the
997        * actual ExecuteCommands implementation.
998        */
999       const uint32_t length = cmd_buffer->batch.next - cmd_buffer->batch.start;
1000       if (!cmd_buffer->device->can_chain_batches) {
1001          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT;
1002       } else if (cmd_buffer->device->physical->use_call_secondary) {
1003          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN;
1004          /* If the secondary command buffer begins & ends in the same BO and
          * its length is less than the length of CS prefetch, add some NOOP
          * instructions so the last MI_BATCH_BUFFER_START is outside the CS
1007           * prefetch.
1008           */
1009          if (cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) {
1010             const struct intel_device_info *devinfo = &cmd_buffer->device->info;
1011             /* Careful to have everything in signed integer. */
1012             int32_t prefetch_len = devinfo->cs_prefetch_size;
1013             int32_t batch_len =
1014                cmd_buffer->batch.next - cmd_buffer->batch.start;
1015 
1016             for (int32_t i = 0; i < (prefetch_len - batch_len); i += 4)
1017                anv_batch_emit(&cmd_buffer->batch, GFX8_MI_NOOP, noop);
1018          }
1019 
1020          void *jump_addr =
1021             anv_batch_emitn(&cmd_buffer->batch,
1022                             GFX8_MI_BATCH_BUFFER_START_length,
1023                             GFX8_MI_BATCH_BUFFER_START,
1024                             .AddressSpaceIndicator = ASI_PPGTT,
1025                             .SecondLevelBatchBuffer = Firstlevelbatch) +
1026             (GFX8_MI_BATCH_BUFFER_START_BatchBufferStartAddress_start / 8);
1027          cmd_buffer->return_addr = anv_batch_address(&cmd_buffer->batch, jump_addr);
1028 
1029          /* The emit above may have caused us to chain batch buffers which
1030           * would mean that batch_bo is no longer valid.
1031           */
1032          batch_bo = anv_cmd_buffer_current_batch_bo(cmd_buffer);
1033       } else if ((cmd_buffer->batch_bos.next == cmd_buffer->batch_bos.prev) &&
1034                  (length < ANV_MIN_CMD_BUFFER_BATCH_SIZE / 2)) {
1035          /* If the secondary has exactly one batch buffer in its list *and*
1036           * that batch buffer is less than half of the maximum size, we're
          * probably better off simply copying it into our batch.
1038           */
1039          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_EMIT;
1040       } else if (!(cmd_buffer->usage_flags &
1041                    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
1042          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_CHAIN;
1043 
1044          /* In order to chain, we need this command buffer to contain an
1045           * MI_BATCH_BUFFER_START which will jump back to the calling batch.
          * It doesn't matter where it points now so long as it has a valid
          * relocation.  We'll adjust it later as part of the chaining
          * process.
          *
          * We set the end of the batch a little short so that we are sure to
          * have room for the chaining command.  Since we're about to emit the
          * chaining command, set the end back where it should go.
1053           */
1054          cmd_buffer->batch.end += GFX8_MI_BATCH_BUFFER_START_length * 4;
1055          assert(cmd_buffer->batch.start == batch_bo->bo->map);
1056          assert(cmd_buffer->batch.end == batch_bo->bo->map + batch_bo->bo->size);
1057 
1058          emit_batch_buffer_start(cmd_buffer, batch_bo->bo, 0);
1059          assert(cmd_buffer->batch.start == batch_bo->bo->map);
1060       } else {
1061          cmd_buffer->exec_mode = ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN;
1062       }
1063    }
1064 
1065    anv_batch_bo_finish(batch_bo, &cmd_buffer->batch);
1066 }
1067 
1068 static VkResult
anv_cmd_buffer_add_seen_bbos(struct anv_cmd_buffer *cmd_buffer,
1070                              struct list_head *list)
1071 {
1072    list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
1073       struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
1074       if (bbo_ptr == NULL)
1075          return vk_error(cmd_buffer, VK_ERROR_OUT_OF_HOST_MEMORY);
1076 
1077       *bbo_ptr = bbo;
1078    }
1079 
1080    return VK_SUCCESS;
1081 }
1082 
1083 void
anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1085                              struct anv_cmd_buffer *secondary)
1086 {
1087    anv_measure_add_secondary(primary, secondary);
1088    switch (secondary->exec_mode) {
1089    case ANV_CMD_BUFFER_EXEC_MODE_EMIT:
1090       anv_batch_emit_batch(&primary->batch, &secondary->batch);
1091       break;
1092    case ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT: {
1093       struct anv_batch_bo *bbo = anv_cmd_buffer_current_batch_bo(primary);
1094       unsigned length = secondary->batch.end - secondary->batch.start;
1095       anv_batch_bo_grow(primary, bbo, &primary->batch, length,
1096                         GFX8_MI_BATCH_BUFFER_START_length * 4);
1097       anv_batch_emit_batch(&primary->batch, &secondary->batch);
1098       break;
1099    }
1100    case ANV_CMD_BUFFER_EXEC_MODE_CHAIN: {
1101       struct anv_batch_bo *first_bbo =
1102          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1103       struct anv_batch_bo *last_bbo =
1104          list_last_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1105 
1106       emit_batch_buffer_start(primary, first_bbo->bo, 0);
1107 
1108       struct anv_batch_bo *this_bbo = anv_cmd_buffer_current_batch_bo(primary);
1109       assert(primary->batch.start == this_bbo->bo->map);
1110       uint32_t offset = primary->batch.next - primary->batch.start;
1111 
1112       /* Make the tail of the secondary point back to right after the
1113        * MI_BATCH_BUFFER_START in the primary batch.
1114        */
1115       anv_batch_bo_link(primary, last_bbo, this_bbo, offset);
1116 
1117       anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1118       break;
1119    }
1120    case ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN: {
1121       struct list_head copy_list;
1122       VkResult result = anv_batch_bo_list_clone(&secondary->batch_bos,
1123                                                 secondary,
1124                                                 &copy_list);
1125       if (result != VK_SUCCESS)
1126          return; /* FIXME */
1127 
1128       anv_cmd_buffer_add_seen_bbos(primary, &copy_list);
1129 
1130       struct anv_batch_bo *first_bbo =
1131          list_first_entry(&copy_list, struct anv_batch_bo, link);
1132       struct anv_batch_bo *last_bbo =
1133          list_last_entry(&copy_list, struct anv_batch_bo, link);
1134 
1135       cmd_buffer_chain_to_batch_bo(primary, first_bbo);
1136 
1137       list_splicetail(&copy_list, &primary->batch_bos);
1138 
1139       anv_batch_bo_continue(last_bbo, &primary->batch,
1140                             GFX8_MI_BATCH_BUFFER_START_length * 4);
1141       break;
1142    }
1143    case ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN: {
1144       struct anv_batch_bo *first_bbo =
1145          list_first_entry(&secondary->batch_bos, struct anv_batch_bo, link);
1146 
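      /* Use MI_STORE_DATA_IMM to write the address right after the jump
       * below into the secondary's return MI_BATCH_BUFFER_START (recorded in
       * secondary->return_addr), then jump into the secondary; its final
       * jump returns here.
       */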
1147       uint64_t *write_return_addr =
1148          anv_batch_emitn(&primary->batch,
1149                          GFX8_MI_STORE_DATA_IMM_length + 1 /* QWord write */,
1150                          GFX8_MI_STORE_DATA_IMM,
1151                          .Address = secondary->return_addr)
1152          + (GFX8_MI_STORE_DATA_IMM_ImmediateData_start / 8);
1153 
1154       emit_batch_buffer_start(primary, first_bbo->bo, 0);
1155 
1156       *write_return_addr =
1157          anv_address_physical(anv_batch_address(&primary->batch,
1158                                                 primary->batch.next));
1159 
1160       anv_cmd_buffer_add_seen_bbos(primary, &secondary->batch_bos);
1161       break;
1162    }
1163    default:
1164       assert(!"Invalid execution mode");
1165    }
1166 
1167    anv_reloc_list_append(&primary->surface_relocs, &primary->pool->alloc,
1168                          &secondary->surface_relocs, 0);
1169 }
1170 
1171 struct anv_execbuf {
1172    struct drm_i915_gem_execbuffer2           execbuf;
1173 
1174    struct drm_i915_gem_execbuffer_ext_timeline_fences timeline_fences;
1175 
1176    struct drm_i915_gem_exec_object2 *        objects;
1177    uint32_t                                  bo_count;
1178    struct anv_bo **                          bos;
1179 
1180    /* Allocated length of the 'objects' and 'bos' arrays */
1181    uint32_t                                  array_length;
1182 
1183    /* List of relocations for surface states, only used with platforms not
1184     * using softpin.
1185     */
1186    void *                                    surface_states_relocs;
1187 
1188    /* Indicates whether any of the command buffers have relocations. This
    * doesn't necessarily mean we'll need the kernel to process them. It
1190     * might be that a previous execbuf has already placed things in the VMA
1191     * and we can make i915 skip the relocations.
1192     */
1193    bool                                      has_relocs;
1194 
1195    const VkAllocationCallbacks *             alloc;
1196    VkSystemAllocationScope                   alloc_scope;
1197 
1198    int                                       perf_query_pass;
1199 };
1200 
1201 static void
anv_execbuf_init(struct anv_execbuf *exec)
1203 {
1204    memset(exec, 0, sizeof(*exec));
1205 }
1206 
1207 static void
anv_execbuf_finish(struct anv_execbuf *exec)
1209 {
1210    vk_free(exec->alloc, exec->surface_states_relocs);
1211    vk_free(exec->alloc, exec->objects);
1212    vk_free(exec->alloc, exec->bos);
1213 }
1214 
1215 static void
anv_execbuf_add_ext(struct anv_execbuf *exec,
1217                     uint32_t ext_name,
1218                     struct i915_user_extension *ext)
1219 {
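   /* When I915_EXEC_USE_EXTENSIONS is set, the kernel reinterprets
    * cliprects_ptr as the head of a chain of i915_user_extension structs.
    * Walk to the end of the chain and append this extension.
    */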
1220    __u64 *iter = &exec->execbuf.cliprects_ptr;
1221 
1222    exec->execbuf.flags |= I915_EXEC_USE_EXTENSIONS;
1223 
1224    while (*iter != 0) {
1225       iter = (__u64 *) &((struct i915_user_extension *)(uintptr_t)*iter)->next_extension;
1226    }
1227 
1228    ext->name = ext_name;
1229 
1230    *iter = (uintptr_t) ext;
1231 }
1232 
1233 static VkResult
1234 anv_execbuf_add_bo_bitset(struct anv_device *device,
1235                           struct anv_execbuf *exec,
1236                           uint32_t dep_words,
1237                           BITSET_WORD *deps,
1238                           uint32_t extra_flags);
1239 
1240 static VkResult
anv_execbuf_add_bo(struct anv_device *device,
1242                    struct anv_execbuf *exec,
1243                    struct anv_bo *bo,
1244                    struct anv_reloc_list *relocs,
1245                    uint32_t extra_flags)
1246 {
1247    struct drm_i915_gem_exec_object2 *obj = NULL;
1248 
1249    bo = anv_bo_unwrap(bo);
1250 
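   /* bo->index is only a hint; it's valid for this execbuf only if the slot
    * it points at actually holds this BO.
    */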
1251    if (bo->index < exec->bo_count && exec->bos[bo->index] == bo)
1252       obj = &exec->objects[bo->index];
1253 
1254    if (obj == NULL) {
1255       /* We've never seen this one before.  Add it to the list and assign
1256        * an id that we can use later.
1257        */
1258       if (exec->bo_count >= exec->array_length) {
1259          uint32_t new_len = exec->objects ? exec->array_length * 2 : 64;
1260 
1261          struct drm_i915_gem_exec_object2 *new_objects =
1262             vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
1263          if (new_objects == NULL)
1264             return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1265 
1266          struct anv_bo **new_bos =
1267             vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
1268          if (new_bos == NULL) {
1269             vk_free(exec->alloc, new_objects);
1270             return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
1271          }
1272 
1273          if (exec->objects) {
1274             memcpy(new_objects, exec->objects,
1275                    exec->bo_count * sizeof(*new_objects));
1276             memcpy(new_bos, exec->bos,
1277                    exec->bo_count * sizeof(*new_bos));
1278          }
1279 
1280          vk_free(exec->alloc, exec->objects);
1281          vk_free(exec->alloc, exec->bos);
1282 
1283          exec->objects = new_objects;
1284          exec->bos = new_bos;
1285          exec->array_length = new_len;
1286       }
1287 
1288       assert(exec->bo_count < exec->array_length);
1289 
1290       bo->index = exec->bo_count++;
1291       obj = &exec->objects[bo->index];
1292       exec->bos[bo->index] = bo;
1293 
1294       obj->handle = bo->gem_handle;
1295       obj->relocation_count = 0;
1296       obj->relocs_ptr = 0;
1297       obj->alignment = 0;
1298       obj->offset = bo->offset;
1299       obj->flags = bo->flags | extra_flags;
1300       obj->rsvd1 = 0;
1301       obj->rsvd2 = 0;
1302    }
1303 
1304    if (extra_flags & EXEC_OBJECT_WRITE) {
1305       obj->flags |= EXEC_OBJECT_WRITE;
1306       obj->flags &= ~EXEC_OBJECT_ASYNC;
1307    }
1308 
1309    if (relocs != NULL) {
1310       assert(obj->relocation_count == 0);
1311 
1312       if (relocs->num_relocs > 0) {
1313          /* This is the first time we've ever seen a list of relocations for
1314           * this BO.  Go ahead and set the relocations and then walk the list
1315           * of relocations and add them all.
1316           */
1317          exec->has_relocs = true;
1318          obj->relocation_count = relocs->num_relocs;
1319          obj->relocs_ptr = (uintptr_t) relocs->relocs;
1320 
1321          for (size_t i = 0; i < relocs->num_relocs; i++) {
1322             VkResult result;
1323 
1324             /* A quick sanity check on relocations */
1325             assert(relocs->relocs[i].offset < bo->size);
1326             result = anv_execbuf_add_bo(device, exec, relocs->reloc_bos[i],
1327                                         NULL, extra_flags);
1328             if (result != VK_SUCCESS)
1329                return result;
1330          }
1331       }
1332 
1333       return anv_execbuf_add_bo_bitset(device, exec, relocs->dep_words,
1334                                        relocs->deps, extra_flags);
1335    }
1336 
1337    return VK_SUCCESS;
1338 }
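
/* Illustrative sketch (not part of the driver): anv_execbuf_add_bo() above
 * deduplicates BOs by letting each BO cache the index it was last given
 * ("bo->index"), which is only trusted when the slot at that index still
 * points back at the same BO, and grows the parallel arrays by doubling.
 * A minimal, generic version of that pattern, using hypothetical types and
 * kept out of the build on purpose, might look like this:
 */
#if 0
#include <stdlib.h>

struct sketch_list {
   void **entries;
   uint32_t count, capacity;
};

/* Returns the slot given to obj, or -1 on allocation failure. */
static int
sketch_add_unique(struct sketch_list *l, void *obj, uint32_t *cached_index)
{
   if (*cached_index < l->count && l->entries[*cached_index] == obj)
      return *cached_index;                 /* already in the list */

   if (l->count == l->capacity) {
      uint32_t new_cap = l->capacity ? l->capacity * 2 : 64;
      void **e = realloc(l->entries, new_cap * sizeof(*e));
      if (e == NULL)
         return -1;
      l->entries = e;
      l->capacity = new_cap;
   }

   *cached_index = l->count;
   l->entries[l->count++] = obj;
   return *cached_index;
}
#endif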
1339 
1340 /* Add BO dependencies to execbuf */
1341 static VkResult
1342 anv_execbuf_add_bo_bitset(struct anv_device *device,
1343                           struct anv_execbuf *exec,
1344                           uint32_t dep_words,
1345                           BITSET_WORD *deps,
1346                           uint32_t extra_flags)
1347 {
1348    for (uint32_t w = 0; w < dep_words; w++) {
1349       BITSET_WORD mask = deps[w];
1350       while (mask) {
1351          int i = u_bit_scan(&mask);
1352          uint32_t gem_handle = w * BITSET_WORDBITS + i;
1353          struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
1354          assert(bo->refcount > 0);
1355          VkResult result =
1356             anv_execbuf_add_bo(device, exec, bo, NULL, extra_flags);
1357          if (result != VK_SUCCESS)
1358             return result;
1359       }
1360    }
1361 
1362    return VK_SUCCESS;
1363 }
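
/* Illustrative sketch (not part of the driver): the dependency set walked
 * above is a bitset indexed by GEM handle, so enumerating it means scanning
 * each word for set bits and reconstructing the handle as
 * word_index * bits_per_word + bit.  A generic version of that walk, written
 * without the u_bit_scan() helper and kept out of the build, could be:
 */
#if 0
static void
sketch_foreach_set_bit(const uint32_t *words, uint32_t num_words,
                       void (*cb)(uint32_t handle, void *data), void *data)
{
   for (uint32_t w = 0; w < num_words; w++) {
      uint32_t mask = words[w];
      while (mask) {
         int bit = __builtin_ctz(mask);   /* index of the lowest set bit */
         mask &= mask - 1;                /* clear that bit */
         cb(w * 32 + bit, data);          /* 32 bits per uint32_t word */
      }
   }
}
#endif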
1364 
1365 static void
1366 anv_cmd_buffer_process_relocs(struct anv_cmd_buffer *cmd_buffer,
1367                               struct anv_reloc_list *list)
1368 {
1369    for (size_t i = 0; i < list->num_relocs; i++)
1370       list->relocs[i].target_handle = anv_bo_unwrap(list->reloc_bos[i])->index;
1371 }
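
/* With I915_EXEC_HANDLE_LUT set on the execbuf (see the flags assembled in
 * setup_execbuf_for_cmd_buffers() below), the kernel interprets each
 * reloc.target_handle as an index into the validation-object array rather
 * than as a GEM handle, which is why the loop above stores bo->index and why
 * it can only run once the final object order is known.
 */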
1372 
1373 static void
1374 adjust_relocations_from_state_pool(struct anv_state_pool *pool,
1375                                    struct anv_reloc_list *relocs,
1376                                    uint32_t last_pool_center_bo_offset)
1377 {
1378    assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1379    uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1380 
1381    for (size_t i = 0; i < relocs->num_relocs; i++) {
1382       /* All of the relocations from this block pool to other BOs should
1383        * have been emitted relative to the surface block pool center.  We
1384        * need to add the center offset to make them relative to the
1385        * beginning of the actual GEM bo.
1386        */
1387       relocs->relocs[i].offset += delta;
1388    }
1389 }
1390 
1391 static void
1392 adjust_relocations_to_state_pool(struct anv_state_pool *pool,
1393                                  struct anv_bo *from_bo,
1394                                  struct anv_reloc_list *relocs,
1395                                  uint32_t last_pool_center_bo_offset)
1396 {
1397    assert(!from_bo->is_wrapper);
1398    assert(last_pool_center_bo_offset <= pool->block_pool.center_bo_offset);
1399    uint32_t delta = pool->block_pool.center_bo_offset - last_pool_center_bo_offset;
1400 
1401    /* When we initially emit relocations into a block pool, we don't
1402     * actually know what the final center_bo_offset will be so we just emit
1403     * it as if center_bo_offset == 0.  Now that we know what the center
1404     * offset is, we need to walk the list of relocations and adjust any
1405     * relocations that point to the pool bo with the correct offset.
1406     */
1407    for (size_t i = 0; i < relocs->num_relocs; i++) {
1408       if (relocs->reloc_bos[i] == pool->block_pool.bo) {
1409          /* Adjust the delta value in the relocation to correctly
1410           * correspond to the new delta.  Initially, this value may have
1411           * been negative (if treated as unsigned), but we trust in
1412           * uint32_t roll-over to fix that for us at this point.
1413           */
1414          relocs->relocs[i].delta += delta;
1415 
1416          /* Since the delta has changed, we need to update the actual
1417           * relocated value with the new presumed value.  This function
1418           * should only be called on batch buffers, so we know it isn't in
1419           * use by the GPU at the moment.
1420           */
1421          assert(relocs->relocs[i].offset < from_bo->size);
1422          write_reloc(pool->block_pool.device,
1423                      from_bo->map + relocs->relocs[i].offset,
1424                      relocs->relocs[i].presumed_offset +
1425                      relocs->relocs[i].delta, false);
1426       }
1427    }
1428 }
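
/* Worked example (illustrative only): suppose a relocation was emitted while
 * center_bo_offset was still 0 and pointed 4 KiB *before* the center, so its
 * delta was stored as (uint32_t)-4096.  If the pool later settles on a
 * center_bo_offset of 64 KiB and last_pool_center_bo_offset is still 0, the
 * adjustment above computes
 *
 *    delta           = 65536 - 0 = 65536
 *    relocs[i].delta = (uint32_t)-4096 + 65536 = 61440
 *
 * which is exactly 4 KiB below the new center, as intended; the temporarily
 * "negative" value is repaired purely by unsigned wrap-around.
 */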
1429 
1430 static void
1431 anv_reloc_list_apply(struct anv_device *device,
1432                      struct anv_reloc_list *list,
1433                      struct anv_bo *bo,
1434                      bool always_relocate)
1435 {
1436    bo = anv_bo_unwrap(bo);
1437 
1438    for (size_t i = 0; i < list->num_relocs; i++) {
1439       struct anv_bo *target_bo = anv_bo_unwrap(list->reloc_bos[i]);
1440       if (list->relocs[i].presumed_offset == target_bo->offset &&
1441           !always_relocate)
1442          continue;
1443 
1444       void *p = bo->map + list->relocs[i].offset;
1445       write_reloc(device, p, target_bo->offset + list->relocs[i].delta, true);
1446       list->relocs[i].presumed_offset = target_bo->offset;
1447    }
1448 }
1449 
1450 /**
1451  * This function applies the relocation for a command buffer and writes the
1452  * actual addresses into the buffers as per what we were told by the kernel on
1453  * the previous execbuf2 call.  This should be safe to do because, for each
1454  * relocated address, we have two cases:
1455  *
1456  *  1) The target BO is inactive (as seen by the kernel).  In this case, it is
1457  *     not in use by the GPU so updating the address is 100% ok.  It won't be
1458  *     in-use by the GPU (from our context) again until the next execbuf2
1459  *     happens.  If the kernel decides to move it in the next execbuf2, it
1460  *     will have to do the relocations itself, but that's ok because it should
1461  *     have all of the information needed to do so.
1462  *
1463  *  2) The target BO is active (as seen by the kernel).  In this case, it
1464  *     hasn't moved since the last execbuffer2 call because GTT shuffling
1465  *     *only* happens when the BO is idle. (From our perspective, it only
1466  *     happens inside the execbuffer2 ioctl, but the shuffling may be
1467  *     triggered by another ioctl, which with full-ppgtt is limited to
1468  *     execbuffer2 ioctls on the same context, or by memory pressure.)  Since the
1469  *     target BO hasn't moved, our anv_bo::offset exactly matches the BO's GTT
1470  *     address and the relocated value we are writing into the BO will be the
1471  *     same as the value that is already there.
1472  *
1473  *     There is also a possibility that the target BO is active but the exact
1474  *     RENDER_SURFACE_STATE object we are writing the relocation into isn't in
1475  *     use.  In this case, the address currently in the RENDER_SURFACE_STATE
1476  *     may be stale but it's still safe to write the relocation because that
1477  *     particular RENDER_SURFACE_STATE object isn't in-use by the GPU and
1478  *     won't be until the next execbuf2 call.
1479  *
1480  * By doing relocations on the CPU, we can tell the kernel that it doesn't
1481  * need to bother.  We want to do this because the surface state buffer is
1482  * used by every command buffer so, if the kernel does the relocations, it
1483  * will always be busy and the kernel will always stall.  This is also
1484  * probably the fastest mechanism for doing relocations since the kernel would
1485  * have to make a full copy of all the relocations lists.
1486  */
1487 static bool
1488 execbuf_can_skip_relocations(struct anv_execbuf *exec)
1489 {
1490    if (!exec->has_relocs)
1491       return true;
1492 
1493    static int userspace_relocs = -1;
1494    if (userspace_relocs < 0)
1495       userspace_relocs = env_var_as_boolean("ANV_USERSPACE_RELOCS", true);
1496    if (!userspace_relocs)
1497       return false;
1498 
1499    /* First, we have to check to see whether or not we can even do the
1500     * relocation.  New buffers which have never been submitted to the kernel
1501     * don't have a valid offset so we need to let the kernel do relocations so
1502     * that we can get offsets for them.  On future execbuf2 calls, those
1503     * buffers will have offsets and we will be able to skip relocating.
1504     * Invalid offsets are indicated by anv_bo::offset == (uint64_t)-1.
1505     */
1506    for (uint32_t i = 0; i < exec->bo_count; i++) {
1507       assert(!exec->bos[i]->is_wrapper);
1508       if (exec->bos[i]->offset == (uint64_t)-1)
1509          return false;
1510    }
1511 
1512    return true;
1513 }
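
/* Illustrative sketch (not part of the driver): the check above caches the
 * ANV_USERSPACE_RELOCS lookup in a function-local static so the environment
 * is only consulted once per process.  A generic form of that idiom, using
 * plain getenv() instead of env_var_as_boolean() and kept out of the build,
 * might look like:
 */
#if 0
#include <stdlib.h>
#include <string.h>

static bool
sketch_userspace_relocs_enabled(void)
{
   static int cached = -1;   /* -1 means "not looked up yet" */
   if (cached < 0) {
      const char *v = getenv("ANV_USERSPACE_RELOCS");
      /* Default to enabled when unset; treat "0"/"false" as disabled. */
      cached = (v == NULL || (strcmp(v, "0") != 0 && strcmp(v, "false") != 0));
   }
   return cached;
}
#endif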
1514 
1515 static void
1516 relocate_cmd_buffer(struct anv_cmd_buffer *cmd_buffer,
1517                     struct anv_execbuf *exec)
1518 {
1519    /* Since surface states are shared between command buffers and we don't
1520     * know what order they will be submitted to the kernel, we don't know
1521     * what address is actually written in the surface state object at any
1522     * given time.  The only option is to always relocate them.
1523     */
1524    struct anv_bo *surface_state_bo =
1525       anv_bo_unwrap(cmd_buffer->device->surface_state_pool.block_pool.bo);
1526    anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
1527                         surface_state_bo,
1528                         true /* always relocate surface states */);
1529 
1530    /* Since we own all of the batch buffers, we know what values are stored
1531     * in the relocated addresses and only have to update them if the offsets
1532     * have changed.
1533     */
1534    struct anv_batch_bo **bbo;
1535    u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1536       anv_reloc_list_apply(cmd_buffer->device,
1537                            &(*bbo)->relocs, (*bbo)->bo, false);
1538    }
1539 
1540    for (uint32_t i = 0; i < exec->bo_count; i++)
1541       exec->objects[i].offset = exec->bos[i]->offset;
1542 }
1543 
1544 static void
1545 reset_cmd_buffer_surface_offsets(struct anv_cmd_buffer *cmd_buffer)
1546 {
1547    /* In the case where we fall back to doing kernel relocations, we need to
1548     * ensure that the relocation list is valid. All relocations on the batch
1549     * buffers are already valid and kept up-to-date. Since surface states are
1550     * shared between command buffers and we don't know what order they will be
1551     * submitted to the kernel, we don't know what address is actually written
1552     * in the surface state object at any given time. The only option is to set
1553     * a bogus presumed offset and let the kernel relocate them.
1554     */
1555    for (size_t i = 0; i < cmd_buffer->surface_relocs.num_relocs; i++)
1556       cmd_buffer->surface_relocs.relocs[i].presumed_offset = -1;
1557 }
1558 
1559 static VkResult
1560 setup_execbuf_for_cmd_buffer(struct anv_execbuf *execbuf,
1561                              struct anv_cmd_buffer *cmd_buffer)
1562 {
1563    struct anv_state_pool *ss_pool =
1564       &cmd_buffer->device->surface_state_pool;
1565 
1566    adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
1567                                       cmd_buffer->last_ss_pool_center);
1568    VkResult result;
1569    if (cmd_buffer->device->physical->use_softpin) {
1570       /* Add surface dependencies (BOs) to the execbuf */
1571       anv_execbuf_add_bo_bitset(cmd_buffer->device, execbuf,
1572                                 cmd_buffer->surface_relocs.dep_words,
1573                                 cmd_buffer->surface_relocs.deps, 0);
1574    } else {
1575       /* Since we aren't in the softpin case, all of our STATE_BASE_ADDRESS BOs
1576        * will get added automatically by processing relocations on the batch
1577        * buffer.  We have to add the surface state BO manually because it has
1578        * relocations of its own that we need to be sure are processed.
1579        */
1580       result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1581                                   ss_pool->block_pool.bo,
1582                                   &cmd_buffer->surface_relocs, 0);
1583       if (result != VK_SUCCESS)
1584          return result;
1585    }
1586 
1587    /* First, we walk over all of the bos we've seen and add them and their
1588     * relocations to the validate list.
1589     */
1590    struct anv_batch_bo **bbo;
1591    u_vector_foreach(bbo, &cmd_buffer->seen_bbos) {
1592       adjust_relocations_to_state_pool(ss_pool, (*bbo)->bo, &(*bbo)->relocs,
1593                                        cmd_buffer->last_ss_pool_center);
1594 
1595       result = anv_execbuf_add_bo(cmd_buffer->device, execbuf,
1596                                   (*bbo)->bo, &(*bbo)->relocs, 0);
1597       if (result != VK_SUCCESS)
1598          return result;
1599    }
1600 
1601    /* Now that we've adjusted all of the surface state relocations, we need to
1602     * record the surface state pool center so future executions of the command
1603     * buffer can adjust correctly.
1604     */
1605    cmd_buffer->last_ss_pool_center = ss_pool->block_pool.center_bo_offset;
1606 
1607    return VK_SUCCESS;
1608 }
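
/* Worked example (illustrative only): if a command buffer was recorded while
 * center_bo_offset was 0, submitted once after the surface state pool had
 * grown so that the center sat at 32768, and submitted again after further
 * growth to 49152, the two submissions apply deltas of 32768 and then
 * 16384 (= 49152 - 32768).  Recording last_ss_pool_center at the end of the
 * function above is what keeps the second adjustment incremental instead of
 * re-applying the full center offset.
 */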
1609 
1610 static void
1611 chain_command_buffers(struct anv_cmd_buffer **cmd_buffers,
1612                       uint32_t num_cmd_buffers)
1613 {
1614    if (!anv_cmd_buffer_is_chainable(cmd_buffers[0])) {
1615       assert(num_cmd_buffers == 1);
1616       return;
1617    }
1618 
1619    /* Chain the N-1 first batch buffers */
1620    for (uint32_t i = 0; i < (num_cmd_buffers - 1); i++)
1621       anv_cmd_buffer_record_chain_submit(cmd_buffers[i], cmd_buffers[i + 1]);
1622 
1623    /* Put an end to the last one */
1624    anv_cmd_buffer_record_end_submit(cmd_buffers[num_cmd_buffers - 1]);
1625 }
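
/* Conceptually (a hedged sketch; the actual packet emission lives in the
 * genX chain/end helpers called above), the submitted batches end up forming
 * a single GPU-visible chain:
 *
 *    batch[0] ... jump -> batch[1] ... jump -> ... -> batch[N-1] ... END
 *
 * i.e. each of the first N-1 batches has its tail patched to branch into the
 * next one, and only the last batch actually terminates the command stream.
 */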
1626 
1627 static VkResult
1628 setup_execbuf_for_cmd_buffers(struct anv_execbuf *execbuf,
1629                               struct anv_queue *queue,
1630                               struct anv_cmd_buffer **cmd_buffers,
1631                               uint32_t num_cmd_buffers)
1632 {
1633    struct anv_device *device = queue->device;
1634    struct anv_state_pool *ss_pool = &device->surface_state_pool;
1635    VkResult result;
1636 
1637    /* Edit the tail of the command buffers to chain them all together if they
1638     * can be.
1639     */
1640    chain_command_buffers(cmd_buffers, num_cmd_buffers);
1641 
1642    for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1643       result = setup_execbuf_for_cmd_buffer(execbuf, cmd_buffers[i]);
1644       if (result != VK_SUCCESS)
1645          return result;
1646    }
1647 
1648    /* Add all the global BOs to the object list for softpin case. */
1649    if (device->physical->use_softpin) {
1650       anv_block_pool_foreach_bo(bo, &ss_pool->block_pool) {
1651          result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1652          if (result != VK_SUCCESS)
1653             return result;
1654       }
1655 
1656       struct anv_block_pool *pool;
1657       pool = &device->dynamic_state_pool.block_pool;
1658       anv_block_pool_foreach_bo(bo, pool) {
1659          result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1660          if (result != VK_SUCCESS)
1661             return result;
1662       }
1663 
1664       pool = &device->general_state_pool.block_pool;
1665       anv_block_pool_foreach_bo(bo, pool) {
1666          result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1667          if (result != VK_SUCCESS)
1668             return result;
1669       }
1670 
1671       pool = &device->instruction_state_pool.block_pool;
1672       anv_block_pool_foreach_bo(bo, pool) {
1673          result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1674          if (result != VK_SUCCESS)
1675             return result;
1676       }
1677 
1678       pool = &device->binding_table_pool.block_pool;
1679       anv_block_pool_foreach_bo(bo, pool) {
1680          result = anv_execbuf_add_bo(device, execbuf, bo, NULL, 0);
1681          if (result != VK_SUCCESS)
1682             return result;
1683       }
1684 
1685       /* Add the BOs for all user-allocated memory objects because we can't
1686        * track them after the binding updates allowed by VK_EXT_descriptor_indexing.
1687        */
1688       list_for_each_entry(struct anv_device_memory, mem,
1689                           &device->memory_objects, link) {
1690          result = anv_execbuf_add_bo(device, execbuf, mem->bo, NULL, 0);
1691          if (result != VK_SUCCESS)
1692             return result;
1693       }
1694    } else {
1695       /* We do not support chaining primary command buffers without
1696        * softpin.
1697        */
1698       assert(num_cmd_buffers == 1);
1699    }
1700 
1701    bool no_reloc = true;
1702    if (execbuf->has_relocs) {
1703       no_reloc = execbuf_can_skip_relocations(execbuf);
1704       if (no_reloc) {
1705          /* If we were able to successfully relocate everything, tell the
1706           * kernel that it can skip doing relocations. The requirement for
1707           * using NO_RELOC is:
1708           *
1709           *  1) The addresses written in the objects must match the
1710           *     corresponding reloc.presumed_offset which in turn must match
1711           *     the corresponding execobject.offset.
1712           *
1713           *  2) To avoid stalling, execobject.offset should match the current
1714           *     address of that object within the active context.
1715           *
1716           * In order to satisfy all of the invariants that make userspace
1717           * relocations to be safe (see relocate_cmd_buffer()), we need to
1718           * further ensure that the addresses we use match those used by the
1719           * kernel for the most recent execbuf2.
1720           *
1721           * The kernel may still choose to do relocations anyway if something
1722           * has moved in the GTT. In this case, the relocation list still
1723           * needs to be valid. All relocations on the batch buffers are
1724           * already valid and kept up-to-date. For surface state relocations,
1725           * by applying the relocations in relocate_cmd_buffer, we ensured
1726           * that the address in the RENDER_SURFACE_STATE matches
1727           * presumed_offset, so it should be safe for the kernel to relocate
1728           * them as needed.
1729           */
1730          for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1731             relocate_cmd_buffer(cmd_buffers[i], execbuf);
1732 
1733             anv_reloc_list_apply(device, &cmd_buffers[i]->surface_relocs,
1734                                  device->surface_state_pool.block_pool.bo,
1735                                  true /* always relocate surface states */);
1736          }
1737       } else {
1738          /* In the case where we fall back to doing kernel relocations, we
1739           * need to ensure that the relocation list is valid. All relocations
1740           * on the batch buffers are already valid and kept up-to-date. Since
1741           * surface states are shared between command buffers and we don't
1742           * know what order they will be submitted to the kernel, we don't
1743           * know what address is actually written in the surface state object
1744           * at any given time. The only option is to set a bogus presumed
1745           * offset and let the kernel relocate them.
1746           */
1747          for (uint32_t i = 0; i < num_cmd_buffers; i++)
1748             reset_cmd_buffer_surface_offsets(cmd_buffers[i]);
1749       }
1750    }
1751 
1752    struct anv_batch_bo *first_batch_bo =
1753       list_first_entry(&cmd_buffers[0]->batch_bos, struct anv_batch_bo, link);
1754 
1755    /* The kernel requires that the last entry in the validation list be the
1756     * batch buffer to execute.  We can simply swap the element
1757     * corresponding to the first batch_bo in the chain with the last
1758     * element in the list.
1759     */
1760    if (first_batch_bo->bo->index != execbuf->bo_count - 1) {
1761       uint32_t idx = first_batch_bo->bo->index;
1762       uint32_t last_idx = execbuf->bo_count - 1;
1763 
1764       struct drm_i915_gem_exec_object2 tmp_obj = execbuf->objects[idx];
1765       assert(execbuf->bos[idx] == first_batch_bo->bo);
1766 
1767       execbuf->objects[idx] = execbuf->objects[last_idx];
1768       execbuf->bos[idx] = execbuf->bos[last_idx];
1769       execbuf->bos[idx]->index = idx;
1770 
1771       execbuf->objects[last_idx] = tmp_obj;
1772       execbuf->bos[last_idx] = first_batch_bo->bo;
1773       first_batch_bo->bo->index = last_idx;
1774    }
1775 
1776    /* If we are pinning our BOs, we shouldn't have to relocate anything */
1777    if (device->physical->use_softpin)
1778       assert(!execbuf->has_relocs);
1779 
1780    /* Now we go through and fixup all of the relocation lists to point to the
1781     * correct indices in the object array (I915_EXEC_HANDLE_LUT).  We have to
1782     * do this after we reorder the list above as some of the indices may have
1783     * changed.
1784     */
1785    struct anv_batch_bo **bbo;
1786    if (execbuf->has_relocs) {
1787       assert(num_cmd_buffers == 1);
1788       u_vector_foreach(bbo, &cmd_buffers[0]->seen_bbos)
1789          anv_cmd_buffer_process_relocs(cmd_buffers[0], &(*bbo)->relocs);
1790 
1791       anv_cmd_buffer_process_relocs(cmd_buffers[0], &cmd_buffers[0]->surface_relocs);
1792    }
1793 
1794    if (!device->info.has_llc) {
1795       __builtin_ia32_mfence();
1796       for (uint32_t i = 0; i < num_cmd_buffers; i++) {
1797          u_vector_foreach(bbo, &cmd_buffers[i]->seen_bbos) {
1798             for (uint32_t off = 0; off < (*bbo)->length; off += CACHELINE_SIZE)
1799                __builtin_ia32_clflush((*bbo)->bo->map + off);
1800          }
1801       }
1802    }
1803 
1804    struct anv_batch *batch = &cmd_buffers[0]->batch;
1805    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1806       .buffers_ptr = (uintptr_t) execbuf->objects,
1807       .buffer_count = execbuf->bo_count,
1808       .batch_start_offset = 0,
1809       /* On platforms that cannot chain batch buffers because of the i915
1810        * command parser, we have to provide the batch length. Everywhere else
1811        * we'll chain batches so no point in passing a length.
1812        */
1813       .batch_len = device->can_chain_batches ? 0 : batch->next - batch->start,
1814       .cliprects_ptr = 0,
1815       .num_cliprects = 0,
1816       .DR1 = 0,
1817       .DR4 = 0,
1818       .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | (no_reloc ? I915_EXEC_NO_RELOC : 0),
1819       .rsvd1 = device->context_id,
1820       .rsvd2 = 0,
1821    };
1822 
1823    return VK_SUCCESS;
1824 }
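
/* Illustrative sketch (not part of the driver): the "batch buffer last"
 * requirement above is met by swapping two entries of the parallel
 * objects/bos arrays while keeping each BO's cached index coherent.  In
 * isolation, with a hypothetical exec type and kept out of the build, the
 * swap looks like:
 */
#if 0
struct sketch_bo { uint32_t index; };

struct sketch_exec {
   struct drm_i915_gem_exec_object2 *objects;
   struct sketch_bo **bos;
   uint32_t bo_count;
};

static void
sketch_move_to_last(struct sketch_exec *exec, uint32_t idx)
{
   uint32_t last = exec->bo_count - 1;
   if (idx == last)
      return;

   struct drm_i915_gem_exec_object2 tmp_obj = exec->objects[idx];
   struct sketch_bo *tmp_bo = exec->bos[idx];

   exec->objects[idx] = exec->objects[last];
   exec->bos[idx] = exec->bos[last];
   exec->bos[idx]->index = idx;        /* keep the cached index in sync */

   exec->objects[last] = tmp_obj;
   exec->bos[last] = tmp_bo;
   exec->bos[last]->index = last;
}
#endif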
1825 
1826 static VkResult
1827 setup_empty_execbuf(struct anv_execbuf *execbuf, struct anv_queue *queue)
1828 {
1829    struct anv_device *device = queue->device;
1830    VkResult result = anv_execbuf_add_bo(device, execbuf,
1831                                         device->trivial_batch_bo,
1832                                         NULL, 0);
1833    if (result != VK_SUCCESS)
1834       return result;
1835 
1836    execbuf->execbuf = (struct drm_i915_gem_execbuffer2) {
1837       .buffers_ptr = (uintptr_t) execbuf->objects,
1838       .buffer_count = execbuf->bo_count,
1839       .batch_start_offset = 0,
1840       .batch_len = 8, /* GFX7_MI_BATCH_BUFFER_END and NOOP */
1841       .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
1842       .rsvd1 = device->context_id,
1843       .rsvd2 = 0,
1844    };
1845 
1846    return VK_SUCCESS;
1847 }
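
/* Illustrative sketch (not part of the driver): the hard-coded batch_len of
 * 8 bytes above corresponds to the two dwords in the trivial batch.
 * Assuming the usual MI encodings (MI_BATCH_BUFFER_END is opcode 0x0A in
 * bits 28:23 and MI_NOOP is an all-zero dword), filling such a batch could
 * look like this (kept out of the build):
 */
#if 0
static void
sketch_fill_trivial_batch(uint32_t *map)
{
   map[0] = 0x0A << 23;   /* MI_BATCH_BUFFER_END (assumed encoding) */
   map[1] = 0;            /* MI_NOOP, pads to an even number of dwords */
}
#endif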
1848 
1849 /* We lock around execbuf for three main reasons:
1850  *
1851  *  1) When a block pool is resized, we create a new gem handle with a
1852  *     different size and, in the case of surface states, possibly a different
1853  *     center offset but we re-use the same anv_bo struct when we do so. If
1854  *     this happens in the middle of setting up an execbuf, we could end up
1855  *     with our list of BOs out of sync with our list of gem handles.
1856  *
1857  *  2) The algorithm we use for building the list of unique buffers isn't
1858  *     thread-safe. While the client is supposed to synchronize around
1859  *     QueueSubmit, this would be extremely difficult to debug if it ever came
1860  *     up in the wild due to a broken app. It's better to play it safe and
1861  *     just lock around QueueSubmit.
1862  *
1863  *  3) The anv_cmd_buffer_execbuf function may perform relocations in
1864  *     userspace. Because the surface state buffer is shared between
1865  *     batches, we can't afford to have that happen from multiple threads
1866  *     at the same time. Even though the user is supposed to ensure this
1867  *     doesn't happen, we play it safe as in (2) above.
1868  *
1869  * Since the only other things that ever take the device lock, such as block
1870  * pool resizes, only rarely happen, the lock will almost never be contended,
1871  * so taking it isn't really an expensive operation in this case.
1872  */
1873 VkResult
1874 anv_queue_execbuf_locked(struct anv_queue *queue,
1875                          struct anv_queue_submit *submit)
1876 {
1877    struct anv_device *device = queue->device;
1878    struct anv_execbuf execbuf;
1879    anv_execbuf_init(&execbuf);
1880    execbuf.alloc = submit->alloc;
1881    execbuf.alloc_scope = submit->alloc_scope;
1882    execbuf.perf_query_pass = submit->perf_query_pass;
1883 
1884    /* Always add the workaround BO as it includes a driver identifier for the
1885     * error_state.
1886     */
1887    VkResult result =
1888       anv_execbuf_add_bo(device, &execbuf, device->workaround_bo, NULL, 0);
1889    if (result != VK_SUCCESS)
1890       goto error;
1891 
1892    for (uint32_t i = 0; i < submit->fence_bo_count; i++) {
1893       int signaled;
1894       struct anv_bo *bo = anv_unpack_ptr(submit->fence_bos[i], 1, &signaled);
1895 
1896       result = anv_execbuf_add_bo(device, &execbuf, bo, NULL,
1897                                   signaled ? EXEC_OBJECT_WRITE : 0);
1898       if (result != VK_SUCCESS)
1899          goto error;
1900    }
1901 
1902    if (submit->cmd_buffer_count) {
1903       result = setup_execbuf_for_cmd_buffers(&execbuf, queue,
1904                                              submit->cmd_buffers,
1905                                              submit->cmd_buffer_count);
1906    } else if (submit->simple_bo) {
1907       result = anv_execbuf_add_bo(device, &execbuf, submit->simple_bo, NULL, 0);
1908       if (result != VK_SUCCESS)
1909          goto error;
1910 
1911       execbuf.execbuf = (struct drm_i915_gem_execbuffer2) {
1912          .buffers_ptr = (uintptr_t) execbuf.objects,
1913          .buffer_count = execbuf.bo_count,
1914          .batch_start_offset = 0,
1915          .batch_len = submit->simple_bo_size,
1916          .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags | I915_EXEC_NO_RELOC,
1917          .rsvd1 = device->context_id,
1918          .rsvd2 = 0,
1919       };
1920    } else {
1921       result = setup_empty_execbuf(&execbuf, queue);
1922    }
1923 
1924    if (result != VK_SUCCESS)
1925       goto error;
1926 
1927    const bool has_perf_query =
1928       submit->perf_query_pass >= 0 &&
1929       submit->cmd_buffer_count &&
1930       submit->perf_query_pool;
1931 
1932    if (INTEL_DEBUG(DEBUG_SUBMIT)) {
1933       fprintf(stderr, "Batch offset=0x%x len=0x%x on queue 0\n",
1934               execbuf.execbuf.batch_start_offset, execbuf.execbuf.batch_len);
1935       for (uint32_t i = 0; i < execbuf.bo_count; i++) {
1936          const struct anv_bo *bo = execbuf.bos[i];
1937 
1938          fprintf(stderr, "   BO: addr=0x%016"PRIx64" size=%010"PRIx64" handle=%05u name=%s\n",
1939                  bo->offset, bo->size, bo->gem_handle, bo->name);
1940       }
1941    }
1942 
1943    if (INTEL_DEBUG(DEBUG_BATCH)) {
1944       fprintf(stderr, "Batch on queue %d\n", (int)(queue - device->queues));
1945       if (submit->cmd_buffer_count) {
1946          if (has_perf_query) {
1947             struct anv_query_pool *query_pool = submit->perf_query_pool;
1948             struct anv_bo *pass_batch_bo = query_pool->bo;
1949             uint64_t pass_batch_offset =
1950                khr_perf_query_preamble_offset(query_pool,
1951                                               submit->perf_query_pass);
1952 
1953             intel_print_batch(&device->decoder_ctx,
1954                               pass_batch_bo->map + pass_batch_offset, 64,
1955                               pass_batch_bo->offset + pass_batch_offset, false);
1956          }
1957 
1958          for (uint32_t i = 0; i < submit->cmd_buffer_count; i++) {
1959             struct anv_batch_bo **bo =
1960                u_vector_tail(&submit->cmd_buffers[i]->seen_bbos);
1961             device->cmd_buffer_being_decoded = submit->cmd_buffers[i];
1962             intel_print_batch(&device->decoder_ctx, (*bo)->bo->map,
1963                               (*bo)->bo->size, (*bo)->bo->offset, false);
1964             device->cmd_buffer_being_decoded = NULL;
1965          }
1966       } else if (submit->simple_bo) {
1967          intel_print_batch(&device->decoder_ctx, submit->simple_bo->map,
1968                            submit->simple_bo->size, submit->simple_bo->offset, false);
1969       } else {
1970          intel_print_batch(&device->decoder_ctx,
1971                            device->trivial_batch_bo->map,
1972                            device->trivial_batch_bo->size,
1973                            device->trivial_batch_bo->offset, false);
1974       }
1975    }
1976 
1977    if (submit->fence_count > 0) {
1978       if (device->has_thread_submit) {
1979          execbuf.timeline_fences.fence_count = submit->fence_count;
1980          execbuf.timeline_fences.handles_ptr = (uintptr_t)submit->fences;
1981          execbuf.timeline_fences.values_ptr = (uintptr_t)submit->fence_values;
1982          anv_execbuf_add_ext(&execbuf,
1983                              DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
1984                              &execbuf.timeline_fences.base);
1985       } else {
1986          execbuf.execbuf.flags |= I915_EXEC_FENCE_ARRAY;
1987          execbuf.execbuf.num_cliprects = submit->fence_count;
1988          execbuf.execbuf.cliprects_ptr = (uintptr_t)submit->fences;
1989       }
1990    }
1991 
1992    if (submit->in_fence != -1) {
1993       assert(!device->has_thread_submit);
1994       execbuf.execbuf.flags |= I915_EXEC_FENCE_IN;
1995       execbuf.execbuf.rsvd2 |= (uint32_t)submit->in_fence;
1996    }
1997 
1998    if (submit->need_out_fence) {
1999       assert(!device->has_thread_submit);
2000       execbuf.execbuf.flags |= I915_EXEC_FENCE_OUT;
2001    }
2002 
2003    if (has_perf_query) {
2004       struct anv_query_pool *query_pool = submit->perf_query_pool;
2005       assert(submit->perf_query_pass < query_pool->n_passes);
2006       struct intel_perf_query_info *query_info =
2007          query_pool->pass_query[submit->perf_query_pass];
2008 
2009       /* Some performance queries only use the pipeline statistics HW, so there
2010        * is no OA configuration involved and no need to reconfigure in that case.
2011        */
2012       if (!INTEL_DEBUG(DEBUG_NO_OACONFIG) &&
2013           (query_info->kind == INTEL_PERF_QUERY_TYPE_OA ||
2014            query_info->kind == INTEL_PERF_QUERY_TYPE_RAW)) {
2015          int ret = intel_ioctl(device->perf_fd, I915_PERF_IOCTL_CONFIG,
2016                                (void *)(uintptr_t) query_info->oa_metrics_set_id);
2017          if (ret < 0) {
2018             result = anv_device_set_lost(device,
2019                                          "i915-perf config failed: %s",
2020                                          strerror(errno));
2021          }
2022       }
2023 
2024       struct anv_bo *pass_batch_bo = query_pool->bo;
2025 
2026       struct drm_i915_gem_exec_object2 query_pass_object = {
2027          .handle = pass_batch_bo->gem_handle,
2028          .offset = pass_batch_bo->offset,
2029          .flags  = pass_batch_bo->flags,
2030       };
2031       struct drm_i915_gem_execbuffer2 query_pass_execbuf = {
2032          .buffers_ptr = (uintptr_t) &query_pass_object,
2033          .buffer_count = 1,
2034          .batch_start_offset = khr_perf_query_preamble_offset(query_pool,
2035                                                               submit->perf_query_pass),
2036          .flags = I915_EXEC_HANDLE_LUT | queue->exec_flags,
2037          .rsvd1 = device->context_id,
2038       };
2039 
2040       int ret = queue->device->info.no_hw ? 0 :
2041          anv_gem_execbuffer(queue->device, &query_pass_execbuf);
2042       if (ret)
2043          result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
2044    }
2045 
2046    int ret = queue->device->info.no_hw ? 0 :
2047       anv_gem_execbuffer(queue->device, &execbuf.execbuf);
2048    if (ret)
2049       result = anv_queue_set_lost(queue, "execbuf2 failed: %m");
2050 
2051    struct drm_i915_gem_exec_object2 *objects = execbuf.objects;
2052    for (uint32_t k = 0; k < execbuf.bo_count; k++) {
2053       if (execbuf.bos[k]->flags & EXEC_OBJECT_PINNED)
2054          assert(execbuf.bos[k]->offset == objects[k].offset);
2055       execbuf.bos[k]->offset = objects[k].offset;
2056    }
2057 
2058    if (result == VK_SUCCESS && submit->need_out_fence)
2059       submit->out_fence = execbuf.execbuf.rsvd2 >> 32;
2060 
2061  error:
2062    pthread_cond_broadcast(&device->queue_submit);
2063 
2064    anv_execbuf_finish(&execbuf);
2065 
2066    return result;
2067 }
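
/* Illustrative sketch (not part of the driver): as used above, execbuf2's
 * rsvd2 field carries sync-file fds when I915_EXEC_FENCE_IN /
 * I915_EXEC_FENCE_OUT are set: the input fd is passed in the low 32 bits and
 * the kernel returns the output fd in the high 32 bits.  Packing and
 * unpacking them generically (kept out of the build):
 */
#if 0
static inline uint64_t
sketch_pack_in_fence(uint64_t rsvd2, int in_fence_fd)
{
   return rsvd2 | (uint32_t)in_fence_fd;   /* low 32 bits: FENCE_IN */
}

static inline int
sketch_unpack_out_fence(uint64_t rsvd2)
{
   return (int)(rsvd2 >> 32);              /* high 32 bits: FENCE_OUT */
}
#endif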
2068