/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_resource.c
 *
 * Resources are images, buffers, and other objects used by the GPU.
 *
 * XXX: explain resources
 */

#include <stdio.h>
#include <errno.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/os_memory.h"
#include "util/u_cpu_detect.h"
#include "util/u_inlines.h"
#include "util/format/u_format.h"
#include "util/u_memory.h"
#include "util/u_threaded_context.h"
#include "util/u_transfer.h"
#include "util/u_transfer_helper.h"
#include "util/u_upload_mgr.h"
#include "util/ralloc.h"
#include "iris_batch.h"
#include "iris_context.h"
#include "iris_resource.h"
#include "iris_screen.h"
#include "intel/common/intel_aux_map.h"
#include "intel/dev/intel_debug.h"
#include "isl/isl.h"
#include "drm-uapi/drm_fourcc.h"
#include "drm-uapi/i915_drm.h"

enum modifier_priority {
   MODIFIER_PRIORITY_INVALID = 0,
   MODIFIER_PRIORITY_LINEAR,
   MODIFIER_PRIORITY_X,
   MODIFIER_PRIORITY_Y,
   MODIFIER_PRIORITY_Y_CCS,
   MODIFIER_PRIORITY_Y_GFX12_RC_CCS,
   MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC,
};

static const uint64_t priority_to_modifier[] = {
   [MODIFIER_PRIORITY_INVALID] = DRM_FORMAT_MOD_INVALID,
   [MODIFIER_PRIORITY_LINEAR] = DRM_FORMAT_MOD_LINEAR,
   [MODIFIER_PRIORITY_X] = I915_FORMAT_MOD_X_TILED,
   [MODIFIER_PRIORITY_Y] = I915_FORMAT_MOD_Y_TILED,
   [MODIFIER_PRIORITY_Y_CCS] = I915_FORMAT_MOD_Y_TILED_CCS,
   [MODIFIER_PRIORITY_Y_GFX12_RC_CCS] = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
   [MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC] = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
};

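/**
 * Check whether the given modifier can be used with this device, format,
 * and set of bind flags.
 *
 * The first switch rejects modifiers that the hardware generation cannot
 * use at all; the second applies per-modifier format restrictions (media
 * compression only works with a fixed set of RGB/YUV formats, and render
 * compression requires a render target format that supports CCS_E).
 */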
static bool
modifier_is_supported(const struct intel_device_info *devinfo,
                      enum pipe_format pfmt, unsigned bind,
                      uint64_t modifier)
{
   /* Check for basic device support. */
   switch (modifier) {
   case DRM_FORMAT_MOD_LINEAR:
   case I915_FORMAT_MOD_X_TILED:
      break;
   case I915_FORMAT_MOD_Y_TILED:
      if (devinfo->ver <= 8 && (bind & PIPE_BIND_SCANOUT))
         return false;
      if (devinfo->verx10 >= 125)
         return false;
      break;
   case I915_FORMAT_MOD_Y_TILED_CCS:
      if (devinfo->ver <= 8 || devinfo->ver >= 12)
         return false;
      break;
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      if (devinfo->verx10 != 120)
         return false;
      break;
   case DRM_FORMAT_MOD_INVALID:
   default:
      return false;
   }

   /* Check remaining requirements. */
   switch (modifier) {
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
      if (INTEL_DEBUG(DEBUG_NO_CCS))
         return false;

      if (pfmt != PIPE_FORMAT_BGRA8888_UNORM &&
          pfmt != PIPE_FORMAT_RGBA8888_UNORM &&
          pfmt != PIPE_FORMAT_BGRX8888_UNORM &&
          pfmt != PIPE_FORMAT_RGBX8888_UNORM &&
          pfmt != PIPE_FORMAT_NV12 &&
          pfmt != PIPE_FORMAT_P010 &&
          pfmt != PIPE_FORMAT_P012 &&
          pfmt != PIPE_FORMAT_P016 &&
          pfmt != PIPE_FORMAT_YUYV &&
          pfmt != PIPE_FORMAT_UYVY) {
         return false;
      }
      break;
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_CCS: {
      if (INTEL_DEBUG(DEBUG_NO_CCS))
         return false;

      enum isl_format rt_format =
         iris_format_for_usage(devinfo, pfmt,
                               ISL_SURF_USAGE_RENDER_TARGET_BIT).fmt;

      if (rt_format == ISL_FORMAT_UNSUPPORTED ||
          !isl_format_supports_ccs_e(devinfo, rt_format))
         return false;
      break;
   }
   default:
      break;
   }

   return true;
}

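/**
 * Pick the most preferred modifier from the caller-supplied list that
 * modifier_is_supported() accepts for this template, using the
 * modifier_priority ranking above.  Returns DRM_FORMAT_MOD_INVALID if
 * none of the candidates are supported.
 */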
static uint64_t
select_best_modifier(struct intel_device_info *devinfo,
                     const struct pipe_resource *templ,
                     const uint64_t *modifiers,
                     int count)
{
   enum modifier_priority prio = MODIFIER_PRIORITY_INVALID;

   for (int i = 0; i < count; i++) {
      if (!modifier_is_supported(devinfo, templ->format, templ->bind,
                                 modifiers[i]))
         continue;

      switch (modifiers[i]) {
      case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_GFX12_RC_CCS_CC);
         break;
      case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_GFX12_RC_CCS);
         break;
      case I915_FORMAT_MOD_Y_TILED_CCS:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y_CCS);
         break;
      case I915_FORMAT_MOD_Y_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_Y);
         break;
      case I915_FORMAT_MOD_X_TILED:
         prio = MAX2(prio, MODIFIER_PRIORITY_X);
         break;
      case DRM_FORMAT_MOD_LINEAR:
         prio = MAX2(prio, MODIFIER_PRIORITY_LINEAR);
         break;
      case DRM_FORMAT_MOD_INVALID:
      default:
         break;
      }
   }

   return priority_to_modifier[prio];
}

static inline bool is_modifier_external_only(enum pipe_format pfmt,
                                             uint64_t modifier)
{
   /* Only allow external usage for the following cases: YUV formats
    * and the media-compression modifier. The render engine lacks
    * support for rendering to a media-compressed surface if the
    * compression ratio is large enough. By requiring external usage
    * of media-compressed surfaces, resolves are avoided.
    */
   return util_format_is_yuv(pfmt) ||
          modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS;
}

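/**
 * Implements pipe_screen::query_dmabuf_modifiers by filtering the full
 * modifier list through modifier_is_supported().
 *
 * Only up to \p max entries are written, but \p count always receives the
 * total, so callers typically make two passes.  A sketch of that calling
 * convention (the local names here are purely illustrative):
 *
 *    int count;
 *    pscreen->query_dmabuf_modifiers(pscreen, fmt, 0, NULL, NULL, &count);
 *    uint64_t *mods = malloc(count * sizeof(uint64_t));
 *    pscreen->query_dmabuf_modifiers(pscreen, fmt, count, mods, NULL, &count);
 */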
static void
iris_query_dmabuf_modifiers(struct pipe_screen *pscreen,
                            enum pipe_format pfmt,
                            int max,
                            uint64_t *modifiers,
                            unsigned int *external_only,
                            int *count)
{
   struct iris_screen *screen = (void *) pscreen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   uint64_t all_modifiers[] = {
      DRM_FORMAT_MOD_LINEAR,
      I915_FORMAT_MOD_X_TILED,
      I915_FORMAT_MOD_Y_TILED,
      I915_FORMAT_MOD_Y_TILED_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
      I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
   };

   int supported_mods = 0;

   for (int i = 0; i < ARRAY_SIZE(all_modifiers); i++) {
      if (!modifier_is_supported(devinfo, pfmt, 0, all_modifiers[i]))
         continue;

      if (supported_mods < max) {
         if (modifiers)
            modifiers[supported_mods] = all_modifiers[i];

         if (external_only) {
            external_only[supported_mods] =
               is_modifier_external_only(pfmt, all_modifiers[i]);
         }
      }

      supported_mods++;
   }

   *count = supported_mods;
}

static bool
iris_is_dmabuf_modifier_supported(struct pipe_screen *pscreen,
                                  uint64_t modifier, enum pipe_format pfmt,
                                  bool *external_only)
{
   struct iris_screen *screen = (void *) pscreen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   if (modifier_is_supported(devinfo, pfmt, 0, modifier)) {
      if (external_only)
         *external_only = is_modifier_external_only(pfmt, modifier);

      return true;
   }

   return false;
}

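/**
 * Report how many dma-buf planes a (modifier, format) pair exposes.
 *
 * CCS modifiers add one aux plane per main plane (e.g. 2-plane NV12 with
 * the media-compression modifier exports 4 planes), and the GEN12
 * render-compression modifier with clear color adds a third plane that
 * holds the clear color value.
 */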
static unsigned int
iris_get_dmabuf_modifier_planes(struct pipe_screen *pscreen, uint64_t modifier,
                                enum pipe_format format)
{
   unsigned int planes = util_format_get_num_planes(format);

   switch (modifier) {
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      return 3;
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
   case I915_FORMAT_MOD_Y_TILED_CCS:
      return 2 * planes;
   default:
      return planes;
   }
}

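/**
 * Return the ISL format to use for a shader image view.
 *
 * Writable storage images use the format as-is; readable ones may need it
 * lowered (or, on Gfx8 without a matching typed format, demoted to RAW for
 * untyped reads) to fit the hardware's typed-read format support.
 */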
enum isl_format
iris_image_view_get_format(struct iris_context *ice,
                           const struct pipe_image_view *img)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
   enum isl_format isl_fmt =
      iris_format_for_usage(devinfo, img->format, usage).fmt;

   if (img->shader_access & PIPE_IMAGE_ACCESS_READ) {
      /* On Gfx8, try to use typed surface reads (which support a
       * limited number of formats), and if not possible, fall back
       * to untyped reads.
       */
      if (devinfo->ver == 8 &&
          !isl_has_matching_typed_storage_image_format(devinfo, isl_fmt))
         return ISL_FORMAT_RAW;
      else
         return isl_lower_storage_image_format(devinfo, isl_fmt);
   }

   return isl_fmt;
}

static struct pipe_memory_object *
iris_memobj_create_from_handle(struct pipe_screen *pscreen,
                               struct winsys_handle *whandle,
                               bool dedicated)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_memory_object *memobj = CALLOC_STRUCT(iris_memory_object);
   struct iris_bo *bo;

   if (!memobj)
      return NULL;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_SHARED:
      bo = iris_bo_gem_create_from_name(screen->bufmgr, "winsys image",
                                        whandle->handle);
      break;
   case WINSYS_HANDLE_TYPE_FD:
      bo = iris_bo_import_dmabuf(screen->bufmgr, whandle->handle);
      break;
   default:
      unreachable("invalid winsys handle type");
   }

   if (!bo) {
      free(memobj);
      return NULL;
   }

   memobj->b.dedicated = dedicated;
   memobj->bo = bo;
   memobj->format = whandle->format;
   memobj->stride = whandle->stride;

   return &memobj->b;
}

static void
iris_memobj_destroy(struct pipe_screen *pscreen,
                    struct pipe_memory_object *pmemobj)
{
   struct iris_memory_object *memobj = (struct iris_memory_object *)pmemobj;

   iris_bo_unreference(memobj->bo);
   free(memobj);
}

struct pipe_resource *
iris_resource_get_separate_stencil(struct pipe_resource *p_res)
{
   /* For packed depth-stencil, we treat depth as the primary resource
    * and store S8 as the "second plane" resource.
    */
   if (p_res->next && p_res->next->format == PIPE_FORMAT_S8_UINT)
      return p_res->next;

   return NULL;
}

static void
iris_resource_set_separate_stencil(struct pipe_resource *p_res,
                                   struct pipe_resource *stencil)
{
   assert(util_format_has_depth(util_format_description(p_res->format)));
   pipe_resource_reference(&p_res->next, stencil);
}

void
iris_get_depth_stencil_resources(struct pipe_resource *res,
                                 struct iris_resource **out_z,
                                 struct iris_resource **out_s)
{
   if (!res) {
      *out_z = NULL;
      *out_s = NULL;
      return;
   }

   if (res->format != PIPE_FORMAT_S8_UINT) {
      *out_z = (void *) res;
      *out_s = (void *) iris_resource_get_separate_stencil(res);
   } else {
      *out_z = NULL;
      *out_s = (void *) res;
   }
}

void
iris_resource_disable_aux(struct iris_resource *res)
{
   iris_bo_unreference(res->aux.bo);
   iris_bo_unreference(res->aux.clear_color_bo);
   free(res->aux.state);

   res->aux.usage = ISL_AUX_USAGE_NONE;
   res->aux.surf.size_B = 0;
   res->aux.bo = NULL;
   res->aux.extra_aux.surf.size_B = 0;
   res->aux.clear_color_bo = NULL;
   res->aux.state = NULL;
}

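/**
 * Translate a resource template into BO_ALLOC_* flags for the buffer
 * allocator: staging and streaming resources prefer (coherent) system
 * memory, scanout surfaces are flagged for display, and shared or
 * multi-planar resources must get a real BO rather than a suballocation.
 */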
static uint32_t
iris_resource_alloc_flags(const struct iris_screen *screen,
                          const struct pipe_resource *templ,
                          enum isl_aux_usage aux_usage)
{
   if (templ->flags & IRIS_RESOURCE_FLAG_DEVICE_MEM)
      return 0;

   uint32_t flags = 0;

   switch (templ->usage) {
   case PIPE_USAGE_STAGING:
      flags |= BO_ALLOC_SMEM | BO_ALLOC_COHERENT;
      break;
   case PIPE_USAGE_STREAM:
      flags |= BO_ALLOC_SMEM;
      break;
   case PIPE_USAGE_DYNAMIC:
   case PIPE_USAGE_DEFAULT:
   case PIPE_USAGE_IMMUTABLE:
      /* Use LMEM for these if possible */
      break;
   }

   if (templ->bind & PIPE_BIND_SCANOUT)
      flags |= BO_ALLOC_SCANOUT;

   if (templ->flags & (PIPE_RESOURCE_FLAG_MAP_COHERENT |
                       PIPE_RESOURCE_FLAG_MAP_PERSISTENT))
      flags |= BO_ALLOC_SMEM;

   if (screen->devinfo.verx10 >= 125 && isl_aux_usage_has_ccs(aux_usage))
      flags |= BO_ALLOC_LMEM;

   if ((templ->bind & PIPE_BIND_SHARED) ||
       util_format_get_num_planes(templ->format) > 1)
      flags |= BO_ALLOC_NO_SUBALLOC;

   return flags;
}

static void
iris_resource_destroy(struct pipe_screen *screen,
                      struct pipe_resource *p_res)
{
   struct iris_resource *res = (struct iris_resource *) p_res;

   if (p_res->target == PIPE_BUFFER)
      util_range_destroy(&res->valid_buffer_range);

   iris_resource_disable_aux(res);

   threaded_resource_deinit(p_res);
   iris_bo_unreference(res->bo);
   iris_pscreen_unref(res->orig_screen);

   free(res);
}

static struct iris_resource *
iris_alloc_resource(struct pipe_screen *pscreen,
                    const struct pipe_resource *templ)
{
   struct iris_resource *res = calloc(1, sizeof(struct iris_resource));
   if (!res)
      return NULL;

   res->base.b = *templ;
   res->base.b.screen = pscreen;
   res->orig_screen = iris_pscreen_ref(pscreen);
   pipe_reference_init(&res->base.b.reference, 1);
   threaded_resource_init(&res->base.b, false);

   if (templ->target == PIPE_BUFFER)
      util_range_init(&res->valid_buffer_range);

   return res;
}

unsigned
iris_get_num_logical_layers(const struct iris_resource *res, unsigned level)
{
   if (res->surf.dim == ISL_SURF_DIM_3D)
      return u_minify(res->surf.logical_level0_px.depth, level);
   else
      return res->surf.logical_level0_px.array_len;
}

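/**
 * Allocate the res->aux.state tracking map: a per-level array of pointers,
 * each pointing to a per-layer array of isl_aux_state values, packed into
 * a single allocation so the whole map can be freed with one free().
 */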
static enum isl_aux_state **
create_aux_state_map(struct iris_resource *res, enum isl_aux_state initial)
{
   assert(res->aux.state == NULL);

   uint32_t total_slices = 0;
   for (uint32_t level = 0; level < res->surf.levels; level++)
      total_slices += iris_get_num_logical_layers(res, level);

   const size_t per_level_array_size =
      res->surf.levels * sizeof(enum isl_aux_state *);

   /* We're going to allocate a single chunk of data for both the per-level
    * reference array and the arrays of aux_state. This makes cleanup
    * significantly easier.
    */
   const size_t total_size =
      per_level_array_size + total_slices * sizeof(enum isl_aux_state);

   void *data = malloc(total_size);
   if (!data)
      return NULL;

   enum isl_aux_state **per_level_arr = data;
   enum isl_aux_state *s = data + per_level_array_size;
   for (uint32_t level = 0; level < res->surf.levels; level++) {
      per_level_arr[level] = s;
      const unsigned level_layers = iris_get_num_logical_layers(res, level);
      for (uint32_t a = 0; a < level_layers; a++)
         *(s++) = initial;
   }
   assert((void *)s == data + total_size);

   return per_level_arr;
}

static unsigned
iris_get_aux_clear_color_state_size(struct iris_screen *screen,
                                    struct iris_resource *res)
{
   if (!isl_aux_usage_has_fast_clears(res->aux.usage))
      return 0;

   assert(!isl_surf_usage_is_stencil(res->surf.usage));

   /* Depth packets can't specify indirect clear values. The only time depth
    * buffers can use indirect clear values is when they're accessed by the
    * sampler via render surface state objects.
    */
   if (isl_surf_usage_is_depth(res->surf.usage) &&
       !iris_sample_with_depth_aux(&screen->devinfo, res))
      return 0;

   return screen->isl_dev.ss.clear_color_state_size;
}

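/**
 * On devices with an aux map (the hardware-walked table translating main
 * surface addresses to the addresses of their compression data), record
 * the mapping from this resource's main surface to its CCS data.
 */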
static void
map_aux_addresses(struct iris_screen *screen, struct iris_resource *res,
                  enum pipe_format pfmt, unsigned plane)
{
   void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
   if (!aux_map_ctx)
      return;

   if (isl_aux_usage_has_ccs(res->aux.usage)) {
      const unsigned aux_offset = res->aux.extra_aux.surf.size_B > 0 ?
         res->aux.extra_aux.offset : res->aux.offset;
      const enum isl_format format =
         iris_format_for_usage(&screen->devinfo, pfmt, res->surf.usage).fmt;
      const uint64_t format_bits =
         intel_aux_map_format_bits(res->surf.tiling, format, plane);
      intel_aux_map_add_mapping(aux_map_ctx, res->bo->address + res->offset,
                                res->aux.bo->address + aux_offset,
                                res->surf.size_B, format_bits);
      res->bo->aux_map_address = res->aux.bo->address;
   }
}

static bool
want_ccs_e_for_format(const struct intel_device_info *devinfo,
                      enum isl_format format)
{
   if (!isl_format_supports_ccs_e(devinfo, format))
      return false;

   const struct isl_format_layout *fmtl = isl_format_get_layout(format);

   /* Prior to TGL, CCS_E seems to significantly hurt performance with 32-bit
    * floating point formats. For example, Paraview's "Wavelet Volume" case
    * uses both R32_FLOAT and R32G32B32A32_FLOAT, and enabling CCS_E for those
    * formats causes a 62% FPS drop.
    *
    * However, many benchmarks seem to use 16-bit float with no issues.
    */
   if (devinfo->ver <= 11 &&
       fmtl->channels.r.bits == 32 && fmtl->channels.r.type == ISL_SFLOAT)
      return false;

   return true;
}

static enum isl_surf_dim
target_to_isl_surf_dim(enum pipe_texture_target target)
{
   switch (target) {
   case PIPE_BUFFER:
   case PIPE_TEXTURE_1D:
   case PIPE_TEXTURE_1D_ARRAY:
      return ISL_SURF_DIM_1D;
   case PIPE_TEXTURE_2D:
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_RECT:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return ISL_SURF_DIM_2D;
   case PIPE_TEXTURE_3D:
      return ISL_SURF_DIM_3D;
   case PIPE_MAX_TEXTURE_TYPES:
      break;
   }
   unreachable("invalid texture type");
}

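/**
 * Configure the main surface (res->surf) from the gallium template:
 * choose tiling (dictated by the modifier when one is given), translate
 * PIPE_BIND_* flags into ISL usage bits, and initialize the isl_surf.
 *
 * Returns false for unknown modifiers or if isl_surf_init_s() fails.
 */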
static bool
iris_resource_configure_main(const struct iris_screen *screen,
                             struct iris_resource *res,
                             const struct pipe_resource *templ,
                             uint64_t modifier, uint32_t row_pitch_B)
{
   res->mod_info = isl_drm_modifier_get_info(modifier);

   if (modifier != DRM_FORMAT_MOD_INVALID && res->mod_info == NULL)
      return false;

   isl_tiling_flags_t tiling_flags = 0;

   if (res->mod_info != NULL) {
      tiling_flags = 1 << res->mod_info->tiling;
   } else if (templ->usage == PIPE_USAGE_STAGING ||
              templ->bind & (PIPE_BIND_LINEAR | PIPE_BIND_CURSOR)) {
      tiling_flags = ISL_TILING_LINEAR_BIT;
   } else if (templ->bind & PIPE_BIND_SCANOUT) {
      tiling_flags = screen->devinfo.has_tiling_uapi ?
                     ISL_TILING_X_BIT : ISL_TILING_LINEAR_BIT;
   } else {
      tiling_flags = ISL_TILING_ANY_MASK;
   }

   isl_surf_usage_flags_t usage = 0;

   if (res->mod_info && res->mod_info->aux_usage == ISL_AUX_USAGE_NONE)
      usage |= ISL_SURF_USAGE_DISABLE_AUX_BIT;

   if (templ->usage == PIPE_USAGE_STAGING)
      usage |= ISL_SURF_USAGE_STAGING_BIT;

   if (templ->bind & PIPE_BIND_RENDER_TARGET)
      usage |= ISL_SURF_USAGE_RENDER_TARGET_BIT;

   if (templ->bind & PIPE_BIND_SAMPLER_VIEW)
      usage |= ISL_SURF_USAGE_TEXTURE_BIT;

   if (templ->bind & PIPE_BIND_SHADER_IMAGE)
      usage |= ISL_SURF_USAGE_STORAGE_BIT;

   if (templ->bind & PIPE_BIND_SCANOUT)
      usage |= ISL_SURF_USAGE_DISPLAY_BIT;

   if (templ->target == PIPE_TEXTURE_CUBE ||
       templ->target == PIPE_TEXTURE_CUBE_ARRAY) {
      usage |= ISL_SURF_USAGE_CUBE_BIT;
   }

   if (templ->usage != PIPE_USAGE_STAGING &&
       util_format_is_depth_or_stencil(templ->format)) {

      /* Should be handled by u_transfer_helper */
      assert(!util_format_is_depth_and_stencil(templ->format));

      usage |= templ->format == PIPE_FORMAT_S8_UINT ?
               ISL_SURF_USAGE_STENCIL_BIT : ISL_SURF_USAGE_DEPTH_BIT;
   }

   const enum isl_format format =
      iris_format_for_usage(&screen->devinfo, templ->format, usage).fmt;

   const struct isl_surf_init_info init_info = {
      .dim = target_to_isl_surf_dim(templ->target),
      .format = format,
      .width = templ->width0,
      .height = templ->height0,
      .depth = templ->depth0,
      .levels = templ->last_level + 1,
      .array_len = templ->array_size,
      .samples = MAX2(templ->nr_samples, 1),
      .min_alignment_B = 0,
      .row_pitch_B = row_pitch_B,
      .usage = usage,
      .tiling_flags = tiling_flags
   };

   if (!isl_surf_init_s(&screen->isl_dev, &res->surf, &init_info))
      return false;

   res->internal_format = templ->format;

   return true;
}

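/**
 * Compute the CCS surface for the given main and HiZ/MCS surfaces, or,
 * on XeHP (where CCS needs no separate VMA), merely report whether CCS
 * is supported for them.
 */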
static bool
iris_get_ccs_surf_or_support(const struct isl_device *dev,
                             const struct isl_surf *surf,
                             struct isl_surf *aux_surf,
                             struct isl_surf *extra_aux_surf)
{
   assert(extra_aux_surf->size_B == 0);

   struct isl_surf *ccs_surf;
   const struct isl_surf *hiz_or_mcs_surf;
   if (aux_surf->size_B > 0) {
      assert(aux_surf->usage & (ISL_SURF_USAGE_HIZ_BIT |
                                ISL_SURF_USAGE_MCS_BIT));
      hiz_or_mcs_surf = aux_surf;
      ccs_surf = extra_aux_surf;
   } else {
      hiz_or_mcs_surf = NULL;
      ccs_surf = aux_surf;
   }

   if (dev->info->verx10 >= 125) {
      /* CCS doesn't require VMA on XeHP. So, instead of creating a separate
       * surface, we can just return whether CCS is supported for the given
       * input surfaces.
       */
      return isl_surf_supports_ccs(dev, surf, hiz_or_mcs_surf);
   } else {
      return isl_surf_get_ccs_surf(dev, surf, hiz_or_mcs_surf, ccs_surf, 0);
   }
}

/**
 * Configure aux for the resource, but don't allocate it. For images which
 * might be shared with modifiers, we must allocate the image and aux data in
 * a single bo.
 *
 * Returns false on unexpected error (e.g. allocation failed, or invalid
 * configuration result).
 */
static bool
iris_resource_configure_aux(struct iris_screen *screen,
                            struct iris_resource *res, bool imported)
{
   const struct intel_device_info *devinfo = &screen->devinfo;

   const bool has_mcs =
      isl_surf_get_mcs_surf(&screen->isl_dev, &res->surf, &res->aux.surf);

   const bool has_hiz = !INTEL_DEBUG(DEBUG_NO_HIZ) &&
      isl_surf_get_hiz_surf(&screen->isl_dev, &res->surf, &res->aux.surf);

   const bool has_ccs = !INTEL_DEBUG(DEBUG_NO_CCS) &&
      iris_get_ccs_surf_or_support(&screen->isl_dev, &res->surf,
                                   &res->aux.surf, &res->aux.extra_aux.surf);

   if (has_mcs) {
      assert(!res->mod_info);
      assert(!has_hiz);
      if (has_ccs) {
         res->aux.usage = ISL_AUX_USAGE_MCS_CCS;
      } else {
         res->aux.usage = ISL_AUX_USAGE_MCS;
      }
   } else if (has_hiz) {
      assert(!res->mod_info);
      assert(!has_mcs);
      if (!has_ccs) {
         res->aux.usage = ISL_AUX_USAGE_HIZ;
      } else if (res->surf.samples == 1 &&
                 (res->surf.usage & ISL_SURF_USAGE_TEXTURE_BIT)) {
         /* If this resource is single-sampled and will be used as a texture,
          * put the HiZ surface in write-through mode so that we can sample
          * from it.
          */
         res->aux.usage = ISL_AUX_USAGE_HIZ_CCS_WT;
      } else {
         res->aux.usage = ISL_AUX_USAGE_HIZ_CCS;
      }
   } else if (has_ccs) {
      if (res->mod_info) {
         res->aux.usage = res->mod_info->aux_usage;
      } else if (isl_surf_usage_is_stencil(res->surf.usage)) {
         res->aux.usage = ISL_AUX_USAGE_STC_CCS;
      } else if (want_ccs_e_for_format(devinfo, res->surf.format)) {
         res->aux.usage = devinfo->ver < 12 ?
            ISL_AUX_USAGE_CCS_E : ISL_AUX_USAGE_GFX12_CCS_E;
      } else {
         assert(isl_format_supports_ccs_d(devinfo, res->surf.format));
         res->aux.usage = ISL_AUX_USAGE_CCS_D;
      }
   }

   enum isl_aux_state initial_state;
   switch (res->aux.usage) {
   case ISL_AUX_USAGE_NONE:
      /* Having no aux buffer is only okay if there's no modifier with aux. */
      return !res->mod_info || res->mod_info->aux_usage == ISL_AUX_USAGE_NONE;
   case ISL_AUX_USAGE_HIZ:
   case ISL_AUX_USAGE_HIZ_CCS:
   case ISL_AUX_USAGE_HIZ_CCS_WT:
      initial_state = ISL_AUX_STATE_AUX_INVALID;
      break;
   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_MCS_CCS:
      /* The Ivybridge PRM, Vol 2 Part 1 p326 says:
       *
       *    "When MCS buffer is enabled and bound to MSRT, it is required
       *     that it is cleared prior to any rendering."
       *
       * Since we only use the MCS buffer for rendering, we just clear it
       * immediately on allocation. The clear value for MCS buffers is all
       * 1's, so we simply memset it to 0xff.
       */
      initial_state = ISL_AUX_STATE_CLEAR;
      break;
   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
   case ISL_AUX_USAGE_GFX12_CCS_E:
   case ISL_AUX_USAGE_STC_CCS:
   case ISL_AUX_USAGE_MC:
      if (imported) {
         assert(res->aux.usage != ISL_AUX_USAGE_STC_CCS);
         initial_state =
            isl_drm_modifier_get_default_aux_state(res->mod_info->modifier);
      } else if (devinfo->verx10 >= 125) {
         assert(res->aux.surf.size_B == 0);
         /* From Bspec 47709, "MCS/CCS Buffers for Render Target(s)":
          *
          *    "CCS surface does not require initialization. Illegal CCS
          *     [values] are treated as uncompressed memory."
          *
          * Even if we wanted to, we can't initialize the CCS via CPU map. So,
          * we choose an aux state which describes the current state and helps
          * avoid ambiguating (something not currently supported for STC_CCS).
          */
         assert(isl_aux_usage_has_compression(res->aux.usage));
         initial_state = isl_aux_usage_has_fast_clears(res->aux.usage) ?
                         ISL_AUX_STATE_COMPRESSED_CLEAR :
                         ISL_AUX_STATE_COMPRESSED_NO_CLEAR;
      } else {
         assert(res->aux.surf.size_B > 0);
         /* When CCS is used, we need to ensure that it starts off in a valid
          * state. From the Sky Lake PRM, "MCS Buffer for Render Target(s)":
          *
          *    "If Software wants to enable Color Compression without Fast
          *     clear, Software needs to initialize MCS with zeros."
          *
          * A CCS surface initialized to zero is in the pass-through state.
          * This state can avoid the need to ambiguate in some cases. We'll
          * map and zero the CCS later on in iris_resource_init_aux_buf.
          */
         initial_state = ISL_AUX_STATE_PASS_THROUGH;
      }
      break;
   default:
      unreachable("Unsupported aux mode");
   }

   /* Create the aux_state for the auxiliary buffer. */
   res->aux.state = create_aux_state_map(res, initial_state);
   if (!res->aux.state)
      return false;

   return true;
}

/**
 * Initialize the aux buffer contents.
 *
 * Returns false on unexpected error (e.g. mapping a BO failed).
 */
static bool
iris_resource_init_aux_buf(struct iris_screen *screen,
                           struct iris_resource *res)
{
   void *map = iris_bo_map(NULL, res->bo, MAP_WRITE | MAP_RAW);

   if (!map)
      return false;

   if (iris_resource_get_aux_state(res, 0, 0) != ISL_AUX_STATE_AUX_INVALID) {
      /* See iris_resource_configure_aux for the memset_value rationale. */
      uint8_t memset_value = isl_aux_usage_has_mcs(res->aux.usage) ? 0xFF : 0;
      memset((char*)map + res->aux.offset, memset_value,
             res->aux.surf.size_B);
   }

   memset((char*)map + res->aux.extra_aux.offset,
          0, res->aux.extra_aux.surf.size_B);

   /* Zero the indirect clear color to match ::fast_clear_color. */
   memset((char *)map + res->aux.clear_color_offset, 0,
          iris_get_aux_clear_color_state_size(screen, res));

   iris_bo_unmap(res->bo);

   if (res->aux.surf.size_B > 0) {
      res->aux.bo = res->bo;
      iris_bo_reference(res->aux.bo);
      map_aux_addresses(screen, res, res->internal_format, 0);
   }

   if (iris_get_aux_clear_color_state_size(screen, res) > 0) {
      res->aux.clear_color_bo = res->bo;
      iris_bo_reference(res->aux.clear_color_bo);
   }

   return true;
}

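/**
 * Fold the aux surface location info from a separately imported aux
 * plane resource into the corresponding main plane's resource.
 */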
static void
import_aux_info(struct iris_resource *res,
                const struct iris_resource *aux_res)
{
   assert(aux_res->aux.surf.row_pitch_B && aux_res->aux.offset);
   assert(res->bo == aux_res->aux.bo);
   assert(res->aux.surf.row_pitch_B == aux_res->aux.surf.row_pitch_B);
   assert(res->bo->size >= aux_res->aux.offset + res->aux.surf.size_B);

   iris_bo_reference(aux_res->aux.bo);
   res->aux.bo = aux_res->aux.bo;
   res->aux.offset = aux_res->aux.offset;
}

static void
iris_resource_finish_aux_import(struct pipe_screen *pscreen,
                                struct iris_resource *res)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;

   /* Create an array of resources. Combining main and aux planes is easier
    * with indexing as opposed to scanning the linked list.
    */
   struct iris_resource *r[4] = { NULL, };
   unsigned num_planes = 0;
   unsigned num_main_planes = 0;
   for (struct pipe_resource *p_res = &res->base.b; p_res; p_res = p_res->next) {
      r[num_planes] = (struct iris_resource *)p_res;
      num_main_planes += r[num_planes++]->bo != NULL;
   }

   /* Combine main and aux plane information. */
   switch (res->mod_info->modifier) {
   case I915_FORMAT_MOD_Y_TILED_CCS:
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
      assert(num_main_planes == 1 && num_planes == 2);
      import_aux_info(r[0], r[1]);
      map_aux_addresses(screen, r[0], res->external_format, 0);

      /* Add on a clear color BO.
       *
       * Also add some padding to make sure the fast clear color state buffer
       * starts at a 4K alignment to avoid some unknown issues. See the
       * matching comment in iris_resource_create_with_modifiers().
       */
      if (iris_get_aux_clear_color_state_size(screen, res) > 0) {
         res->aux.clear_color_bo =
            iris_bo_alloc(screen->bufmgr, "clear color_buffer",
                          iris_get_aux_clear_color_state_size(screen, res),
                          4096, IRIS_MEMZONE_OTHER, BO_ALLOC_ZEROED);
      }
      break;
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      assert(num_main_planes == 1 && num_planes == 3);
      import_aux_info(r[0], r[1]);
      map_aux_addresses(screen, r[0], res->external_format, 0);

      /* Import the clear color BO. */
      iris_bo_reference(r[2]->aux.clear_color_bo);
      r[0]->aux.clear_color_bo = r[2]->aux.clear_color_bo;
      r[0]->aux.clear_color_offset = r[2]->aux.clear_color_offset;
      r[0]->aux.clear_color_unknown = true;
      break;
   case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
      if (num_main_planes == 1 && num_planes == 2) {
         import_aux_info(r[0], r[1]);
         map_aux_addresses(screen, r[0], res->external_format, 0);
      } else {
         assert(num_main_planes == 2 && num_planes == 4);
         import_aux_info(r[0], r[2]);
         import_aux_info(r[1], r[3]);
         map_aux_addresses(screen, r[0], res->external_format, 0);
         map_aux_addresses(screen, r[1], res->external_format, 1);
      }
      assert(!isl_aux_usage_has_fast_clears(res->mod_info->aux_usage));
      break;
   default:
      assert(res->mod_info->aux_usage == ISL_AUX_USAGE_NONE);
      break;
   }
}

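/**
 * Create a PIPE_BUFFER resource.  Buffers are always linear, carry no
 * aux data, and may be steered into a special memory zone (shader
 * kernels, surface/dynamic/bindless state) via IRIS_RESOURCE_FLAG_* bits.
 */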
static struct pipe_resource *
iris_resource_create_for_buffer(struct pipe_screen *pscreen,
                                const struct pipe_resource *templ)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   assert(templ->target == PIPE_BUFFER);
   assert(templ->height0 <= 1);
   assert(templ->depth0 <= 1);
   assert(templ->format == PIPE_FORMAT_NONE ||
          util_format_get_blocksize(templ->format) == 1);

   res->internal_format = templ->format;
   res->surf.tiling = ISL_TILING_LINEAR;

   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;
   const char *name = templ->target == PIPE_BUFFER ? "buffer" : "miptree";
   if (templ->flags & IRIS_RESOURCE_FLAG_SHADER_MEMZONE) {
      memzone = IRIS_MEMZONE_SHADER;
      name = "shader kernels";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_SURFACE_MEMZONE) {
      memzone = IRIS_MEMZONE_SURFACE;
      name = "surface state";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE) {
      memzone = IRIS_MEMZONE_DYNAMIC;
      name = "dynamic state";
   } else if (templ->flags & IRIS_RESOURCE_FLAG_BINDLESS_MEMZONE) {
      memzone = IRIS_MEMZONE_BINDLESS;
      name = "bindless surface state";
   }

   unsigned flags = iris_resource_alloc_flags(screen, templ, res->aux.usage);

   res->bo =
      iris_bo_alloc(screen->bufmgr, name, templ->width0, 1, memzone, flags);

   if (!res->bo) {
      iris_resource_destroy(pscreen, &res->base.b);
      return NULL;
   }

   if (templ->bind & PIPE_BIND_SHARED) {
      iris_bo_mark_exported(res->bo);
      res->base.is_shared = true;
   }

   return &res->base.b;
}

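/**
 * Create a texture resource, optionally restricted to a caller-supplied
 * list of acceptable modifiers.
 *
 * The main surface, any aux surfaces, and the indirect clear color are
 * packed into a single BO in that order, each aligned appropriately.
 */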
static struct pipe_resource *
iris_resource_create_with_modifiers(struct pipe_screen *pscreen,
                                    const struct pipe_resource *templ,
                                    const uint64_t *modifiers,
                                    int modifiers_count)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   if (!res)
      return NULL;

   uint64_t modifier =
      select_best_modifier(devinfo, templ, modifiers, modifiers_count);

   if (modifier == DRM_FORMAT_MOD_INVALID && modifiers_count > 0) {
      fprintf(stderr, "Unsupported modifier, resource creation failed.\n");
      goto fail;
   }

   UNUSED const bool isl_surf_created_successfully =
      iris_resource_configure_main(screen, res, templ, modifier, 0);
   assert(isl_surf_created_successfully);

   if (!iris_resource_configure_aux(screen, res, false))
      goto fail;

   const char *name = "miptree";
   enum iris_memory_zone memzone = IRIS_MEMZONE_OTHER;

   unsigned flags = iris_resource_alloc_flags(screen, templ, res->aux.usage);

   /* These are for u_upload_mgr buffers only */
   assert(!(templ->flags & (IRIS_RESOURCE_FLAG_SHADER_MEMZONE |
                            IRIS_RESOURCE_FLAG_SURFACE_MEMZONE |
                            IRIS_RESOURCE_FLAG_DYNAMIC_MEMZONE |
                            IRIS_RESOURCE_FLAG_BINDLESS_MEMZONE)));

   /* Modifiers require the aux data to be in the same buffer as the main
    * surface, but we combine them even when a modifier is not being used.
    */
   uint64_t bo_size = res->surf.size_B;

   /* Allocate space for the aux buffer. */
   if (res->aux.surf.size_B > 0) {
      res->aux.offset = ALIGN(bo_size, res->aux.surf.alignment_B);
      bo_size = res->aux.offset + res->aux.surf.size_B;
   }

   /* Allocate space for the extra aux buffer. */
   if (res->aux.extra_aux.surf.size_B > 0) {
      res->aux.extra_aux.offset =
         ALIGN(bo_size, res->aux.extra_aux.surf.alignment_B);
      bo_size = res->aux.extra_aux.offset + res->aux.extra_aux.surf.size_B;
   }

   /* Allocate space for the indirect clear color.
    *
    * Also add some padding to make sure the fast clear color state buffer
    * starts at a 4K alignment. We believe that 256B might be enough, but due
    * to lack of testing we will leave this as 4K for now.
    */
   if (iris_get_aux_clear_color_state_size(screen, res) > 0) {
      res->aux.clear_color_offset = ALIGN(bo_size, 4096);
      bo_size = res->aux.clear_color_offset +
                iris_get_aux_clear_color_state_size(screen, res);
   }

   uint32_t alignment = MAX2(4096, res->surf.alignment_B);
   res->bo =
      iris_bo_alloc(screen->bufmgr, name, bo_size, alignment, memzone, flags);

   if (!res->bo)
      goto fail;

   if (res->aux.usage != ISL_AUX_USAGE_NONE &&
       !iris_resource_init_aux_buf(screen, res))
      goto fail;

   if (templ->bind & PIPE_BIND_SHARED) {
      iris_bo_mark_exported(res->bo);
      res->base.is_shared = true;
   }

   return &res->base.b;

fail:
   fprintf(stderr, "XXX: resource creation failed\n");
   iris_resource_destroy(pscreen, &res->base.b);
   return NULL;
}

static struct pipe_resource *
iris_resource_create(struct pipe_screen *pscreen,
                     const struct pipe_resource *templ)
{
   if (templ->target == PIPE_BUFFER)
      return iris_resource_create_for_buffer(pscreen, templ);
   else
      return iris_resource_create_with_modifiers(pscreen, templ, NULL, 0);
}

static uint64_t
tiling_to_modifier(uint32_t tiling)
{
   static const uint64_t map[] = {
      [I915_TILING_NONE] = DRM_FORMAT_MOD_LINEAR,
      [I915_TILING_X] = I915_FORMAT_MOD_X_TILED,
      [I915_TILING_Y] = I915_FORMAT_MOD_Y_TILED,
   };

   assert(tiling < ARRAY_SIZE(map));

   return map[tiling];
}

static struct pipe_resource *
iris_resource_from_user_memory(struct pipe_screen *pscreen,
                               const struct pipe_resource *templ,
                               void *user_memory)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   assert(templ->target == PIPE_BUFFER);

   res->internal_format = templ->format;
   res->base.is_user_ptr = true;
   res->bo = iris_bo_create_userptr(bufmgr, "user",
                                    user_memory, templ->width0,
                                    IRIS_MEMZONE_OTHER);
   if (!res->bo) {
      iris_resource_destroy(pscreen, &res->base.b);
      return NULL;
   }

   util_range_add(&res->base.b, &res->valid_buffer_range, 0, templ->width0);

   return &res->base.b;
}

static bool
mod_plane_is_clear_color(uint64_t modifier, uint32_t plane)
{
   ASSERTED const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(modifier);
   assert(mod_info);

   switch (modifier) {
   case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
      assert(mod_info->supports_clear_color);
      return plane == 2;
   default:
      assert(!mod_info->supports_clear_color);
      return false;
   }
}

static unsigned
get_num_planes(const struct pipe_resource *resource)
{
   unsigned count = 0;
   for (const struct pipe_resource *cur = resource; cur; cur = cur->next)
      count++;

   return count;
}

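/**
 * Import a resource from a winsys handle (GEM flink name or dma-buf fd).
 *
 * Multi-planar images arrive as one call per plane; planes beyond the
 * format's main planes carry aux data or the clear color, and are merged
 * back into the first plane's resource once the last plane shows up
 * (see iris_resource_finish_aux_import).
 */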
static struct pipe_resource *
iris_resource_from_handle(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct winsys_handle *whandle,
                          unsigned usage)
{
   assert(templ->target != PIPE_BUFFER);

   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);
   if (!res)
      return NULL;

   switch (whandle->type) {
   case WINSYS_HANDLE_TYPE_FD:
      res->bo = iris_bo_import_dmabuf(bufmgr, whandle->handle);
      break;
   case WINSYS_HANDLE_TYPE_SHARED:
      res->bo = iris_bo_gem_create_from_name(bufmgr, "winsys image",
                                             whandle->handle);
      break;
   default:
      unreachable("invalid winsys handle type");
   }
   if (!res->bo)
      goto fail;

   res->offset = whandle->offset;
   res->external_format = whandle->format;

   /* Create a surface for each plane specified by the external format. */
   if (whandle->plane < util_format_get_num_planes(whandle->format)) {
      uint64_t modifier = whandle->modifier;

      if (whandle->modifier == DRM_FORMAT_MOD_INVALID) {
         /* We don't have a modifier; match whatever GEM_GET_TILING says */
         uint32_t tiling;
         iris_gem_get_tiling(res->bo, &tiling);
         modifier = tiling_to_modifier(tiling);
      }

      UNUSED const bool isl_surf_created_successfully =
         iris_resource_configure_main(screen, res, templ, modifier,
                                      whandle->stride);
      assert(isl_surf_created_successfully);

      UNUSED const bool ok = iris_resource_configure_aux(screen, res, true);
      assert(ok);
      /* The gallium dri layer will create a separate plane resource for the
       * aux image. iris_resource_finish_aux_import will merge the separate aux
       * parameters back into a single iris_resource.
       */
   } else if (mod_plane_is_clear_color(whandle->modifier, whandle->plane)) {
      res->aux.clear_color_offset = whandle->offset;
      res->aux.clear_color_bo = res->bo;
      res->bo = NULL;
   } else {
      /* Save modifier import information to reconstruct later. After import,
       * this will be available under a second image accessible from the main
       * image with res->base.next. See iris_resource_finish_aux_import.
       */
      res->aux.surf.row_pitch_B = whandle->stride;
      res->aux.offset = whandle->offset;
      res->aux.bo = res->bo;
      res->bo = NULL;
   }

   if (get_num_planes(&res->base.b) ==
       iris_get_dmabuf_modifier_planes(pscreen, whandle->modifier,
                                       whandle->format)) {
      iris_resource_finish_aux_import(pscreen, res);
   }

   return &res->base.b;

fail:
   iris_resource_destroy(pscreen, &res->base.b);
   return NULL;
}

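/**
 * Create a resource on top of an imported memory object at a byte offset.
 * The main surface is only configured here when the template suggests the
 * resource is likely to be used for texturing.
 */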
static struct pipe_resource *
iris_resource_from_memobj(struct pipe_screen *pscreen,
                          const struct pipe_resource *templ,
                          struct pipe_memory_object *pmemobj,
                          uint64_t offset)
{
   struct iris_screen *screen = (struct iris_screen *)pscreen;
   struct iris_memory_object *memobj = (struct iris_memory_object *)pmemobj;
   struct iris_resource *res = iris_alloc_resource(pscreen, templ);

   if (!res)
      return NULL;

   if (templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) {
      UNUSED const bool isl_surf_created_successfully =
         iris_resource_configure_main(screen, res, templ, DRM_FORMAT_MOD_INVALID, 0);
      assert(isl_surf_created_successfully);
   }

   res->bo = memobj->bo;
   res->offset = offset;
   res->external_format = memobj->format;
   res->internal_format = templ->format;

   iris_bo_reference(memobj->bo);

   return &res->base.b;
}

/* Handle combined depth/stencil with memory objects.
 *
 * This function is modeled after u_transfer_helper_resource_create.
 */
static struct pipe_resource *
iris_resource_from_memobj_wrapper(struct pipe_screen *pscreen,
                                  const struct pipe_resource *templ,
                                  struct pipe_memory_object *pmemobj,
                                  uint64_t offset)
{
   enum pipe_format format = templ->format;

   /* Normal case, no special handling: */
   if (!(util_format_is_depth_and_stencil(format)))
      return iris_resource_from_memobj(pscreen, templ, pmemobj, offset);

   struct pipe_resource t = *templ;
   t.format = util_format_get_depth_only(format);

   struct pipe_resource *prsc =
      iris_resource_from_memobj(pscreen, &t, pmemobj, offset);
   if (!prsc)
      return NULL;

   struct iris_resource *res = (struct iris_resource *) prsc;

   /* Stencil offset in the buffer without aux. */
   uint64_t s_offset = offset +
      ALIGN(res->surf.size_B, res->surf.alignment_B);

   prsc->format = format; /* frob the format back to the "external" format */

   t.format = PIPE_FORMAT_S8_UINT;
   struct pipe_resource *stencil =
      iris_resource_from_memobj(pscreen, &t, pmemobj, s_offset);
   if (!stencil) {
      iris_resource_destroy(pscreen, prsc);
      return NULL;
   }

   iris_resource_set_separate_stencil(prsc, stencil);
   return prsc;
}

static void
iris_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_resource *res = (void *) resource;
   const struct isl_drm_modifier_info *mod = res->mod_info;

   iris_resource_prepare_access(ice, res,
                                0, INTEL_REMAINING_LEVELS,
                                0, INTEL_REMAINING_LAYERS,
                                mod ? mod->aux_usage : ISL_AUX_USAGE_NONE,
                                mod ? mod->supports_clear_color : false);

   if (!res->mod_info && res->aux.usage != ISL_AUX_USAGE_NONE) {
      /* flush_resource may be used to prepare an image for sharing external
       * to the driver (e.g. via eglCreateImage). To account for this, make
       * sure to get rid of any compression that a consumer wouldn't know how
       * to handle.
       */
      iris_foreach_batch(ice, batch) {
         if (iris_batch_references(batch, res->bo))
            iris_batch_flush(batch);
      }

      iris_resource_disable_aux(res);
   }
}

/**
 * Reallocate a (non-external) resource into new storage, copying the data
 * and modifying the original resource to point at the new storage.
 *
 * This is useful for e.g. moving a suballocated internal resource to a
 * dedicated allocation that can be exported by itself.
 */
static void
iris_reallocate_resource_inplace(struct iris_context *ice,
                                 struct iris_resource *old_res,
                                 unsigned new_bind_flag)
{
   struct pipe_screen *pscreen = ice->ctx.screen;

   if (iris_bo_is_external(old_res->bo))
      return;

   assert(old_res->mod_info == NULL);
   assert(old_res->bo == old_res->aux.bo || old_res->aux.bo == NULL);
   assert(old_res->bo == old_res->aux.clear_color_bo ||
          old_res->aux.clear_color_bo == NULL);
   assert(old_res->external_format == PIPE_FORMAT_NONE);

   struct pipe_resource templ = old_res->base.b;
   templ.bind |= new_bind_flag;

   struct iris_resource *new_res =
      (void *) pscreen->resource_create(pscreen, &templ);

   assert(iris_bo_is_real(new_res->bo));

   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   if (old_res->base.b.target == PIPE_BUFFER) {
      struct pipe_box box = (struct pipe_box) {
         .width = old_res->base.b.width0,
         .height = 1,
      };

      iris_copy_region(&ice->blorp, batch, &new_res->base.b, 0, 0, 0, 0,
                       &old_res->base.b, 0, &box);
   } else {
      for (unsigned l = 0; l <= templ.last_level; l++) {
         struct pipe_box box = (struct pipe_box) {
            .width = u_minify(templ.width0, l),
            .height = u_minify(templ.height0, l),
            .depth = util_num_layers(&templ, l),
         };

         iris_copy_region(&ice->blorp, batch, &new_res->base.b, l, 0, 0, 0,
                          &old_res->base.b, l, &box);
      }
   }

   iris_flush_resource(&ice->ctx, &new_res->base.b);

   struct iris_bo *old_bo = old_res->bo;
   struct iris_bo *old_aux_bo = old_res->aux.bo;
   struct iris_bo *old_clear_color_bo = old_res->aux.clear_color_bo;

   /* Replace the structure fields with the new ones */
   old_res->base.b.bind = templ.bind;
   old_res->bo = new_res->bo;
   old_res->aux.surf = new_res->aux.surf;
   old_res->aux.bo = new_res->aux.bo;
   old_res->aux.offset = new_res->aux.offset;
   old_res->aux.extra_aux.surf = new_res->aux.extra_aux.surf;
   old_res->aux.extra_aux.offset = new_res->aux.extra_aux.offset;
   old_res->aux.clear_color_bo = new_res->aux.clear_color_bo;
   old_res->aux.clear_color_offset = new_res->aux.clear_color_offset;
   old_res->aux.usage = new_res->aux.usage;

   if (new_res->aux.state) {
      assert(old_res->aux.state);
      for (unsigned l = 0; l <= templ.last_level; l++) {
         unsigned layers = util_num_layers(&templ, l);
         for (unsigned z = 0; z < layers; z++) {
            enum isl_aux_state aux =
               iris_resource_get_aux_state(new_res, l, z);
            iris_resource_set_aux_state(ice, old_res, l, z, 1, aux);
         }
      }
   }

   /* old_res now points at the new BOs, make new_res point at the old ones
    * so they'll be freed when we unreference the resource below.
    */
   new_res->bo = old_bo;
   new_res->aux.bo = old_aux_bo;
   new_res->aux.clear_color_bo = old_clear_color_bo;

   pipe_resource_reference((struct pipe_resource **)&new_res, NULL);
}

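/**
 * Ensure an about-to-be-exported resource lives in its own real BO:
 * suballocated storage can't be shared, so reallocate it in place with
 * PIPE_BIND_SHARED, inventing a temporary context for the blit if the
 * caller didn't provide one.
 */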
static void
iris_resource_disable_suballoc_on_first_query(struct pipe_screen *pscreen,
                                              struct pipe_context *ctx,
                                              struct iris_resource *res)
{
   if (iris_bo_is_real(res->bo))
      return;

   assert(!(res->base.b.bind & PIPE_BIND_SHARED));

   bool destroy_context;
   if (ctx) {
      ctx = threaded_context_unwrap_sync(ctx);
      destroy_context = false;
   } else {
      /* We need to execute a blit on some GPU context, but the DRI layer
       * often doesn't give us one. So we have to invent a temporary one.
       *
       * We can't store a permanent context in the screen, as it would cause
       * circular refcounting where screens reference contexts that reference
       * resources, while resources reference screens...causing nothing to be
       * freed. So we just create and destroy a temporary one here.
       */
      ctx = iris_create_context(pscreen, NULL, 0);
      destroy_context = true;
   }

   struct iris_context *ice = (struct iris_context *)ctx;

   iris_reallocate_resource_inplace(ice, res, PIPE_BIND_SHARED);
   assert(res->base.b.bind & PIPE_BIND_SHARED);

   if (destroy_context)
      iris_destroy_context(ctx);
}


static void
iris_resource_disable_aux_on_first_query(struct pipe_resource *resource,
                                         unsigned usage)
{
   struct iris_resource *res = (struct iris_resource *)resource;
   bool mod_with_aux =
      res->mod_info && res->mod_info->aux_usage != ISL_AUX_USAGE_NONE;

   /* Disable aux usage if explicit flush not set and this is the first time
    * we are dealing with this resource and the resource was not created with
    * a modifier with aux.
    */
   if (!mod_with_aux &&
       (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) && res->aux.usage != 0) &&
       p_atomic_read(&resource->reference.count) == 1) {
      iris_resource_disable_aux(res);
   }
}

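/**
 * Implements pipe_screen::resource_get_param, reporting per-plane layout
 * (plane count, stride, offset, modifier) and exporting handles as a
 * flink name, KMS handle, or dma-buf fd.
 */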
1554 static bool
iris_resource_get_param(struct pipe_screen * pscreen,struct pipe_context * ctx,struct pipe_resource * resource,unsigned plane,unsigned layer,unsigned level,enum pipe_resource_param param,unsigned handle_usage,uint64_t * value)1555 iris_resource_get_param(struct pipe_screen *pscreen,
1556 struct pipe_context *ctx,
1557 struct pipe_resource *resource,
1558 unsigned plane,
1559 unsigned layer,
1560 unsigned level,
1561 enum pipe_resource_param param,
1562 unsigned handle_usage,
1563 uint64_t *value)
1564 {
1565 struct iris_screen *screen = (struct iris_screen *)pscreen;
1566 struct iris_resource *res = (struct iris_resource *)resource;
1567 bool mod_with_aux =
1568 res->mod_info && res->mod_info->aux_usage != ISL_AUX_USAGE_NONE;
1569 bool wants_aux = mod_with_aux && plane > 0;
1570 bool wants_cc = mod_with_aux &&
1571 mod_plane_is_clear_color(res->mod_info->modifier, plane);
1572 bool result;
1573 unsigned handle;
1574
1575 iris_resource_disable_aux_on_first_query(resource, handle_usage);
1576 iris_resource_disable_suballoc_on_first_query(pscreen, ctx, res);
1577
1578 struct iris_bo *bo = wants_cc ? res->aux.clear_color_bo :
1579 wants_aux ? res->aux.bo : res->bo;
1580
1581 assert(iris_bo_is_real(bo));
1582
1583 switch (param) {
1584 case PIPE_RESOURCE_PARAM_NPLANES:
1585 if (mod_with_aux) {
1586 *value = iris_get_dmabuf_modifier_planes(pscreen,
1587 res->mod_info->modifier,
1588 res->external_format);
1589 } else {
1590 *value = get_num_planes(&res->base.b);
1591 }
1592 return true;
1593 case PIPE_RESOURCE_PARAM_STRIDE:
1594 *value = wants_cc ? 1 :
1595 wants_aux ? res->aux.surf.row_pitch_B : res->surf.row_pitch_B;
1596
1597 /* Mesa's implementation of eglCreateImage rejects strides of zero (see
1598 * dri2_check_dma_buf_attribs). Ensure we return a non-zero stride as
1599 * this value may be queried from GBM and passed into EGL.
1600 */
1601 assert(*value);
1602
1603 return true;
1604 case PIPE_RESOURCE_PARAM_OFFSET:
1605 *value = wants_cc ? res->aux.clear_color_offset :
1606 wants_aux ? res->aux.offset : 0;
1607 return true;
1608 case PIPE_RESOURCE_PARAM_MODIFIER:
1609 *value = res->mod_info ? res->mod_info->modifier :
1610 tiling_to_modifier(isl_tiling_to_i915_tiling(res->surf.tiling));
1611 return true;
1612 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED:
1613 if (!wants_aux)
1614 iris_gem_set_tiling(bo, &res->surf);
1615
1616 result = iris_bo_flink(bo, &handle) == 0;
1617 if (result)
1618 *value = handle;
1619 return result;
1620 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS: {
1621 if (!wants_aux)
1622 iris_gem_set_tiling(bo, &res->surf);
1623
1624 /* Because we share the same drm file across multiple iris_screen, when
1625 * we export a GEM handle we must make sure it is valid in the DRM file
1626 * descriptor the caller is using (this is the FD given at screen
1627 * creation).
1628 */
1629 uint32_t handle;
1630 if (iris_bo_export_gem_handle_for_device(bo, screen->winsys_fd, &handle))
1631 return false;
1632 *value = handle;
1633 return true;
1634 }
1635
1636 case PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD:
1637 if (!wants_aux)
1638 iris_gem_set_tiling(bo, &res->surf);
1639
1640 result = iris_bo_export_dmabuf(bo, (int *) &handle) == 0;
1641 if (result)
1642 *value = handle;
1643 return result;
1644 default:
1645 return false;
1646 }
1647 }
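
/* A sketch of how a frontend might drive this hook to assemble dmabuf
 * export metadata plane by plane (error handling omitted; `prsc` is an
 * assumed exported resource):
 *
 *    uint64_t nplanes, modifier, stride, offset;
 *    pscreen->resource_get_param(pscreen, ctx, prsc, 0, 0, 0,
 *                                PIPE_RESOURCE_PARAM_NPLANES, 0, &nplanes);
 *    pscreen->resource_get_param(pscreen, ctx, prsc, 0, 0, 0,
 *                                PIPE_RESOURCE_PARAM_MODIFIER, 0, &modifier);
 *    for (unsigned p = 0; p < nplanes; p++) {
 *       pscreen->resource_get_param(pscreen, ctx, prsc, p, 0, 0,
 *                                   PIPE_RESOURCE_PARAM_STRIDE, 0, &stride);
 *       pscreen->resource_get_param(pscreen, ctx, prsc, p, 0, 0,
 *                                   PIPE_RESOURCE_PARAM_OFFSET, 0, &offset);
 *    }
 */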
1648
1649 static bool
1650 iris_resource_get_handle(struct pipe_screen *pscreen,
1651 struct pipe_context *ctx,
1652 struct pipe_resource *resource,
1653 struct winsys_handle *whandle,
1654 unsigned usage)
1655 {
1656 struct iris_screen *screen = (struct iris_screen *) pscreen;
1657 struct iris_resource *res = (struct iris_resource *)resource;
1658 bool mod_with_aux =
1659 res->mod_info && res->mod_info->aux_usage != ISL_AUX_USAGE_NONE;
1660
1661 iris_resource_disable_aux_on_first_query(resource, usage);
1662 iris_resource_disable_suballoc_on_first_query(pscreen, ctx, res);
1663
1664 assert(iris_bo_is_real(res->bo));
1665
1666 struct iris_bo *bo;
1667 if (res->mod_info &&
1668 mod_plane_is_clear_color(res->mod_info->modifier, whandle->plane)) {
1669 bo = res->aux.clear_color_bo;
1670 whandle->offset = res->aux.clear_color_offset;
1671 } else if (mod_with_aux && whandle->plane > 0) {
1672 bo = res->aux.bo;
1673 whandle->stride = res->aux.surf.row_pitch_B;
1674 whandle->offset = res->aux.offset;
1675 } else {
1676 /* If this is a buffer, stride should be 0 - no need to special case */
1677 whandle->stride = res->surf.row_pitch_B;
1678 bo = res->bo;
1679 }
1680
1681 whandle->format = res->external_format;
1682 whandle->modifier =
1683 res->mod_info ? res->mod_info->modifier
1684 : tiling_to_modifier(isl_tiling_to_i915_tiling(res->surf.tiling));
1685
1686 #ifndef NDEBUG
1687 enum isl_aux_usage allowed_usage =
1688 usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH ? res->aux.usage :
1689 res->mod_info ? res->mod_info->aux_usage : ISL_AUX_USAGE_NONE;
1690
1691 if (res->aux.usage != allowed_usage) {
1692 enum isl_aux_state aux_state = iris_resource_get_aux_state(res, 0, 0);
1693 assert(aux_state == ISL_AUX_STATE_RESOLVED ||
1694 aux_state == ISL_AUX_STATE_PASS_THROUGH);
1695 }
1696 #endif
1697
1698 switch (whandle->type) {
1699 case WINSYS_HANDLE_TYPE_SHARED:
1700 iris_gem_set_tiling(bo, &res->surf);
1701 return iris_bo_flink(bo, &whandle->handle) == 0;
1702 case WINSYS_HANDLE_TYPE_KMS: {
1703 iris_gem_set_tiling(bo, &res->surf);
1704
1705 /* Because we share the same drm file across multiple iris_screen, when
1706 * we export a GEM handle we must make sure it is valid in the DRM file
1707 * descriptor the caller is using (this is the FD given at screen
1708 * creation).
1709 */
1710 uint32_t handle;
1711 if (iris_bo_export_gem_handle_for_device(bo, screen->winsys_fd, &handle))
1712 return false;
1713 whandle->handle = handle;
1714 return true;
1715 }
1716 case WINSYS_HANDLE_TYPE_FD:
1717 iris_gem_set_tiling(bo, &res->surf);
1718 return iris_bo_export_dmabuf(bo, (int *) &whandle->handle) == 0;
1719 }
1720
1721 return false;
1722 }
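
/* With an aux-carrying modifier such as I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
 * a two-plane export might look like this (hypothetical sketch):
 *
 *    struct winsys_handle main_h = { .type = WINSYS_HANDLE_TYPE_FD, .plane = 0 };
 *    struct winsys_handle ccs_h  = { .type = WINSYS_HANDLE_TYPE_FD, .plane = 1 };
 *    pscreen->resource_get_handle(pscreen, ctx, prsc, &main_h, 0);
 *    pscreen->resource_get_handle(pscreen, ctx, prsc, &ccs_h, 0);
 *
 * Plane 0 returns the primary surface; plane 1 takes the `mod_with_aux`
 * branch above and returns the aux (CCS) buffer with its own stride/offset.
 */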
1723
1724 static bool
1725 resource_is_busy(struct iris_context *ice,
1726 struct iris_resource *res)
1727 {
1728 bool busy = iris_bo_busy(res->bo);
1729
1730 iris_foreach_batch(ice, batch)
1731 busy |= iris_batch_references(batch, res->bo);
1732
1733 return busy;
1734 }
1735
1736 void
1737 iris_replace_buffer_storage(struct pipe_context *ctx,
1738 struct pipe_resource *p_dst,
1739 struct pipe_resource *p_src,
1740 unsigned num_rebinds,
1741 uint32_t rebind_mask,
1742 uint32_t delete_buffer_id)
1743 {
1744 struct iris_screen *screen = (void *) ctx->screen;
1745 struct iris_context *ice = (void *) ctx;
1746 struct iris_resource *dst = (void *) p_dst;
1747 struct iris_resource *src = (void *) p_src;
1748
1749 assert(memcmp(&dst->surf, &src->surf, sizeof(dst->surf)) == 0);
1750
1751 struct iris_bo *old_bo = dst->bo;
1752
1753 /* Swap out the backing storage */
1754 iris_bo_reference(src->bo);
1755 dst->bo = src->bo;
1756
1757 /* Rebind the buffer, replacing any state referring to the old BO's
1758 * address, and marking state dirty so it's reemitted.
1759 */
1760 screen->vtbl.rebind_buffer(ice, dst);
1761
1762 iris_bo_unreference(old_bo);
1763 }
1764
1765 static void
1766 iris_invalidate_resource(struct pipe_context *ctx,
1767 struct pipe_resource *resource)
1768 {
1769 struct iris_screen *screen = (void *) ctx->screen;
1770 struct iris_context *ice = (void *) ctx;
1771 struct iris_resource *res = (void *) resource;
1772
1773 if (resource->target != PIPE_BUFFER)
1774 return;
1775
1776 /* If it's already invalidated, don't bother doing anything. */
1777 if (res->valid_buffer_range.start > res->valid_buffer_range.end)
1778 return;
1779
1780 if (!resource_is_busy(ice, res)) {
1781 /* The resource is idle, so just mark that it contains no data and
1782 * keep using the same underlying buffer object.
1783 */
1784 util_range_set_empty(&res->valid_buffer_range);
1785 return;
1786 }
1787
1788 /* Otherwise, try and replace the backing storage with a new BO. */
1789
1790 /* We can't reallocate memory we didn't allocate in the first place. */
1791 if (res->bo->gem_handle && res->bo->real.userptr)
1792 return;
1793
1794 struct iris_bo *old_bo = res->bo;
1795 struct iris_bo *new_bo =
1796 iris_bo_alloc(screen->bufmgr, res->bo->name, resource->width0, 1,
1797 iris_memzone_for_address(old_bo->address), 0);
1798 if (!new_bo)
1799 return;
1800
1801 /* Swap out the backing storage */
1802 res->bo = new_bo;
1803
1804 /* Rebind the buffer, replacing any state referring to the old BO's
1805 * address, and marking state dirty so it's reemitted.
1806 */
1807 screen->vtbl.rebind_buffer(ice, res);
1808
1809 util_range_set_empty(&res->valid_buffer_range);
1810
1811 iris_bo_unreference(old_bo);
1812 }
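
/* This is the classic buffer "orphaning" optimization: a frontend doing the
 * equivalent of glBufferData(..., NULL) reaches this hook, after which an
 * unsynchronized write is safe (hypothetical sketch; `buf` assumed):
 *
 *    ctx->invalidate_resource(ctx, buf);
 *    ptr = pipe_buffer_map(ctx, buf,
 *                          PIPE_MAP_WRITE | PIPE_MAP_UNSYNCHRONIZED, &xfer);
 *
 * If the GPU is still using the old contents, the buffer silently gets a
 * fresh BO at a new address, so the CPU write needs no stall.
 */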
1813
1814 static void
1815 iris_flush_staging_region(struct pipe_transfer *xfer,
1816 const struct pipe_box *flush_box)
1817 {
1818 if (!(xfer->usage & PIPE_MAP_WRITE))
1819 return;
1820
1821 struct iris_transfer *map = (void *) xfer;
1822
1823 struct pipe_box src_box = *flush_box;
1824
1825 /* Account for extra alignment padding in staging buffer */
1826 if (xfer->resource->target == PIPE_BUFFER)
1827 src_box.x += xfer->box.x % IRIS_MAP_BUFFER_ALIGNMENT;
1828
1829 struct pipe_box dst_box = (struct pipe_box) {
1830 .x = xfer->box.x + flush_box->x,
1831 .y = xfer->box.y + flush_box->y,
1832 .z = xfer->box.z + flush_box->z,
1833 .width = flush_box->width,
1834 .height = flush_box->height,
1835 .depth = flush_box->depth,
1836 };
1837
1838 iris_copy_region(map->blorp, map->batch, xfer->resource, xfer->level,
1839 dst_box.x, dst_box.y, dst_box.z, map->staging, 0,
1840 &src_box);
1841 }
1842
1843 static void
1844 iris_unmap_copy_region(struct iris_transfer *map)
1845 {
1846 iris_resource_destroy(map->staging->screen, map->staging);
1847
1848 map->ptr = NULL;
1849 }
1850
1851 static void
1852 iris_map_copy_region(struct iris_transfer *map)
1853 {
1854 struct pipe_screen *pscreen = &map->batch->screen->base;
1855 struct pipe_transfer *xfer = &map->base.b;
1856 struct pipe_box *box = &xfer->box;
1857 struct iris_resource *res = (void *) xfer->resource;
1858
1859 unsigned extra = xfer->resource->target == PIPE_BUFFER ?
1860 box->x % IRIS_MAP_BUFFER_ALIGNMENT : 0;
1861
1862 struct pipe_resource templ = (struct pipe_resource) {
1863 .usage = PIPE_USAGE_STAGING,
1864 .width0 = box->width + extra,
1865 .height0 = box->height,
1866 .depth0 = 1,
1867 .nr_samples = xfer->resource->nr_samples,
1868 .nr_storage_samples = xfer->resource->nr_storage_samples,
1869 .array_size = box->depth,
1870 .format = res->internal_format,
1871 };
1872
1873 if (xfer->resource->target == PIPE_BUFFER)
1874 templ.target = PIPE_BUFFER;
1875 else if (templ.array_size > 1)
1876 templ.target = PIPE_TEXTURE_2D_ARRAY;
1877 else
1878 templ.target = PIPE_TEXTURE_2D;
1879
1880 map->staging = iris_resource_create(pscreen, &templ);
1881 assert(map->staging);
1882
1883 if (templ.target != PIPE_BUFFER) {
1884 struct isl_surf *surf = &((struct iris_resource *) map->staging)->surf;
1885 xfer->stride = isl_surf_get_row_pitch_B(surf);
1886 xfer->layer_stride = isl_surf_get_array_pitch(surf);
1887 }
1888
1889 if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
1890 iris_copy_region(map->blorp, map->batch, map->staging, 0, extra, 0, 0,
1891 xfer->resource, xfer->level, box);
1892 /* Ensure writes to the staging BO land before we map it below. */
1893 iris_emit_pipe_control_flush(map->batch,
1894 "transfer read: flush before mapping",
1895 PIPE_CONTROL_RENDER_TARGET_FLUSH |
1896 PIPE_CONTROL_TILE_CACHE_FLUSH |
1897 PIPE_CONTROL_CS_STALL);
1898 }
1899
1900 struct iris_bo *staging_bo = iris_resource_bo(map->staging);
1901
1902 if (iris_batch_references(map->batch, staging_bo))
1903 iris_batch_flush(map->batch);
1904
1905 map->ptr =
1906 iris_bo_map(map->dbg, staging_bo, xfer->usage & MAP_FLAGS) + extra;
1907
1908 map->unmap = iris_unmap_copy_region;
1909 }
1910
1911 static void
1912 get_image_offset_el(const struct isl_surf *surf, unsigned level, unsigned z,
1913 unsigned *out_x0_el, unsigned *out_y0_el)
1914 {
1915 ASSERTED uint32_t z0_el, a0_el;
1916 if (surf->dim == ISL_SURF_DIM_3D) {
1917 isl_surf_get_image_offset_el(surf, level, 0, z,
1918 out_x0_el, out_y0_el, &z0_el, &a0_el);
1919 } else {
1920 isl_surf_get_image_offset_el(surf, level, z, 0,
1921 out_x0_el, out_y0_el, &z0_el, &a0_el);
1922 }
1923 assert(z0_el == 0 && a0_el == 0);
1924 }
1925
1926 /**
1927 * Get pointer offset into stencil buffer.
1928 *
1929 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we
1930 * must decode the tile's layout in software.
1931 *
1932 * See
1933 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile
1934 * Format.
1935 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm
1936 *
1937 * Even though the returned offset is always positive, the return type is
1938 * signed due to
1939 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137
1940 * mesa: Fix return type of _mesa_get_format_bytes() (#37351)
1941 */
1942 static intptr_t
1943 s8_offset(uint32_t stride, uint32_t x, uint32_t y)
1944 {
1945 uint32_t tile_size = 4096;
1946 uint32_t tile_width = 64;
1947 uint32_t tile_height = 64;
1948 uint32_t row_size = 64 * stride / 2; /* Two rows are interleaved. */
1949
1950 uint32_t tile_x = x / tile_width;
1951 uint32_t tile_y = y / tile_height;
1952
1953 /* The byte's address relative to the tile's base address. */
1954 uint32_t byte_x = x % tile_width;
1955 uint32_t byte_y = y % tile_height;
1956
1957 uintptr_t u = tile_y * row_size
1958 + tile_x * tile_size
1959 + 512 * (byte_x / 8)
1960 + 64 * (byte_y / 8)
1961 + 32 * ((byte_y / 4) % 2)
1962 + 16 * ((byte_x / 4) % 2)
1963 + 8 * ((byte_y / 2) % 2)
1964 + 4 * ((byte_x / 2) % 2)
1965 + 2 * (byte_y % 2)
1966 + 1 * (byte_x % 2);
1967
1968 return u;
1969 }
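
/* A worked example: with stride = 512 and pixel (x, y) = (17, 5), the byte
 * lands in tile (0, 0) with byte_x = 17 and byte_y = 5, so
 *
 *    u = 512 * (17 / 8)         // 1024
 *      +  64 * ( 5 / 8)         //    0
 *      +  32 * (( 5 / 4) % 2)   //   32
 *      +  16 * ((17 / 4) % 2)   //    0
 *      +   8 * (( 5 / 2) % 2)   //    0
 *      +   4 * ((17 / 2) % 2)   //    0
 *      +   2 * ( 5 % 2)         //    2
 *      +   1 * (17 % 2)         //    1
 *      = 1059
 */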
1970
1971 static void
1972 iris_unmap_s8(struct iris_transfer *map)
1973 {
1974 struct pipe_transfer *xfer = &map->base.b;
1975 const struct pipe_box *box = &xfer->box;
1976 struct iris_resource *res = (struct iris_resource *) xfer->resource;
1977 struct isl_surf *surf = &res->surf;
1978
1979 if (xfer->usage & PIPE_MAP_WRITE) {
1980 uint8_t *untiled_s8_map = map->ptr;
1981 uint8_t *tiled_s8_map =
1982 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
1983
1984 for (int s = 0; s < box->depth; s++) {
1985 unsigned x0_el, y0_el;
1986 get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);
1987
1988 for (uint32_t y = 0; y < box->height; y++) {
1989 for (uint32_t x = 0; x < box->width; x++) {
1990 ptrdiff_t offset = s8_offset(surf->row_pitch_B,
1991 x0_el + box->x + x,
1992 y0_el + box->y + y);
1993 tiled_s8_map[offset] =
1994 untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x];
1995 }
1996 }
1997 }
1998 }
1999
2000 free(map->buffer);
2001 }
2002
2003 static void
2004 iris_map_s8(struct iris_transfer *map)
2005 {
2006 struct pipe_transfer *xfer = &map->base.b;
2007 const struct pipe_box *box = &xfer->box;
2008 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2009 struct isl_surf *surf = &res->surf;
2010
2011 xfer->stride = surf->row_pitch_B;
2012 xfer->layer_stride = xfer->stride * box->height;
2013
2014 /* The s8 path below detiles one byte at a time via s8_offset(), so the
2015 * linear buffer needs no special alignment here; a plain malloc of the
2016 * detiled size is sufficient.
2017 */
2018 map->buffer = map->ptr = malloc(xfer->layer_stride * box->depth);
2019 assert(map->buffer);
2020
2021 /* At least one of PIPE_MAP_READ and PIPE_MAP_WRITE is set. A read
2022 * implies no PIPE_MAP_DISCARD_RANGE. A write needs the original values
2023 * read in unless discard is set, since we'll be writing the whole
2024 * rectangle from our temporary buffer back out.
2025 */
2026 if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
2027 uint8_t *untiled_s8_map = map->ptr;
2028 uint8_t *tiled_s8_map =
2029 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
2030
2031 for (int s = 0; s < box->depth; s++) {
2032 unsigned x0_el, y0_el;
2033 get_image_offset_el(surf, xfer->level, box->z + s, &x0_el, &y0_el);
2034
2035 for (uint32_t y = 0; y < box->height; y++) {
2036 for (uint32_t x = 0; x < box->width; x++) {
2037 ptrdiff_t offset = s8_offset(surf->row_pitch_B,
2038 x0_el + box->x + x,
2039 y0_el + box->y + y);
2040 untiled_s8_map[s * xfer->layer_stride + y * xfer->stride + x] =
2041 tiled_s8_map[offset];
2042 }
2043 }
2044 }
2045 }
2046
2047 map->unmap = iris_unmap_s8;
2048 }
2049
2050 /* Compute extent parameters for use with tiled_memcpy functions.
2051 * xs are in units of bytes and ys are in units of strides.
2052 */
2053 static inline void
2054 tile_extents(const struct isl_surf *surf,
2055 const struct pipe_box *box,
2056 unsigned level, int z,
2057 unsigned *x1_B, unsigned *x2_B,
2058 unsigned *y1_el, unsigned *y2_el)
2059 {
2060 const struct isl_format_layout *fmtl = isl_format_get_layout(surf->format);
2061 const unsigned cpp = fmtl->bpb / 8;
2062
2063 assert(box->x % fmtl->bw == 0);
2064 assert(box->y % fmtl->bh == 0);
2065
2066 unsigned x0_el, y0_el;
2067 get_image_offset_el(surf, level, box->z + z, &x0_el, &y0_el);
2068
2069 *x1_B = (box->x / fmtl->bw + x0_el) * cpp;
2070 *y1_el = box->y / fmtl->bh + y0_el;
2071 *x2_B = (DIV_ROUND_UP(box->x + box->width, fmtl->bw) + x0_el) * cpp;
2072 *y2_el = DIV_ROUND_UP(box->y + box->height, fmtl->bh) + y0_el;
2073 }
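
/* A worked example, assuming a BC1-compressed surface (4x4 blocks at
 * 8 bytes each, so bw = bh = 4 and cpp = 8), a level image offset of
 * (0, 0), and box = { .x = 8, .y = 8, .width = 16, .height = 16 }:
 *
 *    x1_B  = (8 / 4) * 8             = 16
 *    y1_el =  8 / 4                  =  2
 *    x2_B  = DIV_ROUND_UP(24, 4) * 8 = 48
 *    y2_el = DIV_ROUND_UP(24, 4)     =  6
 *
 * i.e. the copy covers block columns 2..5 (expressed in bytes) and block
 * rows 2..5.
 */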
2074
2075 static void
2076 iris_unmap_tiled_memcpy(struct iris_transfer *map)
2077 {
2078 struct pipe_transfer *xfer = &map->base.b;
2079 const struct pipe_box *box = &xfer->box;
2080 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2081 struct isl_surf *surf = &res->surf;
2082
2083 const bool has_swizzling = false;
2084
2085 if (xfer->usage & PIPE_MAP_WRITE) {
2086 char *dst =
2087 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
2088
2089 for (int s = 0; s < box->depth; s++) {
2090 unsigned x1, x2, y1, y2;
2091 tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
2092
2093 void *ptr = map->ptr + s * xfer->layer_stride;
2094
2095 isl_memcpy_linear_to_tiled(x1, x2, y1, y2, dst, ptr,
2096 surf->row_pitch_B, xfer->stride,
2097 has_swizzling, surf->tiling, ISL_MEMCPY);
2098 }
2099 }
2100 os_free_aligned(map->buffer);
2101 map->buffer = map->ptr = NULL;
2102 }
2103
2104 static void
2105 iris_map_tiled_memcpy(struct iris_transfer *map)
2106 {
2107 struct pipe_transfer *xfer = &map->base.b;
2108 const struct pipe_box *box = &xfer->box;
2109 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2110 struct isl_surf *surf = &res->surf;
2111
2112 xfer->stride = ALIGN(surf->row_pitch_B, 16);
2113 xfer->layer_stride = xfer->stride * box->height;
2114
2115 unsigned x1, x2, y1, y2;
2116 tile_extents(surf, box, xfer->level, 0, &x1, &x2, &y1, &y2);
2117
2118 /* The tiling and detiling functions require a 16-byte-aligned linear
2119 * buffer (that is, its `x0` is 16-byte aligned), as the SSE copy paths
2120 * use 16-byte accesses. Here we over-allocate to get that alignment.
2121 */
2122 map->buffer =
2123 os_malloc_aligned(xfer->layer_stride * box->depth, 16);
2124 assert(map->buffer);
2125 map->ptr = (char *)map->buffer + (x1 & 0xf);
2126
2127 const bool has_swizzling = false;
2128
2129 if (!(xfer->usage & PIPE_MAP_DISCARD_RANGE)) {
2130 char *src =
2131 iris_bo_map(map->dbg, res->bo, (xfer->usage | MAP_RAW) & MAP_FLAGS);
2132
2133 for (int s = 0; s < box->depth; s++) {
2134 unsigned x1, x2, y1, y2;
2135 tile_extents(surf, box, xfer->level, s, &x1, &x2, &y1, &y2);
2136
2137 /* Use 's' rather than 'box->z' to rebase the first slice to 0. */
2138 void *ptr = map->ptr + s * xfer->layer_stride;
2139
2140 isl_memcpy_tiled_to_linear(x1, x2, y1, y2, ptr, src, xfer->stride,
2141 surf->row_pitch_B, has_swizzling,
2142 surf->tiling, ISL_MEMCPY_STREAMING_LOAD);
2143 }
2144 }
2145
2146 map->unmap = iris_unmap_tiled_memcpy;
2147 }
2148
2149 static void
2150 iris_map_direct(struct iris_transfer *map)
2151 {
2152 struct pipe_transfer *xfer = &map->base.b;
2153 struct pipe_box *box = &xfer->box;
2154 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2155
2156 void *ptr = iris_bo_map(map->dbg, res->bo, xfer->usage & MAP_FLAGS);
2157
2158 if (res->base.b.target == PIPE_BUFFER) {
2159 xfer->stride = 0;
2160 xfer->layer_stride = 0;
2161
2162 map->ptr = ptr + box->x;
2163 } else {
2164 struct isl_surf *surf = &res->surf;
2165 const struct isl_format_layout *fmtl =
2166 isl_format_get_layout(surf->format);
2167 const unsigned cpp = fmtl->bpb / 8;
2168 unsigned x0_el, y0_el;
2169
2170 assert(box->x % fmtl->bw == 0);
2171 assert(box->y % fmtl->bh == 0);
2172 get_image_offset_el(surf, xfer->level, box->z, &x0_el, &y0_el);
2173
2174 x0_el += box->x / fmtl->bw;
2175 y0_el += box->y / fmtl->bh;
2176
2177 xfer->stride = isl_surf_get_row_pitch_B(surf);
2178 xfer->layer_stride = isl_surf_get_array_pitch(surf);
2179
2180 map->ptr = ptr + y0_el * xfer->stride + x0_el * cpp;
2181 }
2182 }
2183
2184 static bool
2185 can_promote_to_async(const struct iris_resource *res,
2186 const struct pipe_box *box,
2187 enum pipe_map_flags usage)
2188 {
2189 /* If we're writing to a section of the buffer that hasn't even been
2190 * initialized with useful data, then we can safely promote this write
2191 * to be unsynchronized. This helps the common pattern of appending data.
2192 */
2193 return res->base.b.target == PIPE_BUFFER && (usage & PIPE_MAP_WRITE) &&
2194 !(usage & TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED) &&
2195 !util_ranges_intersect(&res->valid_buffer_range, box->x,
2196 box->x + box->width);
2197 }
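
/* This helps the common streaming pattern of appending to a buffer with
 * successive maps of disjoint, never-yet-written ranges (hypothetical
 * sketch; `buf`, `src`, and `chunk` assumed):
 *
 *    for (unsigned i = 0; i < n; i++) {
 *       u_box_1d(i * chunk, chunk, &box);   // fresh range each iteration
 *       ptr = ctx->buffer_map(ctx, buf, 0, PIPE_MAP_WRITE, &box, &xfer);
 *       memcpy(ptr, src[i], chunk);
 *       ctx->buffer_unmap(ctx, xfer);
 *    }
 *
 * Each write misses valid_buffer_range, so it is promoted to
 * PIPE_MAP_UNSYNCHRONIZED and never stalls on earlier GPU use of `buf`.
 */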
2198
2199 static void *
2200 iris_transfer_map(struct pipe_context *ctx,
2201 struct pipe_resource *resource,
2202 unsigned level,
2203 enum pipe_map_flags usage,
2204 const struct pipe_box *box,
2205 struct pipe_transfer **ptransfer)
2206 {
2207 struct iris_context *ice = (struct iris_context *)ctx;
2208 struct iris_resource *res = (struct iris_resource *)resource;
2209 struct isl_surf *surf = &res->surf;
2210
2211 if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
2212 /* Replace the backing storage with a fresh buffer for non-async maps */
2213 if (!(usage & (PIPE_MAP_UNSYNCHRONIZED |
2214 TC_TRANSFER_MAP_NO_INVALIDATE)))
2215 iris_invalidate_resource(ctx, resource);
2216
2217 /* If we can discard the whole resource, we can discard the range. */
2218 usage |= PIPE_MAP_DISCARD_RANGE;
2219 }
2220
2221 if (!(usage & PIPE_MAP_UNSYNCHRONIZED) &&
2222 can_promote_to_async(res, box, usage)) {
2223 usage |= PIPE_MAP_UNSYNCHRONIZED;
2224 }
2225
2226 /* Avoid using GPU copies for persistent/coherent buffers, as the idea
2227 * there is to access them simultaneously on the CPU & GPU. This also
2228 * avoids trying to use GPU copies for our u_upload_mgr buffers which
2229 * contain state we're constructing for a GPU draw call, which would
2230 * kill us with infinite stack recursion.
2231 */
2232 if (usage & (PIPE_MAP_PERSISTENT | PIPE_MAP_COHERENT))
2233 usage |= PIPE_MAP_DIRECTLY;
2234
2235 /* We cannot provide a direct mapping of tiled resources, and we
2236 * may not be able to mmap imported BOs since they may come from
2237 * other devices that I915_GEM_MMAP cannot work with.
2238 */
2239 if ((usage & PIPE_MAP_DIRECTLY) &&
2240 (surf->tiling != ISL_TILING_LINEAR || iris_bo_is_imported(res->bo)))
2241 return NULL;
2242
2243 bool map_would_stall = false;
2244
2245 if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
2246 map_would_stall =
2247 resource_is_busy(ice, res) ||
2248 iris_has_invalid_primary(res, level, 1, box->z, box->depth);
2249
2250 if (map_would_stall && (usage & PIPE_MAP_DONTBLOCK) &&
2251 (usage & PIPE_MAP_DIRECTLY))
2252 return NULL;
2253 }
2254
2255 struct iris_transfer *map;
2256
2257 if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
2258 map = slab_zalloc(&ice->transfer_pool_unsync);
2259 else
2260 map = slab_zalloc(&ice->transfer_pool);
2261
2262 if (!map)
2263 return NULL;
2264
2265 struct pipe_transfer *xfer = &map->base.b;
2266
2267 map->dbg = &ice->dbg;
2268
2269 pipe_resource_reference(&xfer->resource, resource);
2270 xfer->level = level;
2271 xfer->usage = usage;
2272 xfer->box = *box;
2273 *ptransfer = xfer;
2274
2275 map->dest_had_defined_contents =
2276 util_ranges_intersect(&res->valid_buffer_range, box->x,
2277 box->x + box->width);
2278
2279 if (usage & PIPE_MAP_WRITE)
2280 util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
2281
2282 if (iris_bo_mmap_mode(res->bo) != IRIS_MMAP_NONE) {
2283 /* GPU copies are not useful for buffer reads. Instead of stalling to
2284 * read from the original buffer, we'd simply copy it to a temporary...
2285 * then stall (a bit longer) to read from that buffer.
2286 *
2287 * Images are less clear-cut. Resolves can be destructive, removing
2288 * some of the underlying compression, so we'd rather blit the data to
2289 * a linear temporary and map that, to avoid the resolve.
2290 */
2291 if (!(usage & PIPE_MAP_DISCARD_RANGE) &&
2292 !iris_has_invalid_primary(res, level, 1, box->z, box->depth)) {
2293 usage |= PIPE_MAP_DIRECTLY;
2294 }
2295
2296 /* We can map directly if it wouldn't stall, there's no compression,
2297 * and we aren't doing an uncached read.
2298 */
2299 if (!map_would_stall &&
2300 !isl_aux_usage_has_compression(res->aux.usage) &&
2301 !((usage & PIPE_MAP_READ) &&
2302 iris_bo_mmap_mode(res->bo) != IRIS_MMAP_WB)) {
2303 usage |= PIPE_MAP_DIRECTLY;
2304 }
2305 }
2306
2307 /* TODO: Teach iris_map_tiled_memcpy about Tile4... */
2308 if (res->surf.tiling == ISL_TILING_4)
2309 usage &= ~PIPE_MAP_DIRECTLY;
2310
2311 if (!(usage & PIPE_MAP_DIRECTLY)) {
2312 /* If we need a synchronous mapping and the resource is busy, or needs
2313 * resolving, we copy to/from a linear temporary buffer using the GPU.
2314 */
2315 map->batch = &ice->batches[IRIS_BATCH_RENDER];
2316 map->blorp = &ice->blorp;
2317 iris_map_copy_region(map);
2318 } else {
2319 /* Otherwise we're free to map on the CPU. */
2320
2321 if (resource->target != PIPE_BUFFER) {
2322 iris_resource_access_raw(ice, res, level, box->z, box->depth,
2323 usage & PIPE_MAP_WRITE);
2324 }
2325
2326 if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
2327 iris_foreach_batch(ice, batch) {
2328 if (iris_batch_references(batch, res->bo))
2329 iris_batch_flush(batch);
2330 }
2331 }
2332
2333 if (surf->tiling == ISL_TILING_W) {
2334 /* TODO: Teach iris_map_tiled_memcpy about W-tiling... */
2335 iris_map_s8(map);
2336 } else if (surf->tiling != ISL_TILING_LINEAR) {
2337 iris_map_tiled_memcpy(map);
2338 } else {
2339 iris_map_direct(map);
2340 }
2341 }
2342
2343 return map->ptr;
2344 }
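
/* To summarize the strategy above:
 *
 *    iris_map_copy_region()   - GPU staging blit for busy, compressed, or
 *                               unmappable resources
 *    iris_map_s8()            - CPU W-tile detiling for stencil
 *    iris_map_tiled_memcpy()  - CPU (de)tiling for other tiled surfaces
 *    iris_map_direct()        - plain CPU mapping for linear, idle data
 */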
2345
2346 static void
2347 iris_transfer_flush_region(struct pipe_context *ctx,
2348 struct pipe_transfer *xfer,
2349 const struct pipe_box *box)
2350 {
2351 struct iris_context *ice = (struct iris_context *)ctx;
2352 struct iris_resource *res = (struct iris_resource *) xfer->resource;
2353 struct iris_transfer *map = (void *) xfer;
2354
2355 if (map->staging)
2356 iris_flush_staging_region(xfer, box);
2357
2358 uint32_t history_flush = 0;
2359
2360 if (res->base.b.target == PIPE_BUFFER) {
2361 if (map->staging)
2362 history_flush |= PIPE_CONTROL_RENDER_TARGET_FLUSH |
2363 PIPE_CONTROL_TILE_CACHE_FLUSH;
2364
2365 if (map->dest_had_defined_contents)
2366 history_flush |= iris_flush_bits_for_history(ice, res);
2367
2368 util_range_add(&res->base.b, &res->valid_buffer_range, box->x, box->x + box->width);
2369 }
2370
2371 if (history_flush & ~PIPE_CONTROL_CS_STALL) {
2372 iris_foreach_batch(ice, batch) {
2373 if (batch->contains_draw || batch->cache.render->entries) {
2374 iris_batch_maybe_flush(batch, 24);
2375 iris_emit_pipe_control_flush(batch,
2376 "cache history: transfer flush",
2377 history_flush);
2378 }
2379 }
2380 }
2381
2382 /* Make sure we flag constants dirty even if there's no need to emit
2383 * any PIPE_CONTROLs to a batch.
2384 */
2385 iris_dirty_for_history(ice, res);
2386 }
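
/* Frontends that map with PIPE_MAP_FLUSH_EXPLICIT drive this hook directly,
 * so only the ranges they flag get written back (hypothetical sketch):
 *
 *    ptr = ctx->buffer_map(ctx, buf, 0,
 *                          PIPE_MAP_WRITE | PIPE_MAP_FLUSH_EXPLICIT,
 *                          &box, &xfer);
 *    memcpy(ptr + 64, data, 128);
 *    u_box_1d(64, 128, &dirty);              // relative to the mapped box
 *    ctx->transfer_flush_region(ctx, xfer, &dirty);
 *    ctx->buffer_unmap(ctx, xfer);
 *
 * Without the flag, iris_transfer_unmap() below flushes the whole box.
 */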
2387
2388 static void
2389 iris_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer *xfer)
2390 {
2391 struct iris_context *ice = (struct iris_context *)ctx;
2392 struct iris_transfer *map = (void *) xfer;
2393
2394 if (!(xfer->usage & (PIPE_MAP_FLUSH_EXPLICIT |
2395 PIPE_MAP_COHERENT))) {
2396 struct pipe_box flush_box = {
2397 .x = 0, .y = 0, .z = 0,
2398 .width = xfer->box.width,
2399 .height = xfer->box.height,
2400 .depth = xfer->box.depth,
2401 };
2402 iris_transfer_flush_region(ctx, xfer, &flush_box);
2403 }
2404
2405 if (map->unmap)
2406 map->unmap(map);
2407
2408 pipe_resource_reference(&xfer->resource, NULL);
2409
2410 /* transfer_unmap is always called from the driver thread, so we have to
2411 * use transfer_pool, not transfer_pool_unsync. Freeing an object into a
2412 * different pool is allowed, however.
2413 */
2414 slab_free(&ice->transfer_pool, map);
2415 }
2416
2417 /**
2418 * The pipe->texture_subdata() driver hook.
2419 *
2420 * Mesa's state tracker takes this path whenever possible, even with
2421 * PIPE_CAP_TEXTURE_TRANSFER_MODES set.
2422 */
2423 static void
2424 iris_texture_subdata(struct pipe_context *ctx,
2425 struct pipe_resource *resource,
2426 unsigned level,
2427 unsigned usage,
2428 const struct pipe_box *box,
2429 const void *data,
2430 unsigned stride,
2431 unsigned layer_stride)
2432 {
2433 struct iris_context *ice = (struct iris_context *)ctx;
2434 struct iris_resource *res = (struct iris_resource *)resource;
2435 const struct isl_surf *surf = &res->surf;
2436
2437 assert(resource->target != PIPE_BUFFER);
2438
2439 /* Just use the transfer-based path for linear buffers - it will already
2440 * do a direct mapping, or a simple linear staging buffer.
2441 *
2442 * Linear staging buffers appear to be better than tiled ones, too, so
2443 * take that path if we need the GPU to perform color compression, or
2444 * stall-avoidance blits.
2445 *
2446 * TODO: Teach isl_memcpy_linear_to_tiled about Tile4...
2447 */
2448 if (surf->tiling == ISL_TILING_LINEAR ||
2449 surf->tiling == ISL_TILING_4 ||
2450 isl_aux_usage_has_compression(res->aux.usage) ||
2451 resource_is_busy(ice, res) ||
2452 iris_bo_mmap_mode(res->bo) == IRIS_MMAP_NONE) {
2453 return u_default_texture_subdata(ctx, resource, level, usage, box,
2454 data, stride, layer_stride);
2455 }
2456
2457 /* No state trackers pass any flags other than PIPE_MAP_WRITE */
2458
2459 iris_resource_access_raw(ice, res, level, box->z, box->depth, true);
2460
2461 iris_foreach_batch(ice, batch) {
2462 if (iris_batch_references(batch, res->bo))
2463 iris_batch_flush(batch);
2464 }
2465
2466 uint8_t *dst = iris_bo_map(&ice->dbg, res->bo, MAP_WRITE | MAP_RAW);
2467
2468 for (int s = 0; s < box->depth; s++) {
2469 const uint8_t *src = data + s * layer_stride;
2470
2471 if (surf->tiling == ISL_TILING_W) {
2472 unsigned x0_el, y0_el;
2473 get_image_offset_el(surf, level, box->z + s, &x0_el, &y0_el);
2474
2475 for (unsigned y = 0; y < box->height; y++) {
2476 for (unsigned x = 0; x < box->width; x++) {
2477 ptrdiff_t offset = s8_offset(surf->row_pitch_B,
2478 x0_el + box->x + x,
2479 y0_el + box->y + y);
2480 dst[offset] = src[y * stride + x];
2481 }
2482 }
2483 } else {
2484 unsigned x1, x2, y1, y2;
2485
2486 tile_extents(surf, box, level, s, &x1, &x2, &y1, &y2);
2487
2488 isl_memcpy_linear_to_tiled(x1, x2, y1, y2,
2489 (void *)dst, (void *)src,
2490 surf->row_pitch_B, stride,
2491 false, surf->tiling, ISL_MEMCPY);
2492 }
2493 }
2494 }
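
/* For reference, a frontend upload of a 16x16 region of a 2D RGBA8 level
 * would look like this (hypothetical sketch; `tex` and `pixels` assumed):
 *
 *    struct pipe_box box;
 *    u_box_2d(0, 0, 16, 16, &box);
 *    ctx->texture_subdata(ctx, tex, 0, PIPE_MAP_WRITE, &box,
 *                         pixels, 16 * 4, 0);  // stride = 16 texels * 4 Bpp
 *
 * Tiled, uncompressed, idle textures take the direct CPU tiling path above;
 * everything else falls back to u_default_texture_subdata().
 */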
2495
2496 /**
2497 * Mark state dirty that needs to be re-emitted when a resource is written.
2498 */
2499 void
2500 iris_dirty_for_history(struct iris_context *ice,
2501 struct iris_resource *res)
2502 {
2503 const uint64_t stages = res->bind_stages;
2504 uint64_t dirty = 0ull;
2505 uint64_t stage_dirty = 0ull;
2506
2507 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
2508 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
2509 if (stages & (1u << stage)) {
2510 struct iris_shader_state *shs = &ice->state.shaders[stage];
2511 shs->dirty_cbufs |= ~0u;
2512 }
2513 }
2514 dirty |= IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
2515 IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES;
2516 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_CONSTANTS);
2517 }
2518
2519 if (res->bind_history & (PIPE_BIND_SAMPLER_VIEW |
2520 PIPE_BIND_SHADER_IMAGE)) {
2521 dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES |
2522 IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES;
2523 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS);
2524 }
2525
2526 if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
2527 dirty |= IRIS_DIRTY_RENDER_MISC_BUFFER_FLUSHES |
2528 IRIS_DIRTY_COMPUTE_MISC_BUFFER_FLUSHES;
2529 stage_dirty |= (stages << IRIS_SHIFT_FOR_STAGE_DIRTY_BINDINGS);
2530 }
2531
2532 if (res->bind_history & PIPE_BIND_VERTEX_BUFFER)
2533 dirty |= IRIS_DIRTY_VERTEX_BUFFER_FLUSHES;
2534
2535 ice->state.dirty |= dirty;
2536 ice->state.stage_dirty |= stage_dirty;
2537 }
2538
2539 /**
2540 * Produce a set of PIPE_CONTROL bits which ensure data written to a
2541 * resource becomes visible, and any stale read cache data is invalidated.
2542 */
2543 uint32_t
2544 iris_flush_bits_for_history(struct iris_context *ice,
2545 struct iris_resource *res)
2546 {
2547 struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
2548
2549 uint32_t flush = PIPE_CONTROL_CS_STALL;
2550
2551 if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
2552 flush |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
2553 flush |= screen->compiler->indirect_ubos_use_sampler ?
2554 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE :
2555 PIPE_CONTROL_DATA_CACHE_FLUSH;
2556 }
2557
2558 if (res->bind_history & PIPE_BIND_SAMPLER_VIEW)
2559 flush |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
2560
2561 if (res->bind_history & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
2562 flush |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
2563
2564 if (res->bind_history & (PIPE_BIND_SHADER_BUFFER | PIPE_BIND_SHADER_IMAGE))
2565 flush |= PIPE_CONTROL_DATA_CACHE_FLUSH;
2566
2567 return flush;
2568 }
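
/* For example, a buffer whose bind_history is PIPE_BIND_SHADER_BUFFER |
 * PIPE_BIND_VERTEX_BUFFER yields PIPE_CONTROL_CS_STALL |
 * PIPE_CONTROL_DATA_CACHE_FLUSH | PIPE_CONTROL_VF_CACHE_INVALIDATE: flush
 * the data cache that SSBO writes landed in, and invalidate any stale
 * vertex data in the VF cache.
 */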
2569
2570 void
2571 iris_flush_and_dirty_for_history(struct iris_context *ice,
2572 struct iris_batch *batch,
2573 struct iris_resource *res,
2574 uint32_t extra_flags,
2575 const char *reason)
2576 {
2577 if (res->base.b.target != PIPE_BUFFER)
2578 return;
2579
2580 uint32_t flush = iris_flush_bits_for_history(ice, res) | extra_flags;
2581
2582 iris_emit_pipe_control_flush(batch, reason, flush);
2583
2584 iris_dirty_for_history(ice, res);
2585 }
2586
2587 bool
2588 iris_resource_set_clear_color(struct iris_context *ice,
2589 struct iris_resource *res,
2590 union isl_color_value color)
2591 {
2592 if (res->aux.clear_color_unknown ||
2593 memcmp(&res->aux.clear_color, &color, sizeof(color)) != 0) {
2594 res->aux.clear_color = color;
2595 res->aux.clear_color_unknown = false;
2596 return true;
2597 }
2598
2599 return false;
2600 }
2601
2602 static enum pipe_format
2603 iris_resource_get_internal_format(struct pipe_resource *p_res)
2604 {
2605 struct iris_resource *res = (void *) p_res;
2606 return res->internal_format;
2607 }
2608
2609 static const struct u_transfer_vtbl transfer_vtbl = {
2610 .resource_create = iris_resource_create,
2611 .resource_destroy = iris_resource_destroy,
2612 .transfer_map = iris_transfer_map,
2613 .transfer_unmap = iris_transfer_unmap,
2614 .transfer_flush_region = iris_transfer_flush_region,
2615 .get_internal_format = iris_resource_get_internal_format,
2616 .set_stencil = iris_resource_set_separate_stencil,
2617 .get_stencil = iris_resource_get_separate_stencil,
2618 };
2619
2620 void
2621 iris_init_screen_resource_functions(struct pipe_screen *pscreen)
2622 {
2623 pscreen->query_dmabuf_modifiers = iris_query_dmabuf_modifiers;
2624 pscreen->is_dmabuf_modifier_supported = iris_is_dmabuf_modifier_supported;
2625 pscreen->get_dmabuf_modifier_planes = iris_get_dmabuf_modifier_planes;
2626 pscreen->resource_create_with_modifiers =
2627 iris_resource_create_with_modifiers;
2628 pscreen->resource_create = u_transfer_helper_resource_create;
2629 pscreen->resource_from_user_memory = iris_resource_from_user_memory;
2630 pscreen->resource_from_handle = iris_resource_from_handle;
2631 pscreen->resource_from_memobj = iris_resource_from_memobj_wrapper;
2632 pscreen->resource_get_handle = iris_resource_get_handle;
2633 pscreen->resource_get_param = iris_resource_get_param;
2634 pscreen->resource_destroy = u_transfer_helper_resource_destroy;
2635 pscreen->memobj_create_from_handle = iris_memobj_create_from_handle;
2636 pscreen->memobj_destroy = iris_memobj_destroy;
2637 pscreen->transfer_helper =
2638 u_transfer_helper_create(&transfer_vtbl, true, true, false, true, false);
2639 }
2640
2641 void
2642 iris_init_resource_functions(struct pipe_context *ctx)
2643 {
2644 ctx->flush_resource = iris_flush_resource;
2645 ctx->invalidate_resource = iris_invalidate_resource;
2646 ctx->buffer_map = u_transfer_helper_transfer_map;
2647 ctx->texture_map = u_transfer_helper_transfer_map;
2648 ctx->transfer_flush_region = u_transfer_helper_transfer_flush_region;
2649 ctx->buffer_unmap = u_transfer_helper_transfer_unmap;
2650 ctx->texture_unmap = u_transfer_helper_transfer_unmap;
2651 ctx->buffer_subdata = u_default_buffer_subdata;
2652 ctx->texture_subdata = iris_texture_subdata;
2653 }
2654