/*
 * Copyright (c) 2012-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Wladimir J. van der Laan <laanwj@gmail.com>
 */

#include "etnaviv_transfer.h"
#include "etnaviv_clear_blit.h"
#include "etnaviv_context.h"
#include "etnaviv_debug.h"
#include "etnaviv_etc2.h"
#include "etnaviv_screen.h"

#include "pipe/p_defines.h"
#include "pipe/p_format.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_surface.h"
#include "util/u_transfer.h"

#include "hw/common_3d.xml.h"

#include "drm-uapi/drm_fourcc.h"
/* Compute the byte offset of a box into a 1D/2D/3D buffer. The box must be
 * aligned to the block width and height of the underlying format. */
static inline size_t
etna_compute_offset(enum pipe_format format, const struct pipe_box *box,
                    size_t stride, size_t layer_stride)
{
   return box->z * layer_stride +
          box->y / util_format_get_blockheight(format) * stride +
          box->x / util_format_get_blockwidth(format) *
             util_format_get_blocksize(format);
}
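
/*
 * Worked example (illustrative, not driver code): PIPE_FORMAT_ETC2_RGB8 uses
 * 4x4 pixel blocks of 8 bytes each, so for a box at (x=8, y=4, z=0) the
 * formula above yields
 *
 *    0 * layer_stride + (4 / 4) * stride + (8 / 4) * 8 = stride + 16
 *
 * i.e. one block row down and two blocks to the right.
 */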

static void etna_patch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (likely(!etna_etc2_needs_patching(prsc)))
      return;

   if (level->patched)
      return;

   /* do we have the offsets of the blocks to patch? */
   if (!level->patch_offsets) {
      level->patch_offsets = CALLOC_STRUCT(util_dynarray);

      etna_etc2_calculate_blocks(buffer, ptrans->stride,
                                         ptrans->box.width, ptrans->box.height,
                                         prsc->format, level->patch_offsets);
   }

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = true;
}

static void etna_unpatch_data(void *buffer, const struct pipe_transfer *ptrans)
{
   struct pipe_resource *prsc = ptrans->resource;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_resource_level *level = &rsc->levels[ptrans->level];

   if (!level->patched)
      return;

   etna_etc2_patch(buffer, level->patch_offsets);

   level->patched = false;
}

static void
etna_transfer_unmap(struct pipe_context *pctx, struct pipe_transfer *ptrans)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_transfer *trans = etna_transfer(ptrans);
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   /* XXX
    * When writing to a resource that is already in use, replace the resource
    * with a completely new buffer and free the old one using a fenced free.
    * The trickiest case to implement will be a tiled or supertiled surface
    * with a partial write whose target is not aligned to 4/64. */
   assert(ptrans->level <= rsc->base.last_level);

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture)))
      rsc = etna_resource(rsc->texture); /* switch to using the texture resource */

   /*
    * Temporary resources are always pulled into the CPU domain, so we must
    * push them back into the GPU domain before the RS executes the blit to
    * the base resource.
    */
   if (trans->rsc)
      etna_bo_cpu_fini(etna_resource(trans->rsc)->bo);

   if (ptrans->usage & PIPE_MAP_WRITE) {
      if (trans->rsc) {
         /* We have a temporary resource due to either tile status or
          * tiling format. Write back the updated buffer contents.
          * FIXME: we need to invalidate the tile status. */
         etna_copy_resource_box(pctx, ptrans->resource, trans->rsc, ptrans->level, &ptrans->box);
      } else if (trans->staging) {
         /* write the staging buffer contents back into the resource */
         struct etna_resource_level *res_level = &rsc->levels[ptrans->level];

         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_tile(
                  trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                  trans->staging + z * ptrans->layer_stride,
                  ptrans->box.x, ptrans->box.y,
                  res_level->stride, ptrans->box.width, ptrans->box.height,
                  ptrans->stride, util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->mapped, rsc->base.format, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z, ptrans->box.width,
                          ptrans->box.height, ptrans->box.depth,
                          trans->staging, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0 /* src x,y,z */);
         } else {
            BUG("unsupported tiling %i", rsc->layout);
         }

         FREE(trans->staging);
      }

      rsc->seqno++;

      if (rsc->base.bind & PIPE_BIND_SAMPLER_VIEW) {
         ctx->dirty |= ETNA_DIRTY_TEXTURE_CACHES;
      }
   }

   /* We need to have the patched data ready for the GPU. */
   etna_patch_data(trans->mapped, ptrans);

   /*
    * Transfers without a temporary resource are only pulled into the CPU
    * domain if they are not mapped unsynchronized. If they are, we must push
    * them back into the GPU domain after CPU access is finished.
    */
   if (!trans->rsc && !(ptrans->usage & PIPE_MAP_UNSYNCHRONIZED))
      etna_bo_cpu_fini(rsc->bo);

   if ((ptrans->resource->target == PIPE_BUFFER) &&
       (ptrans->usage & PIPE_MAP_WRITE)) {
      util_range_add(&rsc->base,
                     &rsc->valid_buffer_range,
                     ptrans->box.x,
                     ptrans->box.x + ptrans->box.width);
   }

   pipe_resource_reference(&trans->rsc, NULL);
   pipe_resource_reference(&ptrans->resource, NULL);
   slab_free(&ctx->transfer_pool, trans);
}

static void *
etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
                  unsigned level,
                  unsigned usage,
                  const struct pipe_box *box,
                  struct pipe_transfer **out_transfer)
{
   struct etna_context *ctx = etna_context(pctx);
   struct etna_screen *screen = ctx->screen;
   struct etna_resource *rsc = etna_resource(prsc);
   struct etna_transfer *trans;
   struct pipe_transfer *ptrans;
   enum pipe_format format = prsc->format;

   trans = slab_alloc(&ctx->transfer_pool);
   if (!trans)
      return NULL;

   /* slab_alloc() doesn't zero */
   memset(trans, 0, sizeof(*trans));

   /*
    * Upgrade to UNSYNCHRONIZED if target is PIPE_BUFFER and range is uninitialized.
    */
   if ((usage & PIPE_MAP_WRITE) &&
       (prsc->target == PIPE_BUFFER) &&
       !util_ranges_intersect(&rsc->valid_buffer_range,
                              box->x,
                              box->x + box->width)) {
      usage |= PIPE_MAP_UNSYNCHRONIZED;
   }
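
   /*
    * Illustrative example (not driver code): the very first write into a
    * freshly created buffer, e.g. an initial vertex upload, finds an empty
    * valid_buffer_range and so takes the unsynchronized path above without
    * stalling on any outstanding GPU work.
    */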

   /* Upgrade DISCARD_RANGE to WHOLE_RESOURCE if the whole resource is
    * being mapped. If we add buffer reallocation to avoid CPU/GPU sync this
    * check needs to be extended to coherent mappings and shared resources.
    */
   if ((usage & PIPE_MAP_DISCARD_RANGE) &&
       !(usage & PIPE_MAP_UNSYNCHRONIZED) &&
       prsc->last_level == 0 &&
       prsc->width0 == box->width &&
       prsc->height0 == box->height &&
       prsc->depth0 == box->depth &&
       prsc->array_size == 1) {
      usage |= PIPE_MAP_DISCARD_WHOLE_RESOURCE;
   }

   ptrans = &trans->base;
   pipe_resource_reference(&ptrans->resource, prsc);
   ptrans->level = level;
   ptrans->usage = usage;
   ptrans->box = *box;

   assert(level <= prsc->last_level);

   /* This one is a little tricky: if we have a separate render resource that
    * is newer than the base resource, we want the transfer to target it to
    * get the most up-to-date content, but only if we don't have a texture
    * resource of the same age, as transferring in/out of the texture resource
    * is generally preferred for the reasons listed below. */
   if (rsc->render && etna_resource_newer(etna_resource(rsc->render), rsc) &&
       (!rsc->texture || etna_resource_newer(etna_resource(rsc->render),
                                             etna_resource(rsc->texture)))) {
      rsc = etna_resource(rsc->render);
   }

   if (rsc->texture && !etna_resource_newer(rsc, etna_resource(rsc->texture))) {
      /* We have a texture resource which is the same age or newer than the
       * render resource. Use the texture resource, which avoids bouncing
       * pixels between the two resources, and we can de-tile it in s/w. */
      rsc = etna_resource(rsc->texture);
   } else if (rsc->ts_bo ||
              (rsc->layout != ETNA_LAYOUT_LINEAR &&
               etna_resource_hw_tileable(screen->specs.use_blt, prsc) &&
               /* HALIGN 4 resources are incompatible with the resolve engine,
                * so fall back to using software to detile this resource. */
               rsc->halign != TEXTURE_HALIGN_FOUR)) {
      /* If the surface has tile status, we need to resolve it first.
       * The strategy we implement here is to use the RS to copy the
       * depth buffer, filling in the "holes" where the tile status
       * indicates that it's clear. We also do this for tiled
       * resources, but only if the RS can blit them. */
      if (usage & PIPE_MAP_DIRECTLY) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("unsupported map flags %#x with tile status/tiled layout", usage);
         return NULL;
      }

      if (prsc->depth0 > 1 && rsc->ts_bo) {
         slab_free(&ctx->transfer_pool, trans);
         BUG("resource has depth >1 with tile status");
         return NULL;
      }

      struct pipe_resource templ = *prsc;
      templ.nr_samples = 0;
      templ.bind = PIPE_BIND_RENDER_TARGET;

      trans->rsc = etna_resource_alloc(pctx->screen, ETNA_LAYOUT_LINEAR,
                                       DRM_FORMAT_MOD_LINEAR, &templ);
      if (!trans->rsc) {
         slab_free(&ctx->transfer_pool, trans);
         return NULL;
      }

      if (!screen->specs.use_blt) {
         /* Need to align the transfer region to satisfy RS restrictions, as we
          * really want to hit the RS blit path here.
          */
         unsigned w_align, h_align;

         if (rsc->layout & ETNA_LAYOUT_BIT_SUPER) {
            w_align = 64;
            h_align = 64 * ctx->screen->specs.pixel_pipes;
         } else {
            w_align = ETNA_RS_WIDTH_MASK + 1;
            h_align = ETNA_RS_HEIGHT_MASK + 1;
         }

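         /*
          * Illustrative example (hypothetical numbers): if w_align were 16
          * and the caller asked for x = 13, width = 10, the math below first
          * grows the width to 23 so the box still covers the same pixels
          * once the origin is rounded down to x = 0, and then aligns the
          * width up to the RS granularity.
          */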
         ptrans->box.width += ptrans->box.x & (w_align - 1);
         ptrans->box.x = ptrans->box.x & ~(w_align - 1);
         ptrans->box.width = align(ptrans->box.width, (ETNA_RS_WIDTH_MASK + 1));
         ptrans->box.height += ptrans->box.y & (h_align - 1);
         ptrans->box.y = ptrans->box.y & ~(h_align - 1);
         ptrans->box.height = align(ptrans->box.height, ETNA_RS_HEIGHT_MASK + 1);
      }

      if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE))
         etna_copy_resource_box(pctx, trans->rsc, &rsc->base, level, &ptrans->box);

      /* Switch to using the temporary resource instead */
      rsc = etna_resource(trans->rsc);
   }

   struct etna_resource_level *res_level = &rsc->levels[level];

   /* XXX we don't handle PIPE_MAP_FLUSH_EXPLICIT; this flag can be ignored
    * when mapping in-place, but when not mapping in-place we need to fire off
    * the copy operation in transfer_flush_region (currently a no-op) instead
    * of in unmap. We need to handle this to support at least the
    * ARB_map_buffer_range extension.
    */
   /* XXX we don't take care of current operations on the resource, which, at
      some point in the pipeline that is not yet executed, may be:

      - bound as surface
      - bound through vertex buffer
      - bound through index buffer
      - bound in sampler view
      - used in clear_render_target / clear_depth_stencil operation
      - used in blit
      - used in resource_copy_region

      How do other drivers record this information over the course of the
      rendering pipeline? Is it necessary at all? It only matters if we want
      to provide a fast path that maps the resource directly (and for
      PIPE_MAP_DIRECTLY) without forcing a sync. We also need to know whether
      the resource is in use to determine if a sync is needed (or we could
      always sync, but that comes at the expense of performance).

      A conservative approximation without too much overhead would be to mark
      all resources that have been bound at some point as busy. A drawback is
      that accessing resources that have been bound but are no longer in use
      would still carry a performance penalty. On the other hand, the program
      could use PIPE_MAP_DISCARD_WHOLE_RESOURCE or PIPE_MAP_UNSYNCHRONIZED to
      avoid this in the first place...

      A) We use an in-pipe copy engine and queue the copy operation after
         unmap, so that the copy is performed once all current commands have
         been executed. Using the RS is possible, though not necessarily
         always efficient. This can also do any kind of tiling for us.
         Only possible when PIPE_MAP_DISCARD_RANGE is set.
      B) We discard the entire resource (or at least the mipmap level) and
         allocate new memory for it.
         Only possible when mapping the entire resource or when
         PIPE_MAP_DISCARD_WHOLE_RESOURCE is set.
    */

   /*
    * Pull resources into the CPU domain. Only skipped for unsynchronized
    * transfers without a temporary resource.
    */
   if (trans->rsc || !(usage & PIPE_MAP_UNSYNCHRONIZED)) {
      uint32_t prep_flags = 0;

      /*
       * Always flush if we have a temporary resource with an outstanding copy
       * to it. Otherwise infer the flush requirement from resource access and
       * current GPU usage (reads must wait for GPU writes, writes must have
       * exclusive access to the buffer).
       */
      mtx_lock(&ctx->lock);

      if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
          (!trans->rsc &&
           (((usage & PIPE_MAP_READ) && (rsc->status & ETNA_PENDING_WRITE)) ||
           ((usage & PIPE_MAP_WRITE) && rsc->status)))) {
         mtx_lock(&rsc->lock);
         set_foreach(rsc->pending_ctx, entry) {
            struct etna_context *pend_ctx = (struct etna_context *)entry->key;
            struct pipe_context *pend_pctx = &pend_ctx->base;

            pend_pctx->flush(pend_pctx, NULL, 0);
         }
         mtx_unlock(&rsc->lock);
      }

      mtx_unlock(&ctx->lock);

      if (usage & PIPE_MAP_READ)
         prep_flags |= DRM_ETNA_PREP_READ;
      if (usage & PIPE_MAP_WRITE)
         prep_flags |= DRM_ETNA_PREP_WRITE;

      /*
       * The ETC2 patching operates in-place on the resource, so the resource
       * gets written to even on read-only transfers. This blocks the GPU
       * from sampling from the resource in the meantime.
       */
      if ((usage & PIPE_MAP_READ) && etna_etc2_needs_patching(prsc))
         prep_flags |= DRM_ETNA_PREP_WRITE;

      if (etna_bo_cpu_prep(rsc->bo, prep_flags))
         goto fail_prep;
   }

   /* map buffer object */
   trans->mapped = etna_bo_map(rsc->bo);
   if (!trans->mapped)
      goto fail;

   *out_transfer = ptrans;

   if (rsc->layout == ETNA_LAYOUT_LINEAR) {
      ptrans->stride = res_level->stride;
      ptrans->layer_stride = res_level->layer_stride;

      trans->mapped += res_level->offset +
             etna_compute_offset(prsc->format, box, res_level->stride,
                                 res_level->layer_stride);

      /* We need to have the unpatched data ready for the gfx stack. */
      if (usage & PIPE_MAP_READ)
         etna_unpatch_data(trans->mapped, ptrans);

      return trans->mapped;
   } else {
      unsigned divSizeX = util_format_get_blockwidth(format);
      unsigned divSizeY = util_format_get_blockheight(format);

      /* No direct mappings of tiled layouts, since we need to manually
       * tile/untile.
       */
      if (usage & PIPE_MAP_DIRECTLY)
         goto fail;

      trans->mapped += res_level->offset;
      ptrans->stride = align(box->width, divSizeX) * util_format_get_blocksize(format); /* row stride in bytes */
      ptrans->layer_stride = align(box->height, divSizeY) * ptrans->stride;
      size_t size = ptrans->layer_stride * box->depth;
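      /*
       * Illustrative sizing example (not driver code): mapping a 100x60x1 box
       * of an RGBA8 resource (1x1 blocks, 4 bytes each) gives stride = 400,
       * layer_stride = 24000 and a staging allocation of 24000 bytes.
       */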

      trans->staging = MALLOC(size);
      if (!trans->staging)
         goto fail;

      if (usage & PIPE_MAP_READ) {
         if (rsc->layout == ETNA_LAYOUT_TILED) {
            for (unsigned z = 0; z < ptrans->box.depth; z++) {
               etna_texture_untile(trans->staging + z * ptrans->layer_stride,
                                   trans->mapped + (ptrans->box.z + z) * res_level->layer_stride,
                                   ptrans->box.x, ptrans->box.y, res_level->stride,
                                   ptrans->box.width, ptrans->box.height, ptrans->stride,
                                   util_format_get_blocksize(rsc->base.format));
            }
         } else if (rsc->layout == ETNA_LAYOUT_LINEAR) {
            util_copy_box(trans->staging, rsc->base.format, ptrans->stride,
                          ptrans->layer_stride, 0, 0, 0, /* dst x,y,z */
                          ptrans->box.width, ptrans->box.height,
                          ptrans->box.depth, trans->mapped, res_level->stride,
                          res_level->layer_stride, ptrans->box.x,
                          ptrans->box.y, ptrans->box.z);
         } else {
            /* TODO supertiling */
            BUG("unsupported tiling %i for reading", rsc->layout);
         }
      }

      return trans->staging;
   }

fail:
   etna_bo_cpu_fini(rsc->bo);
fail_prep:
   etna_transfer_unmap(pctx, ptrans);
   return NULL;
}

static void
etna_transfer_flush_region(struct pipe_context *pctx,
                           struct pipe_transfer *ptrans,
                           const struct pipe_box *box)
{
   struct etna_resource *rsc = etna_resource(ptrans->resource);

   if (ptrans->resource->target == PIPE_BUFFER)
      util_range_add(&rsc->base,
                     &rsc->valid_buffer_range,
                     ptrans->box.x + box->x,
                     ptrans->box.x + box->x + box->width);
}

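/*
 * Illustrative usage sketch (not part of this file): how a Gallium state
 * tracker typically exercises the hooks installed below. The names "pipe",
 * "buf", "data" and "size" are assumptions for the example.
 *
 *    struct pipe_transfer *xfer;
 *    struct pipe_box box;
 *    u_box_1d(0, size, &box);
 *    void *map = pipe->buffer_map(pipe, buf, 0, PIPE_MAP_WRITE, &box, &xfer);
 *    memcpy(map, data, size);
 *    pipe->buffer_unmap(pipe, xfer);
 *
 * etna_transfer_map() returns either a direct pointer into the BO or a
 * staging buffer; etna_transfer_unmap() writes any staging data back.
 */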
void
etna_transfer_init(struct pipe_context *pctx)
{
   pctx->buffer_map = etna_transfer_map;
   pctx->texture_map = etna_transfer_map;
   pctx->transfer_flush_region = etna_transfer_flush_region;
   pctx->buffer_unmap = etna_transfer_unmap;
   pctx->texture_unmap = etna_transfer_unmap;
   pctx->buffer_subdata = u_default_buffer_subdata;
   pctx->texture_subdata = u_default_texture_subdata;
}