1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  *
24  */
25 
26 /* This file implements randomized texture blit tests. */
27 
28 #include "si_pipe.h"
29 #include "util/rand_xor.h"
30 #include "util/u_surface.h"
31 
/* Shared xorshift128+ RNG state used to generate pixel data; seeded once in
 * si_test_blit() via s_rand_xorshift128plus(). */
static uint64_t seed_xorshift128plus[2];

/* Size in bytes of one value drawn from the RNG stream (one 64-bit word).
 * CPU texture row strides are aligned to this so rows can be filled with
 * whole random words. */
#define RAND_NUM_SIZE 8
35 
36 /* The GPU blits are emulated on the CPU using these CPU textures. */
37 
/* CPU-side shadow copy of a GPU texture; holds the reference result that the
 * emulated blits produce, later compared against the real GPU contents.
 * Layers are tightly packed: `stride` bytes per row, `layer_stride` bytes per
 * array layer, `size` bytes total. */
struct cpu_texture {
   uint8_t *ptr;          /* malloc'ed backing storage (size bytes) */
   uint64_t size;         /* total allocation size in bytes */
   uint64_t layer_stride; /* bytes between consecutive array layers */
   unsigned stride;       /* bytes per row, aligned to RAND_NUM_SIZE */
};
44 
alloc_cpu_texture(struct cpu_texture * tex,struct pipe_resource * templ)45 static void alloc_cpu_texture(struct cpu_texture *tex, struct pipe_resource *templ)
46 {
47    tex->stride = align(util_format_get_stride(templ->format, templ->width0), RAND_NUM_SIZE);
48    tex->layer_stride = (uint64_t)tex->stride * templ->height0;
49    tex->size = tex->layer_stride * templ->array_size;
50    tex->ptr = malloc(tex->size);
51    assert(tex->ptr);
52 }
53 
set_random_pixels(struct pipe_context * ctx,struct pipe_resource * tex,struct cpu_texture * cpu)54 static void set_random_pixels(struct pipe_context *ctx, struct pipe_resource *tex,
55                               struct cpu_texture *cpu)
56 {
57    struct pipe_transfer *t;
58    uint8_t *map;
59    int x, y, z;
60 
61    map = pipe_texture_map_3d(ctx, tex, 0, PIPE_MAP_WRITE, 0, 0, 0, tex->width0, tex->height0,
62                               tex->array_size, &t);
63    assert(map);
64 
65    for (z = 0; z < tex->array_size; z++) {
66       for (y = 0; y < tex->height0; y++) {
67          uint64_t *ptr = (uint64_t *)(map + t->layer_stride * z + t->stride * y);
68          uint64_t *ptr_cpu = (uint64_t *)(cpu->ptr + cpu->layer_stride * z + cpu->stride * y);
69          unsigned size = cpu->stride / RAND_NUM_SIZE;
70 
71          assert(t->stride % RAND_NUM_SIZE == 0);
72          assert(cpu->stride % RAND_NUM_SIZE == 0);
73 
74          for (x = 0; x < size; x++) {
75             *ptr++ = *ptr_cpu++ = rand_xorshift128plus(seed_xorshift128plus);
76          }
77       }
78    }
79 
80    pipe_texture_unmap(ctx, t);
81 }
82 
compare_textures(struct pipe_context * ctx,struct pipe_resource * tex,struct cpu_texture * cpu)83 static bool compare_textures(struct pipe_context *ctx, struct pipe_resource *tex,
84                              struct cpu_texture *cpu)
85 {
86    struct pipe_transfer *t;
87    uint8_t *map;
88    int y, z;
89    bool pass = true;
90    unsigned stride = util_format_get_stride(tex->format, tex->width0);
91 
92    map = pipe_texture_map_3d(ctx, tex, 0, PIPE_MAP_READ, 0, 0, 0, tex->width0, tex->height0,
93                               tex->array_size, &t);
94    assert(map);
95 
96    for (z = 0; z < tex->array_size; z++) {
97       for (y = 0; y < tex->height0; y++) {
98          uint8_t *ptr = map + t->layer_stride * z + t->stride * y;
99          uint8_t *cpu_ptr = cpu->ptr + cpu->layer_stride * z + cpu->stride * y;
100 
101          if (memcmp(ptr, cpu_ptr, stride)) {
102             pass = false;
103             goto done;
104          }
105       }
106    }
107 done:
108    pipe_texture_unmap(ctx, t);
109    return pass;
110 }
111 
choose_format()112 static enum pipe_format choose_format()
113 {
114    enum pipe_format formats[] = {
115       PIPE_FORMAT_R8_UINT,     PIPE_FORMAT_R16_UINT,          PIPE_FORMAT_R32_UINT,
116       PIPE_FORMAT_R32G32_UINT, PIPE_FORMAT_R32G32B32A32_UINT, PIPE_FORMAT_G8R8_B8R8_UNORM,
117    };
118    return formats[rand() % ARRAY_SIZE(formats)];
119 }
120 
array_mode_to_string(struct si_screen * sscreen,struct radeon_surf * surf)121 static const char *array_mode_to_string(struct si_screen *sscreen, struct radeon_surf *surf)
122 {
123    if (sscreen->info.chip_class >= GFX9) {
124       switch (surf->u.gfx9.swizzle_mode) {
125       case 0:
126          return "  LINEAR";
127       case 21:
128          return " 4KB_S_X";
129       case 22:
130          return " 4KB_D_X";
131       case 25:
132          return "64KB_S_X";
133       case 26:
134          return "64KB_D_X";
135       case 27:
136          return "64KB_R_X";
137       default:
138          printf("Unhandled swizzle mode = %u\n", surf->u.gfx9.swizzle_mode);
139          return " UNKNOWN";
140       }
141    } else {
142       switch (surf->u.legacy.level[0].mode) {
143       case RADEON_SURF_MODE_LINEAR_ALIGNED:
144          return "LINEAR_ALIGNED";
145       case RADEON_SURF_MODE_1D:
146          return "1D_TILED_THIN1";
147       case RADEON_SURF_MODE_2D:
148          return "2D_TILED_THIN1";
149       default:
150          assert(0);
151          return "       UNKNOWN";
152       }
153    }
154 }
155 
/* Randomly choose a maximum texture dimension for one test iteration:
 * the hardware maximum 1/4 of the time (to hit large sizes), 128 another
 * 1/4 of the time (to hit 1D tiling), and 2048 otherwise (common sizes).
 */
static unsigned generate_max_tex_side(unsigned max_tex_side)
{
   int bucket = rand() % 4;

   if (bucket == 0)
      return max_tex_side; /* large sizes: 1/4 of the cases */

   if (bucket == 1)
      return 128; /* 1D tiling: 1/4 of the cases */

   return 2048; /* common sizes: 2/4 of the cases */
}
170 
/* Top-level entry point: run randomized blit tests indefinitely.
 *
 * Each iteration creates a random src/dst texture pair (GPU resource plus a
 * CPU shadow copy), fills src with random pixels, performs either one
 * whole-surface copy or N random sub-rectangle copies on both the GPU and
 * the CPU emulation, then compares the GPU dst against the CPU reference
 * and prints a pass/fail line.
 *
 * Never returns: calls exit(0) after the (practically infinite) iteration
 * count; the test is meant to be killed manually.
 */
void si_test_blit(struct si_screen *sscreen)
{
   struct pipe_screen *screen = &sscreen->b;
   struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
   struct si_context *sctx = (struct si_context *)ctx;
   uint64_t max_alloc_size;
   unsigned i, iterations, num_partial_copies, max_tex_side;
   unsigned num_pass = 0, num_fail = 0;

   max_tex_side = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_SIZE);

   /* Max 128 MB allowed for both textures. */
   max_alloc_size = 128 * 1024 * 1024;

   /* the seed for random test parameters */
   srand(0x9b47d95b);
   /* the seed for random pixel data */
   s_rand_xorshift128plus(seed_xorshift128plus, false);

   iterations = 1000000000; /* just kill it when you are bored */
   num_partial_copies = 30;

   /* These parameters are randomly generated per test:
    * - whether to do one whole-surface copy or N partial copies per test
    * - which tiling modes to use (LINEAR_ALIGNED, 1D, 2D)
    * - which texture dimensions to use
    * - whether to use VRAM (all tiling modes) and GTT (staging, linear
    *   only) allocations
    * - random initial pixels in src
    * - generate random subrectangle copies for partial blits
    */
   for (i = 0; i < iterations; i++) {
      struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
      struct si_texture *sdst;
      struct si_texture *ssrc;
      struct cpu_texture src_cpu, dst_cpu;
      unsigned max_width, max_height, max_depth, j, num;
      unsigned gfx_blits = 0, cs_blits = 0, max_tex_side_gen;
      unsigned max_tex_layers;
      bool pass;
      bool do_partial_copies = rand() & 1;

      /* generate a random test case */
      tsrc.target = tdst.target = PIPE_TEXTURE_2D_ARRAY;
      tsrc.depth0 = tdst.depth0 = 1;

      tsrc.format = tdst.format = choose_format();

      max_tex_side_gen = generate_max_tex_side(max_tex_side);
      max_tex_layers = rand() % 4 ? 1 : 5;

      tsrc.width0 = (rand() % max_tex_side_gen) + 1;
      tsrc.height0 = (rand() % max_tex_side_gen) + 1;
      tsrc.array_size = (rand() % max_tex_layers) + 1;

      /* Subsampled formats pack 2 pixels per block, so the width must be
       * even. */
      if (tsrc.format == PIPE_FORMAT_G8R8_B8R8_UNORM)
         tsrc.width0 = align(tsrc.width0, 2);

      /* Have a 1/4 chance of getting power-of-two dimensions. */
      if (rand() % 4 == 0) {
         tsrc.width0 = util_next_power_of_two(tsrc.width0);
         tsrc.height0 = util_next_power_of_two(tsrc.height0);
      }

      if (!do_partial_copies) {
         /* whole-surface copies only, same dimensions */
         tdst = tsrc;
      } else {
         max_tex_side_gen = generate_max_tex_side(max_tex_side);
         max_tex_layers = rand() % 4 ? 1 : 5;

         /* many partial copies, dimensions can be different */
         tdst.width0 = (rand() % max_tex_side_gen) + 1;
         tdst.height0 = (rand() % max_tex_side_gen) + 1;
         tdst.array_size = (rand() % max_tex_layers) + 1;

         /* Have a 1/4 chance of getting power-of-two dimensions. */
         if (rand() % 4 == 0) {
            tdst.width0 = util_next_power_of_two(tdst.width0);
            tdst.height0 = util_next_power_of_two(tdst.height0);
         }
      }

      /* check texture sizes */
      if ((uint64_t)util_format_get_nblocks(tsrc.format, tsrc.width0, tsrc.height0) *
                tsrc.array_size * util_format_get_blocksize(tsrc.format) +
             (uint64_t)util_format_get_nblocks(tdst.format, tdst.width0, tdst.height0) *
                tdst.array_size * util_format_get_blocksize(tdst.format) >
          max_alloc_size) {
         /* too large, try again */
         i--;
         continue;
      }

      /* VRAM + the tiling mode depends on dimensions (3/4 of cases),
       * or GTT + linear only (1/4 of cases)
       */
      tsrc.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;
      tdst.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;

      /* Allocate textures (both the GPU and CPU copies).
       * The CPU will emulate what the GPU should be doing.
       */
      src = screen->resource_create(screen, &tsrc);
      dst = screen->resource_create(screen, &tdst);
      assert(src);
      assert(dst);
      sdst = (struct si_texture *)dst;
      ssrc = (struct si_texture *)src;
      alloc_cpu_texture(&src_cpu, &tsrc);
      alloc_cpu_texture(&dst_cpu, &tdst);

      printf("%4u: dst = (%5u x %5u x %u, %s), "
             " src = (%5u x %5u x %u, %s), format = %s, ",
             i, tdst.width0, tdst.height0, tdst.array_size,
             array_mode_to_string(sscreen, &sdst->surface), tsrc.width0, tsrc.height0,
             tsrc.array_size, array_mode_to_string(sscreen, &ssrc->surface),
             util_format_description(tsrc.format)->name);
      fflush(stdout);

      /* set src pixels */
      set_random_pixels(ctx, src, &src_cpu);

      /* clear dst pixels */
      uint32_t zero = 0;
      si_clear_buffer(sctx, dst, 0, sdst->surface.surf_size, &zero, 4, SI_OP_SYNC_BEFORE_AFTER,
                      SI_COHERENCY_SHADER, SI_AUTO_SELECT_CLEAR_METHOD);
      memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);

      /* preparation */
      max_width = MIN2(tsrc.width0, tdst.width0);
      max_height = MIN2(tsrc.height0, tdst.height0);
      max_depth = MIN2(tsrc.array_size, tdst.array_size);

      num = do_partial_copies ? num_partial_copies : 1;
      for (j = 0; j < num; j++) {
         int width, height, depth;
         int srcx, srcy, srcz, dstx, dsty, dstz;
         struct pipe_box box;
         /* Snapshot the draw/compute counters so we can tell which engine
          * performed this blit. */
         unsigned old_num_draw_calls = sctx->num_draw_calls;
         unsigned old_num_cs_calls = sctx->num_compute_calls;

         if (!do_partial_copies) {
            /* copy whole src to dst */
            width = max_width;
            height = max_height;
            depth = max_depth;

            srcx = srcy = srcz = dstx = dsty = dstz = 0;
         } else {
            /* random sub-rectangle copies from src to dst */
            depth = (rand() % max_depth) + 1;
            srcz = rand() % (tsrc.array_size - depth + 1);
            dstz = rand() % (tdst.array_size - depth + 1);

            /* special code path to hit the tiled partial copies */
            if (!ssrc->surface.is_linear && !sdst->surface.is_linear && rand() & 1) {
               /* 8-aligned sizes and coordinates; skip if the surfaces are
                * too small for an 8x8 block. */
               if (max_width < 8 || max_height < 8)
                  continue;
               width = ((rand() % (max_width / 8)) + 1) * 8;
               height = ((rand() % (max_height / 8)) + 1) * 8;

               srcx = rand() % (tsrc.width0 - width + 1) & ~0x7;
               srcy = rand() % (tsrc.height0 - height + 1) & ~0x7;

               dstx = rand() % (tdst.width0 - width + 1) & ~0x7;
               dsty = rand() % (tdst.height0 - height + 1) & ~0x7;
            } else {
               /* just make sure that it doesn't divide by zero */
               assert(max_width > 0 && max_height > 0);

               width = (rand() % max_width) + 1;
               height = (rand() % max_height) + 1;

               srcx = rand() % (tsrc.width0 - width + 1);
               srcy = rand() % (tsrc.height0 - height + 1);

               dstx = rand() % (tdst.width0 - width + 1);
               dsty = rand() % (tdst.height0 - height + 1);
            }

            /* special code path to hit out-of-bounds reads in L2T */
            if (ssrc->surface.is_linear && !sdst->surface.is_linear && rand() % 4 == 0) {
               srcx = 0;
               srcy = 0;
               srcz = 0;
            }
         }

         /* GPU copy */
         u_box_3d(srcx, srcy, srcz, width, height, depth, &box);
         si_resource_copy_region(ctx, dst, 0, dstx, dsty, dstz, src, 0, &box);

         /* See which engine was used. */
         gfx_blits += sctx->num_draw_calls > old_num_draw_calls;
         cs_blits += sctx->num_compute_calls > old_num_cs_calls;

         /* CPU copy */
         util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride, dst_cpu.layer_stride, dstx, dsty,
                       dstz, width, height, depth, src_cpu.ptr, src_cpu.stride,
                       src_cpu.layer_stride, srcx, srcy, srcz);
      }

      /* Verify the GPU result against the CPU emulation. */
      pass = compare_textures(ctx, dst, &dst_cpu);
      if (pass)
         num_pass++;
      else
         num_fail++;

      printf("BLITs: GFX = %2u, CS = %2u, %s [%u/%u]\n", gfx_blits, cs_blits,
             pass ? "pass" : "fail", num_pass, num_pass + num_fail);

      /* cleanup */
      pipe_resource_reference(&src, NULL);
      pipe_resource_reference(&dst, NULL);
      free(src_cpu.ptr);
      free(dst_cpu.ptr);
   }

   ctx->destroy(ctx);
   exit(0);
}
393