// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int __igt_client_fill(struct intel_engine_cs *engine)
{
	struct intel_context *ce = engine->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

	intel_engine_pm_get(engine);
	do {
		/*
		 * Pick a random virtual size, capped to a fraction of the
		 * address space, and a physical backing size small enough
		 * for a single blt block.
		 */
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4,
			       prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		obj = huge_gem_object(engine->i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * XXX: The goal is to move this into get_pages, so try to
		 * dirty the CPU cache first to check that we do the
		 * required clflush before scheduling the blt for !llc
		 * platforms. This matches some version of reality where
		 * at get_pages the pages themselves may not yet be
		 * coherent with the GPU (swap-in). If we are missing the
		 * flush then we should see the stale cache values after
		 * we do the set_to_cpu_domain and pick it up as a test
		 * failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

		/* Fill the backing store with val using the blitter */
		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;

		/* Wait for the blt and pull the result back to the CPU */
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

		/* Every dword of the backing store must now read as val */
		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	/* On success, clean up via the same unwind path */
	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	if (err == -ENOMEM)
		err = 0;
	intel_engine_pm_put(engine);

	return err;
}

static int igt_client_fill(void *arg)
{
	int inst = 0;

	/* Run the fill on every copy (blt) engine exposed to userspace */
	do {
		struct intel_engine_cs *engine;
		int err;

		engine = intel_engine_lookup_user(arg,
						  I915_ENGINE_CLASS_COPY,
						  inst++);
		if (!engine)
			return 0;

		err = __igt_client_fill(engine);
		if (err == -ENOMEM)
			err = 0;
		if (err)
			return err;
	} while (1);
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	};

	/* Nothing to test if the GPU is terminally wedged */
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	/* The client blt requires a copy (blitter) engine */
	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}
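
/*
 * Usage note (a sketch based on the surrounding i915 selftest machinery,
 * not part of this file upstream): with CONFIG_DRM_I915_SELFTEST enabled,
 * live selftests such as igt_client_fill above are run at module load,
 * e.g. via the i915.live_selftests=-1 module parameter handled in
 * i915_selftest.c. The random seed reported in dmesg can be fed back
 * through i915.st_random_seed to reproduce a failing run; both parameter
 * names are recalled from the wider tree rather than taken from this file.
 */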