// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_selftest.h"

#include "gt/intel_gt.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int igt_client_fill(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

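	/*
	 * Repeatedly fill randomly sized objects with a random value using
	 * the blitter, then verify the result from the CPU, until the
	 * selftest timeout expires.
	 */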
	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

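		/*
		 * huge_gem_object() reports sz bytes to the GPU while backing
		 * it with only phys_sz bytes of pages, keeping the CPU
		 * verification loop below cheap even for large sizes.
		 */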
		obj = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

		/*
		 * XXX: The goal is to move this to get_pages, so try to dirty
		 * the CPU cache first to check that we do the required clflush
		 * before scheduling the blt for !llc platforms. This matches
		 * some version of reality where at get_pages the pages
		 * themselves may not yet be coherent with the GPU (swap-in).
		 * If we are missing the flush then we should see the stale
		 * cache values after we do the set_to_cpu_domain and pick it
		 * up as a test failure.
		 */
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

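		/*
		 * Schedule the fill of val over all backing pages using the
		 * blitter; completion is waited for below when the object is
		 * moved to the CPU domain.
		 */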
		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
						       &obj->mm.page_sizes,
						       val);
		if (err)
			goto err_unpin;

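		/*
		 * Moving to the CPU read domain waits for the blt to finish
		 * and performs any clflush needed before we inspect the
		 * pages from the CPU.
		 */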
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
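	/*
	 * Hitting -ENOMEM with the randomised sizes is not a failure of the
	 * blt path itself, so don't report it as one.
	 */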
	if (err == -ENOMEM)
		err = 0;

	return err;
}

int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_client_fill),
	};

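	/*
	 * The test needs a working blitter: skip quietly if the GT is
	 * already wedged or the platform has no BCS0 engine.
	 */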
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}