/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

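/*
 * struct clflush - deferred clflush of an object's backing pages
 * @base: fence work that performs the flush once its dependencies signal
 * @obj: the object being flushed; holds a page pin and an object
 *	reference for the lifetime of the work
 */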
struct clflush {
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

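/*
 * Flush the CPU cachelines for all backing pages of @obj, then tell any
 * frontbuffer consumers that the CPU writes are now visible.
 */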
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

static void clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	__do_clflush(clflush->obj);
}

static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	i915_gem_object_unpin_pages(clflush->obj);
	i915_gem_object_put(clflush->obj);
}

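/*
 * Fence work callbacks: clflush_work() runs once all awaited fences have
 * signaled, and clflush_release() drops the page pin and object reference
 * taken in clflush_work_create().
 */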
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

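/*
 * Prepare an asynchronous flush: pin the object's pages and take an object
 * reference so they remain valid until the work has run. Returns NULL if
 * the work cannot be allocated or the pages cannot be acquired, in which
 * case the caller falls back to a synchronous flush.
 */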
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	if (__i915_gem_object_get_pages(obj) < 0) {
		kfree(clflush);
		return NULL;
	}

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}

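/**
 * i915_gem_clflush_object - flush an object's pages out of the CPU caches
 * @obj: the object to flush; the caller must hold its reservation lock
 * @flags: I915_CLFLUSH_FORCE to flush even objects the GPU snoops,
 *	I915_CLFLUSH_SYNC to flush immediately rather than via fence work
 *
 * Returns true if a flush was performed or queued, false if the object is
 * already coherent with the GPU and needed no flush.
 */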
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clflush *clflush;

	assert_object_held(obj);

	if (IS_DGFX(i915)) {
		WARN_ON_ONCE(obj->cache_dirty);
		return false;
	}

	/*
	 * Stolen memory is always coherent with the GPU: it is either
	 * explicitly marked as wc by the system, or the system itself is
	 * cache-coherent. Similarly, we only access struct pages through
	 * the CPU cache, so anything not backed by struct pages we consider
	 * to be always coherent and in no need of clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/*
	 * If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = clflush_work_create(obj);
	if (clflush) {
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						i915_fence_timeout(i915),
						I915_FENCE_GFP);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
		/*
		 * We must have successfully populated the pages (since we are
		 * holding a pin on the pages as per the flush worker) to reach
		 * this point, which must mean we have already done the
		 * required flush-on-acquire, hence resetting cache_dirty here
		 * should be safe.
		 */
		obj->cache_dirty = false;
	} else if (obj->mm.pages) {
		__do_clflush(obj);
		obj->cache_dirty = false;
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	return true;
}
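
/*
 * Typical usage (illustrative sketch only; the caller must hold the
 * object lock, as asserted above):
 *
 *	i915_gem_object_lock(obj, NULL);
 *	i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *	i915_gem_object_unlock(obj);
 *
 * Passing 0 for @flags instead queues the flush asynchronously behind
 * the fences already attached to the object's reservation.
 */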