/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"

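/*
 * All clflush fences share a single spinlock and a single fence context;
 * the context is allocated once at driver load by i915_gem_clflush_init().
 */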
static DEFINE_SPINLOCK(clflush_lock);
static u64 clflush_context;

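/*
 * A deferred clflush: the dma_fence is published to waiters, the sw_fence
 * gates the flush on work already tracked in the object's reservation,
 * and the work item performs the actual cache-line flush.
 */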
struct clflush {
	struct dma_fence dma; /* Must be first for dma_fence_free() */
	struct i915_sw_fence wait;
	struct work_struct work;
	struct drm_i915_gem_object *obj;
};

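/*
 * Minimal dma_fence callbacks: the fence identifies itself on the driver's
 * "clflush" timeline, and signaling is always enabled since the worker
 * signals the fence unconditionally.
 */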
static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
	return "clflush";
}

static bool i915_clflush_enable_signaling(struct dma_fence *fence)
{
	return true;
}

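/*
 * Final fence release: finalize the embedded sw_fence, then free the whole
 * struct. dma_fence_free() frees starting from the dma_fence pointer, so
 * the dma member must remain first (asserted by the BUILD_BUG_ON below).
 */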
static void i915_clflush_release(struct dma_fence *fence)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

	i915_sw_fence_fini(&clflush->wait);

	BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
	dma_fence_free(&clflush->dma);
}

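/* Fence ops shared by all clflush fences. */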
static const struct dma_fence_ops i915_clflush_ops = {
	.get_driver_name = i915_clflush_get_driver_name,
	.get_timeline_name = i915_clflush_get_timeline_name,
	.enable_signaling = i915_clflush_enable_signaling,
	.wait = dma_fence_default_wait,
	.release = i915_clflush_release,
};

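/*
 * Flush the CPU cache lines for every backing page, mark the object as
 * clean, then notify frontbuffer tracking of the CPU flush.
 */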
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	drm_clflush_sg(obj->mm.pages);
	obj->cache_dirty = false;

	intel_fb_obj_flush(obj, ORIGIN_CPU);
}

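/*
 * Worker that performs the deferred flush once all prior work on the
 * object has completed: pin the pages, flush them, then signal the fence
 * and drop the references taken when the work was queued.
 */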
static void i915_clflush_work(struct work_struct *work)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;

	if (!obj->cache_dirty)
		goto out;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_put(obj);

	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma);
}

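/*
 * sw_fence callback: once everything we waited on has completed, schedule
 * the flush worker; when the sw_fence is torn down, drop the dma_fence
 * reference it held.
 */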
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
		    enum i915_sw_fence_notify state)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&clflush->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&clflush->dma);
		break;
	}

	return NOTIFY_DONE;
}

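/*
 * Flush the object's backing pages out of the CPU caches. Unless
 * I915_CLFLUSH_SYNC is given, the flush may be performed asynchronously
 * from a worker, ordered behind the object's outstanding work via its
 * reservation object.
 */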
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj))
		return;

	obj->cache_dirty = true;

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
		return;

	trace_i915_gem_object_clflush(obj);

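	/*
	 * Try to flush asynchronously: publish a fence on the object and
	 * defer the clflush to a worker, unless the caller demanded a
	 * synchronous flush.
	 */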
	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), M_DRM, GFP_KERNEL);
	if (clflush) {
		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       clflush_context,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		clflush->obj = i915_gem_object_get(obj);
		INIT_WORK(&clflush->work, i915_clflush_work);

		dma_fence_get(&clflush->dma);

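		/*
		 * Order the flush after everything currently tracked in the
		 * object's reservation, then install our fence as the new
		 * exclusive fence so that later users wait for the flush to
		 * complete.
		 */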
		i915_sw_fence_await_reservation(&clflush->wait,
						obj->resv, NULL,
						true, I915_FENCE_TIMEOUT,
						GFP_KERNEL);

		reservation_object_lock(obj->resv, NULL);
		reservation_object_add_excl_fence(obj->resv, &clflush->dma);
		reservation_object_unlock(obj->resv);

		i915_sw_fence_commit(&clflush->wait);
	} else if (obj->mm.pages) {
		__i915_do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
	}
}

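/* Allocate the fence context shared by all clflush fences at driver load. */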
void i915_gem_clflush_init(struct drm_i915_private *i915)
{
	clflush_context = dma_fence_context_alloc(1);
}