/*	$NetBSD: intel_context.c,v 1.3 2021/12/19 11:38:04 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_context.c,v 1.3 2021/12/19 11:38:04 riastradh Exp $");

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

#include <linux/nbsd-namespace.h>

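/* Global slab cache from which all intel_context objects are allocated. */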
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}

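/*
 * Allocate the HW state backing this context via ce->ops->alloc(), at most
 * once. Serialised by ce->pin_mutex; CONTEXT_ALLOC_BIT records completion so
 * that repeat callers return without doing any work.
 */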
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

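/*
 * Take an active reference on the context. Unless the context is a barrier
 * context, also preallocate the barrier nodes for ce->engine, dropping the
 * active reference again if that fails.
 */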
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

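/*
 * Pin the context for use on the GPU. The first pin allocates any missing
 * backing state, acquires the active reference and calls ce->ops->pin()
 * under ce->pin_mutex; subsequent pins simply bump ce->pin_count.
 */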
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}

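/*
 * Pin the context state vma high in the GGTT, above the vma's GGTT pin bias,
 * and mark it unshrinkable so it is not reclaimed while in use.
 */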
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}

__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}

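/*
 * First-activation callback for ce->active: pin the ring, the timeline and,
 * if present, the context state image. __intel_context_retire() above undoes
 * this once the last active reference is dropped.
 */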
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

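/*
 * One-time initialisation of a context for the given engine: reference count,
 * context ops, sseu, default ring size and address space, plus the active
 * tracker used by the pinning code above.
 */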
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by adding it to the ce
	 * activity tracker.
	 *
	 * We only need to take one pin on its account; in other words, the
	 * pinned ce is transferred to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

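/*
 * Pin the context just long enough to construct a new request on it, then
 * drop the temporary pin before handing the request (or ERR_PTR) back.
 */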
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif