1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include "i915_drv.h"
26 #include "i915_syncmap.h"
27
static void __intel_timeline_init(struct intel_timeline *tl,
				  struct i915_gem_timeline *parent,
				  u64 context,
				  struct lock_class_key *lockclass,
				  const char *lockname)
{
	/* Bind this per-engine timeline to its parent and stamp it with
	 * its own dma-fence context id.
	 */
	tl->common = parent;
	tl->fence_context = context;

#ifdef CONFIG_DEBUG_SPINLOCK
	/* Give each timeline lock a caller-supplied lockdep class/name. */
	__raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
#else
	lockinit(&tl->lock, "i9tll", 0, 0);
#endif

	/* Start with no requests tracked and an empty sync-point map. */
	init_request_active(&tl->last_request, NULL);
	INIT_LIST_HEAD(&tl->requests);
	i915_syncmap_init(&tl->sync);
}
45
__intel_timeline_fini(struct intel_timeline * tl)46 static void __intel_timeline_fini(struct intel_timeline *tl)
47 {
48 GEM_BUG_ON(!list_empty(&tl->requests));
49
50 i915_syncmap_free(&tl->sync);
51 }
52
__i915_gem_timeline_init(struct drm_i915_private * i915,struct i915_gem_timeline * timeline,const char * name,struct lock_class_key * lockclass,const char * lockname)53 static int __i915_gem_timeline_init(struct drm_i915_private *i915,
54 struct i915_gem_timeline *timeline,
55 const char *name,
56 struct lock_class_key *lockclass,
57 const char *lockname)
58 {
59 unsigned int i;
60 u64 fences;
61
62 lockdep_assert_held(&i915->drm.struct_mutex);
63
64 /*
65 * Ideally we want a set of engines on a single leaf as we expect
66 * to mostly be tracking synchronisation between engines. It is not
67 * a huge issue if this is not the case, but we may want to mitigate
68 * any page crossing penalties if they become an issue.
69 */
70 BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
71
72 timeline->i915 = i915;
73 timeline->name = kstrdup(name ?: "[kernel]", M_DRM);
74 if (!timeline->name)
75 return -ENOMEM;
76
77 list_add(&timeline->link, &i915->gt.timelines);
78
79 /* Called during early_init before we know how many engines there are */
80 fences = dma_fence_context_alloc(ARRAY_SIZE(timeline->engine));
81 for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
82 __intel_timeline_init(&timeline->engine[i],
83 timeline, fences++,
84 lockclass, lockname);
85
86 return 0;
87 }
88
i915_gem_timeline_init(struct drm_i915_private * i915,struct i915_gem_timeline * timeline,const char * name)89 int i915_gem_timeline_init(struct drm_i915_private *i915,
90 struct i915_gem_timeline *timeline,
91 const char *name)
92 {
93 static struct lock_class_key class;
94
95 return __i915_gem_timeline_init(i915, timeline, name,
96 &class, "&timeline->lock");
97 }
98
i915_gem_timeline_init__global(struct drm_i915_private * i915)99 int i915_gem_timeline_init__global(struct drm_i915_private *i915)
100 {
101 static struct lock_class_key class;
102
103 return __i915_gem_timeline_init(i915,
104 &i915->gt.global_timeline,
105 "[execution]",
106 &class, "&global_timeline->lock");
107 }
108
/**
 * i915_gem_timelines_mark_idle - called when the driver idles
 * @i915: the drm_i915_private device
 *
 * When the driver is completely idle, we know that all of our sync points
 * have been signaled and our tracking is then entirely redundant. Any request
 * to wait upon an older sync point will be completed instantly as we know
 * the fence is signaled and therefore we will not even look them up in the
 * sync point map.
 */
i915_gem_timelines_mark_idle(struct drm_i915_private * i915)119 void i915_gem_timelines_mark_idle(struct drm_i915_private *i915)
120 {
121 struct i915_gem_timeline *timeline;
122 int i;
123
124 lockdep_assert_held(&i915->drm.struct_mutex);
125
126 list_for_each_entry(timeline, &i915->gt.timelines, link) {
127 for (i = 0; i < ARRAY_SIZE(timeline->engine); i++) {
128 struct intel_timeline *tl = &timeline->engine[i];
129
130 /*
131 * All known fences are completed so we can scrap
132 * the current sync point tracking and start afresh,
133 * any attempt to wait upon a previous sync point
134 * will be skipped as the fence was signaled.
135 */
136 i915_syncmap_free(&tl->sync);
137 }
138 }
139 }
140
i915_gem_timeline_fini(struct i915_gem_timeline * timeline)141 void i915_gem_timeline_fini(struct i915_gem_timeline *timeline)
142 {
143 int i;
144
145 lockdep_assert_held(&timeline->i915->drm.struct_mutex);
146
147 for (i = 0; i < ARRAY_SIZE(timeline->engine); i++)
148 __intel_timeline_fini(&timeline->engine[i]);
149
150 list_del(&timeline->link);
151 kfree(timeline->name);
152 }
153
154 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
155 #include "selftests/mock_timeline.c"
156 #include "selftests/i915_gem_timeline.c"
157 #endif
158