/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/export.h>
#include <linux/wait.h>

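/*
 * Undo the reservations taken so far: put any buffer that was taken off
 * its LRU list back on it, clear the reserved flag and wake up anyone
 * waiting on the buffer.  Called with the global LRU lock held.
 */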
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

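/*
 * Take every reserved buffer in the list off its LRU list so it cannot
 * be evicted while it is being validated.  The LRU references removed
 * here are remembered in put_count and dropped later by
 * ttm_eu_list_ref_sub().  Called with the global LRU lock held.
 */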
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

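/*
 * Drop the LRU list references accumulated in put_count by
 * ttm_eu_del_from_lru_locked().  Called after the LRU lock has been
 * dropped.
 */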
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

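/*
 * Release all buffers reserved through ttm_eu_reserve_buffers(),
 * putting them back on their LRU lists.  Used when a validation
 * attempt has to be abandoned before the buffers are fenced.
 */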
void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

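/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): build one ttm_validate_buffer entry per buffer object touched
 * by a submission, reserve them all, and either fence them once the work
 * is queued or back off on failure.  Since ttm_eu_reserve_buffers()
 * leaves nothing reserved on error, only a later failure needs an
 * explicit backoff.  NUM_BOS, my_bos[], my_submit() and my_fence are
 * illustrative assumptions, not real symbols.
 *
 *	struct ttm_validate_buffer val_bufs[NUM_BOS];
 *	struct list_head list;
 *	int i, ret;
 *
 *	INIT_LIST_HEAD(&list);
 *	for (i = 0; i < NUM_BOS; i++) {
 *		val_bufs[i].bo = my_bos[i];
 *		list_add_tail(&val_bufs[i].head, &list);
 *	}
 *
 *	ret = ttm_eu_reserve_buffers(&list);
 *	if (ret != 0)
 *		return ret;
 *
 *	ret = my_submit();
 *	if (ret != 0) {
 *		ttm_eu_backoff_reservation(&list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&list, my_fence);
 */
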
int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	val_seq = entry->bo->bdev->val_seq++;

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		int owned;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
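			/*
			 * Another thread holds the reservation.  Pull our
			 * buffers off the LRU, drop the LRU lock if we hold
			 * it, and retry the reservation as a blocking wait.
			 */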
			ttm_eu_del_from_lru_locked(list);
			owned = lockstatus(&glob->lru_lock, curthread);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, val_seq);
			if (owned == LK_EXCLUSIVE)
				lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);

			/*
			 * temporarily increase sequence number every retry,
			 * to prevent us from seeing our old reservation
			 * sequence when someone else reserved the buffer,
			 * but hasn't updated the seq_valid/seqno members yet.
			 */
			val_seq = entry->bo->bdev->val_seq++;

			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
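			/*
			 * Sleep (without holding the LRU lock) until the
			 * contending reservation is released, then reserve
			 * with the bumped sequence number and restart the
			 * whole list.
			 */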
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
			if (unlikely(ret != 0))
				return ret;
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);

	return 0;

err:
	ttm_eu_backoff_reservation_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

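/*
 * Attach the given sync object (fence) to every buffer in the list and
 * release the reservations taken by ttm_eu_reserve_buffers().  The
 * buffers go back on their LRU lists and the previous sync objects are
 * unreferenced.
 */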
void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);

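	/*
	 * Unreference the replaced sync objects only after the fence lock
	 * and the LRU lock have been dropped.
	 */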
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);