/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

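/*
 * Unreserve every reserved buffer on @list, returning any buffer that
 * was taken off the LRU back to its LRU list.  Caller must hold the
 * global LRU lock.
 */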
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		ww_mutex_unlock(&bo->resv->lock);
	}
}

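/*
 * Take every reserved buffer on @list off its LRU list, recording in
 * @put_count how many list references must be dropped later via
 * ttm_eu_list_ref_sub().  Caller must hold the global LRU lock.
 */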
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

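/*
 * Drop the buffer object references accumulated by
 * ttm_eu_del_from_lru_locked().  Called without the LRU lock held.
 */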
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

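/*
 * Undo a successful ttm_eu_reserve_buffers() call: return all buffers
 * on @list to the LRU, release their reservations and end the
 * ww acquire context.
 */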
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, the reservation is
 * backed off and -EBUSY is returned; the caller is expected to wait for
 * that buffer to become free for GPU access before retrying.
 *
 * If a buffer is already reserved by another ww_acquire context, the
 * younger context receives -EDEADLK, drops all of its reservations and
 * waits for the contended buffer on the ww_mutex slow path before
 * starting over. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

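/*
 * Illustrative sketch (not part of this file): a typical execbuf path
 * reserves the whole validation list, submits work, then fences the
 * buffers, backing off on failure.  The names my_validate_list,
 * my_submit() and my_fence below are hypothetical.
 *
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &my_validate_list);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = my_submit(&my_validate_list);
 *	if (unlikely(ret != 0)) {
 *		ttm_eu_backoff_reservation(&ticket, &my_validate_list);
 *		return ret;
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &my_validate_list, my_fence);
 *	return 0;
 */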
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	ww_acquire_init(ticket, &reservation_ww_class);

retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			ttm_eu_backoff_reservation_locked(list, ticket);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ww_acquire_done(ticket);
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
err_fini:
	ww_acquire_done(ticket);
	ww_acquire_fini(ticket);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

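/*
 * Attach @sync_obj to every buffer on @list, return the buffers to the
 * LRU, release their reservations and finally drop the references to
 * the sync objects the buffers were previously fenced with.
 */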
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		ww_mutex_unlock(&bo->resv->lock);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);