/* xref: /dragonfly/sys/dev/drm/ttm/ttm_execbuf_util.c (revision 50b09fda) */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

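/*
 * Undo the reservations taken so far.  Buffers that were already taken
 * off the LRU lists are unreserved through ttm_bo_unreserve_ticket_locked();
 * the others only have their reserved flag cleared and their waiters woken.
 * Caller must hold lru_lock.
 */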
static void ttm_eu_backoff_reservation_locked(struct list_head *list,
					      struct ww_acquire_ctx *ticket)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_unreserve_ticket_locked(bo, ticket);
			entry->removed = false;

		} else {
			atomic_set(&bo->reserved, 0);
			wake_up_all(&bo->event_queue);
		}
	}
}

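/*
 * Take every reserved buffer off the LRU lists, remembering in put_count
 * how many list references the removal dropped so that they can be
 * released later, outside of lru_lock.  Caller must hold lru_lock.
 */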
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

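/*
 * Release the LRU list references recorded in put_count by
 * ttm_eu_del_from_lru_locked() and reset the counters.  Called after
 * lru_lock has been dropped.
 */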
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

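/*
 * Undo a successful ttm_eu_reserve_buffers(): release all reservations
 * on the list and finish the ww_acquire ticket.
 */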
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	ttm_eu_backoff_reservation_locked(list, ticket);
	ww_acquire_fini(ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, reservation is backed
 * off and -EBUSY is returned, since the buffer cannot be handed to the GPU
 * until the CPU is done with it.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

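/*
 * Typical call sequence, as a rough sketch only (driver_validate() and
 * the fence object below are hypothetical driver code, not part of this
 * file):
 *
 *	struct ww_acquire_ctx ticket;
 *	struct list_head val_list;	initialized and populated with
 *					struct ttm_validate_buffer entries
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list);
 *	if (ret != 0)
 *		return ret;
 *	ret = driver_validate(&val_list);
 *	if (ret != 0) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 *	...submit work to the GPU...
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 */
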
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	ww_acquire_init(ticket, &reservation_ww_class);
	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);

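	/*
	 * Try-reserve all buffers.  On contention we either drop lru_lock
	 * and block on the busy buffer (-EBUSY), or, when the ww_mutex
	 * machinery signals a potential deadlock (-EAGAIN), back off all
	 * reservations and restart from the top.
	 */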
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = ttm_bo_reserve_nolru(bo, true, true, true, ticket);
		switch (ret) {
		case 0:
			break;
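		/*
		 * The buffer is reserved by somebody else: drop lru_lock
		 * (pulling our buffers off the LRU first so nobody evicts
		 * them) and retry the reservation, this time blocking.
		 */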
		case -EBUSY:
			ttm_eu_del_from_lru_locked(list);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ret = ttm_bo_reserve_nolru(bo, true, false,
						   true, ticket);
			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);

			if (!ret)
				break;

			if (unlikely(ret != -EAGAIN))
				goto err;

			/* fallthrough */
		case -EAGAIN:
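			/*
			 * ww_mutex deadlock avoidance: release every
			 * reservation we hold, then take the contended
			 * buffer in the slow path (which may block but
			 * cannot deadlock) and restart the whole loop.
			 */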
			ttm_eu_backoff_reservation_locked(list, ticket);
			lockmgr(&glob->lru_lock, LK_RELEASE);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_reserve_slowpath_nolru(bo, true, ticket);
			if (unlikely(ret != 0))
				goto err_fini;

			lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		default:
			goto err;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	ww_acquire_done(ticket);
	ttm_eu_del_from_lru_locked(list);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	ttm_eu_backoff_reservation_locked(list, ticket);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ttm_eu_list_ref_sub(list);
err_fini:
	ww_acquire_done(ticket);
	ww_acquire_fini(ticket);
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

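/*
 * Attach sync_obj as the new fence to every buffer on the list, unreserve
 * the buffers and finish the ww_acquire ticket.  References to the fences
 * that were replaced are dropped only after all locks have been released.
 */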
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	lockmgr(&glob->lru_lock, LK_EXCLUSIVE);
	lockmgr(&bdev->fence_lock, LK_EXCLUSIVE);

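	/* Swap in the new fence and unreserve each buffer under both locks. */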
	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_unreserve_ticket_locked(bo, ticket);
		entry->reserved = false;
	}
	lockmgr(&bdev->fence_lock, LK_RELEASE);
	lockmgr(&glob->lru_lock, LK_RELEASE);
	ww_acquire_fini(ticket);

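	/* With the locks dropped, release the fences we replaced above. */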
	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);