/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	kvfree(list);
}

static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list)
		amdgpu_bo_unref(&e->robj);

	/* Defer the actual free until concurrent RCU lookups
	 * (amdgpu_bo_list_get) have finished.
	 */
	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}

int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  unsigned num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	/* Reject entry counts that would overflow the allocation size. */
	if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
			/ sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = adev->gds.gds_gfx_bo;
	list->gws_obj = adev->gds.gws_gfx_bo;
	list->oa_obj = adev->gds.oa_gfx_bo;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
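			/* Userptr BOs are grouped at the tail of the array
			 * (filled back to front) so the CS code can iterate
			 * over just them when fetching their backing pages;
			 * see first_userptr below.
			 */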
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	*result = list;
	return 0;

error_free:
	/* Entries were filled in from both ends of the array: normal BOs
	 * from the front, userptr BOs from the back.  Release both halves;
	 * untouched slots in the middle are still zeroed from the memset
	 * above, and amdgpu_bo_unref() ignores NULL pointers.
	 */
	for (i = first_userptr; i < num_entries; ++i)
		amdgpu_bo_unref(&array[i].robj);
	while (last_entry--)
		amdgpu_bo_unref(&array[last_entry].robj);
	kvfree(list);
	return r;
}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		unsigned priority = e->priority;

		if (!e->robj->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
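	/* list_splice() inserts each bucket at the head of @validated, so
	 * splicing the buckets in ascending index order leaves the highest
	 * priority entries at the front of the resulting list.
	 */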
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		/* The userspace entry size differs from ours: copy the
		 * common prefix of each entry and zero-fill the rest.
		 */
		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0)
			goto error_put_list;

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}
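/*
 * Userspace usage sketch (not part of this driver; shown for reference
 * only).  A minimal illustration of how a client might exercise
 * amdgpu_bo_list_ioctl() above, assuming an already-open render node fd
 * and GEM handles obtained elsewhere; create_bo_list() is a hypothetical
 * helper name.  Struct, flag, and ioctl names come from the amdgpu UAPI
 * header (include/uapi/drm/amdgpu_drm.h, installed by libdrm).
 *
 *	#include <errno.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <amdgpu_drm.h>
 *
 *	static int create_bo_list(int fd, const uint32_t *handles,
 *				  uint32_t count, uint32_t *list_handle)
 *	{
 *		struct drm_amdgpu_bo_list_entry entries[count];
 *		union drm_amdgpu_bo_list args;
 *		uint32_t i;
 *
 *		for (i = 0; i < count; ++i) {
 *			entries[i].bo_handle = handles[i];
 *			entries[i].bo_priority = 0;	// default priority
 *		}
 *
 *		memset(&args, 0, sizeof(args));
 *		args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
 *		args.in.bo_number = count;
 *		args.in.bo_info_size = sizeof(entries[0]);
 *		args.in.bo_info_ptr = (uintptr_t)entries;
 *
 *		if (ioctl(fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
 *			return -errno;
 *
 *		*list_handle = args.out.list_handle;
 *		return 0;
 *	}
 *
 * The returned handle is what the CS ioctl consumes as bo_list_handle;
 * passing it back with AMDGPU_BO_LIST_OP_DESTROY releases the list.
 */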