// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */
#include "xe_drm_client.h"

#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_bo_types.h"
#include "xe_device_types.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_hw_engine.h"
#include "xe_pm.h"
#include "xe_trace.h"

/**
 * DOC: DRM Client usage stats
 *
 * The drm/xe driver implements the DRM client usage stats specification as
 * documented in :ref:`drm-client-usage-stats`.
 *
 * Example of the output showing the implemented key-value pairs and the
 * entirety of the currently possible format options:
 *
 * ::
 *
 *	pos: 0
 *	flags: 0100002
 *	mnt_id: 26
 *	ino: 685
 *	drm-driver: xe
 *	drm-client-id: 3
 *	drm-pdev: 0000:03:00.0
 *	drm-total-system: 0
 *	drm-shared-system: 0
 *	drm-active-system: 0
 *	drm-resident-system: 0
 *	drm-purgeable-system: 0
 *	drm-total-gtt: 192 KiB
 *	drm-shared-gtt: 0
 *	drm-active-gtt: 0
 *	drm-resident-gtt: 192 KiB
 *	drm-total-vram0: 23992 KiB
 *	drm-shared-vram0: 16 MiB
 *	drm-active-vram0: 0
 *	drm-resident-vram0: 23992 KiB
 *	drm-total-stolen: 0
 *	drm-shared-stolen: 0
 *	drm-active-stolen: 0
 *	drm-resident-stolen: 0
 *	drm-cycles-rcs: 28257900
 *	drm-total-cycles-rcs: 7655183225
 *	drm-cycles-bcs: 0
 *	drm-total-cycles-bcs: 7655183225
 *	drm-cycles-vcs: 0
 *	drm-total-cycles-vcs: 7655183225
 *	drm-engine-capacity-vcs: 2
 *	drm-cycles-vecs: 0
 *	drm-total-cycles-vecs: 7655183225
 *	drm-engine-capacity-vecs: 2
 *	drm-cycles-ccs: 0
 *	drm-total-cycles-ccs: 7655183225
 *	drm-engine-capacity-ccs: 4
 *
 * Possible `drm-cycles-` key names are: `rcs`, `ccs`, `bcs`, `vcs`, `vecs` and
 * `other`.
 */
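
/*
 * Illustrative sketch, not part of the fdinfo specification or of this
 * driver: a monitoring tool can turn the counters above into a utilisation
 * percentage by reading the fdinfo file twice and dividing the delta of
 * `drm-cycles-<class>` by the delta of `drm-total-cycles-<class>`. The
 * *_t0/*_t1 names below are hypothetical 64-bit values holding the two
 * samples of the same key:
 *
 *	busy  = cycles_t1 - cycles_t0;
 *	total = total_cycles_t1 - total_cycles_t0;
 *	util  = total ? busy * 100 / total : 0;
 *
 * If `drm-engine-capacity-<class>` is greater than one, the result can
 * additionally be divided by the capacity to normalise it to a single engine.
 */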

/**
 * xe_drm_client_alloc() - Allocate drm client
 * @void: No arg
 *
 * Allocate the drm client struct used to track a client's memory allocations
 * for the lifetime of the client. Call this API whenever a new client opens
 * the xe device.
 *
 * Return: pointer to client struct or NULL if allocation fails
 */
struct xe_drm_client *xe_drm_client_alloc(void)
{
	struct xe_drm_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	kref_init(&client->kref);

#ifdef CONFIG_PROC_FS
	spin_lock_init(&client->bos_lock);
	INIT_LIST_HEAD(&client->bos_list);
#endif
	return client;
}

/**
 * __xe_drm_client_free() - Free client struct
 * @kref: The reference
 *
 * This frees the client struct. It is called via xe_drm_client_put() once the
 * last reference to the client is dropped, normally when the drm client
 * closes the xe device.
 *
 * Return: void
 */
void __xe_drm_client_free(struct kref *kref)
{
	struct xe_drm_client *client =
		container_of(kref, typeof(*client), kref);

	kfree(client);
}

#ifdef CONFIG_PROC_FS
/**
 * xe_drm_client_add_bo() - Add BO for tracking client mem usage
 * @client: The drm client ptr
 * @bo: The xe BO ptr
 *
 * Call this function for every BO created by an individual drm client so that
 * the client's memory usage can be tracked.
 *
 * Return: void
 */
void xe_drm_client_add_bo(struct xe_drm_client *client,
			  struct xe_bo *bo)
{
	XE_WARN_ON(bo->client);
	XE_WARN_ON(!list_empty(&bo->client_link));

	spin_lock(&client->bos_lock);
	bo->client = xe_drm_client_get(client);
	list_add_tail(&bo->client_link, &client->bos_list);
	spin_unlock(&client->bos_lock);
}

/**
 * xe_drm_client_remove_bo() - Remove BO for tracking client mem usage
 * @bo: The xe BO ptr
 *
 * Call this function when a BO is released by an individual drm client so
 * that it is no longer accounted in the client's memory usage.
 *
 * Return: void
 */
void xe_drm_client_remove_bo(struct xe_bo *bo)
{
	struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
	struct xe_drm_client *client = bo->client;

	xe_assert(xe, !kref_read(&bo->ttm.base.refcount));

	spin_lock(&client->bos_lock);
	list_del_init(&bo->client_link);
	spin_unlock(&client->bos_lock);

	xe_drm_client_put(client);
}
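
/*
 * Illustrative sketch of how the two helpers above pair up over a BO's
 * lifetime. The call sites are hypothetical and only show the intended
 * pattern, assuming @xef is the struct xe_file of the owning client:
 *
 *	// object creation path, once the BO is fully set up for the client
 *	xe_drm_client_add_bo(xef->client, bo);
 *
 *	// object destruction path, after the last GEM reference is gone
 *	xe_drm_client_remove_bo(bo);
 */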

static void bo_meminfo(struct xe_bo *bo,
		       struct drm_memory_stats stats[TTM_NUM_MEM_TYPES])
{
	u64 sz = bo->size;
	u32 mem_type = bo->ttm.resource->mem_type;

	xe_bo_assert_held(bo);

	if (drm_gem_object_is_shared_for_memory_stats(&bo->ttm.base))
		stats[mem_type].shared += sz;
	else
		stats[mem_type].private += sz;

	if (xe_bo_has_pages(bo)) {
		stats[mem_type].resident += sz;

		if (!dma_resv_test_signaled(bo->ttm.base.resv,
					    DMA_RESV_USAGE_BOOKKEEP))
			stats[mem_type].active += sz;
		else if (mem_type == XE_PL_SYSTEM)
			stats[mem_type].purgeable += sz;
	}
}

static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_memory_stats stats[TTM_NUM_MEM_TYPES] = {};
	struct xe_file *xef = file->driver_priv;
	struct ttm_device *bdev = &xef->xe->ttm;
	struct ttm_resource_manager *man;
	struct xe_drm_client *client;
	struct drm_gem_object *obj;
	struct xe_bo *bo;
	LLIST_HEAD(deferred);
	unsigned int id;
	u32 mem_type;

	client = xef->client;

	/* Public objects. */
	spin_lock(&file->table_lock);
	idr_for_each_entry(&file->object_idr, obj, id) {
		struct xe_bo *bo = gem_to_xe_bo(obj);

		if (dma_resv_trylock(bo->ttm.base.resv)) {
			bo_meminfo(bo, stats);
			xe_bo_unlock(bo);
		} else {
			xe_bo_get(bo);
			spin_unlock(&file->table_lock);

			xe_bo_lock(bo, false);
			bo_meminfo(bo, stats);
			xe_bo_unlock(bo);

			xe_bo_put(bo);
			spin_lock(&file->table_lock);
		}
	}
	spin_unlock(&file->table_lock);

	/* Internal objects. */
	spin_lock(&client->bos_lock);
	list_for_each_entry(bo, &client->bos_list, client_link) {
		if (!kref_get_unless_zero(&bo->ttm.base.refcount))
			continue;

		if (dma_resv_trylock(bo->ttm.base.resv)) {
			bo_meminfo(bo, stats);
			xe_bo_unlock(bo);
		} else {
			spin_unlock(&client->bos_lock);

			xe_bo_lock(bo, false);
			bo_meminfo(bo, stats);
			xe_bo_unlock(bo);

			spin_lock(&client->bos_lock);
			/* The bo ref will prevent this bo from being removed from the list */
			xe_assert(xef->xe, !list_empty(&bo->client_link));
		}

		xe_bo_put_deferred(bo, &deferred);
	}
	spin_unlock(&client->bos_lock);

	xe_bo_put_commit(&deferred);

	for (mem_type = XE_PL_SYSTEM; mem_type < TTM_NUM_MEM_TYPES; ++mem_type) {
		if (!xe_mem_type_to_name[mem_type])
			continue;

		man = ttm_manager_type(bdev, mem_type);

		if (man) {
			drm_print_memory_stats(p,
					       &stats[mem_type],
					       DRM_GEM_OBJECT_RESIDENT |
					       (mem_type != XE_PL_SYSTEM ? 0 :
					       DRM_GEM_OBJECT_PURGEABLE),
					       xe_mem_type_to_name[mem_type]);
		}
	}
}

static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
{
	unsigned long class, i, gt_id, capacity[XE_ENGINE_CLASS_MAX] = { };
	struct xe_file *xef = file->driver_priv;
	struct xe_device *xe = xef->xe;
	struct xe_gt *gt;
	struct xe_hw_engine *hwe;
	struct xe_exec_queue *q;
	u64 gpu_timestamp;

	xe_pm_runtime_get(xe);

	/* Accumulate all the exec queues from this client */
	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, i, q)
		xe_exec_queue_update_run_ticks(q);
	mutex_unlock(&xef->exec_queue.lock);

	/* Get the total GPU cycles */
	for_each_gt(gt, xe, gt_id) {
		enum xe_force_wake_domains fw;

		hwe = xe_gt_any_hw_engine(gt);
		if (!hwe)
			continue;

		fw = xe_hw_engine_to_fw_domain(hwe);
		if (xe_force_wake_get(gt_to_fw(gt), fw)) {
			hwe = NULL;
			break;
		}

		gpu_timestamp = xe_hw_engine_read_timestamp(hwe);
		XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), fw));
		break;
	}

	xe_pm_runtime_put(xe);

	if (unlikely(!hwe))
		return;

	for (class = 0; class < XE_ENGINE_CLASS_MAX; class++) {
		const char *class_name;

		for_each_gt(gt, xe, gt_id)
			capacity[class] += gt->user_engines.instances_per_class[class];

		/*
		 * Engines may be fused off or not exposed to userspace. Don't
		 * return anything if this entire class is not available.
		 */
		if (!capacity[class])
			continue;

		class_name = xe_hw_engine_class_to_str(class);
		drm_printf(p, "drm-cycles-%s:\t%llu\n",
			   class_name, xef->run_ticks[class]);
		drm_printf(p, "drm-total-cycles-%s:\t%llu\n",
			   class_name, gpu_timestamp);

		if (capacity[class] > 1)
			drm_printf(p, "drm-engine-capacity-%s:\t%lu\n",
				   class_name, capacity[class]);
	}
}

/**
 * xe_drm_client_fdinfo() - Callback for fdinfo interface
 * @p: The drm_printer ptr
 * @file: The drm_file ptr
 *
 * This is the callback for the drm fdinfo interface. Register it in the drm
 * driver ops as the show_fdinfo hook.
 *
 * Return: void
 */
void xe_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	show_meminfo(p, file);
	show_run_ticks(p, file);
}
#endif
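
/*
 * Illustrative sketch of where the callback above is registered. The actual
 * struct drm_driver definition lives elsewhere in the driver; this only shows
 * the intended hookup point, with the remaining fields omitted:
 *
 *	static const struct drm_driver driver = {
 *		...
 *	#ifdef CONFIG_PROC_FS
 *		.show_fdinfo = xe_drm_client_fdinfo,
 *	#endif
 *	};
 */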