// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

#ifdef __linux__

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    u32 caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));

	len = 0;
	for_each_set_bit(n,
			 (unsigned long *)&caps,
			 show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += snprintf(buf + len, PAGE_SIZE - len,
						"[%x] ", n);
		} else {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
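/*
 * Illustrative usage, not part of the driver: assuming the primary node is
 * card0 and a video engine vcs0 exists, the read-only attributes above are
 * visible from userspace as, e.g.,
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *
 * The engine name and the reported capability strings are examples only;
 * they depend on the actual hardware.
 */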
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would take
	 * to go to sleep, process the interrupt and return to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_nsecs(2))
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
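/*
 * Illustrative tuning example, not part of the driver (paths assume a
 * card0/rcs0 pair exists):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *
 * Writing 0 disables the pre-sleep spin entirely; max_spin_store() rejects
 * anything longer than two scheduler ticks (jiffies_to_nsecs(2)) with
 * -EINVAL.
 */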
static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
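/*
 * Illustrative example, not part of the driver (assumes card0/rcs0): a
 * write takes effect immediately, including rearming the timer of a
 * currently running timeslice, e.g.
 *
 *   # echo 5 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 *
 * The attribute is only created for engines where
 * intel_engine_has_timeslices() is true (see intel_engines_add_sysfs()).
 */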
static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
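/*
 * Illustrative example, not part of the driver (assumes card0/rcs0): both
 * timeouts take milliseconds, and a write to preempt_timeout_ms also
 * rearms the preemption timer if a preemption is already in flight, e.g.
 *
 *   # echo 100 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 *
 * (set_timer_ms() treats 0 as "cancel", so writing 0 while a preemption is
 * pending stops the forced-preemption countdown.)
 */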
static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute *files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}

#endif /* __linux__ */
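/*
 * Illustrative example, not part of the driver (assumes card0/rcs0): the
 * built-in values remain readable under the ".defaults" subdirectory
 * created by add_defaults(), so a tweaked control can be restored, e.g.
 *
 *   # cat /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms \
 *       > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */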
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
#ifdef __linux__
	static const struct attribute *files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
				engine->name);
			break;
		}
	}
#endif /* __linux__ */
}
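/*
 * Resulting sysfs layout, sketched for reference (engine names and the
 * exact set of files vary with the hardware and kernel config):
 *
 *   /sys/class/drm/card0/engine/
 *       rcs0/
 *           name  class  instance  mmio_base
 *           capabilities  known_capabilities
 *           max_busywait_duration_ns  stop_timeout_ms
 *           timeslice_duration_ms  preempt_timeout_ms
 *           heartbeat_interval_ms
 *           .defaults/ (read-only copies of the tunables above)
 *       vcs0/
 *           ...
 */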