/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_wakeref.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

/*
 * struct intel_gt - per-GT state container.
 *
 * Aggregates everything scoped to one GT: its engines, shared timelines,
 * request retirement worker, reset machinery, power-management blocks
 * (llc/rc6/rps), wakeref tracking and interrupt mask bookkeeping.
 */
struct intel_gt {
	struct drm_i915_private *i915;	/* backpointer to the owning device */
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;

		/* Pack multiple timelines' seqnos into the same page */
		spinlock_t hwsp_lock;
		struct list_head hwsp_free_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct intel_wakeref wakeref;
	/* wakerefs taken on behalf of userspace, on top of @wakeref */
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	ktime_t last_init_time;

	struct i915_vma *scratch;

	/*
	 * NOTE(review): irq_lock presumably serializes updates to the
	 * interrupt/PM mask shadows below — confirm against the irq code.
	 */
	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	/* All engines on this GT, indexed by global engine id */
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	/* The same engines, indexed by (class, instance) pair */
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
};

/*
 * Byte offsets of reserved slots within the GT scratch buffer
 * (intel_gt.scratch).  The size occupied by each slot is noted
 * per entry; offsets must not overlap a preceding slot's extent.
 */
enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};

#endif /* __INTEL_GT_TYPES__ */