/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>

#include <linux/ioport.h>
#include <linux/seq_file.h>
#include <linux/workqueue.h>

#include "i915_reg.h"

struct drm_i915_private;

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3 = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VEBOX0 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1 = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
			  i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv,
			    i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	struct notifier_block pmic_bus_access_nb;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	u32 fw_set;
	u32 fw_clear;
	u32 fw_reset;

	struct intel_uncore_forcewake_domain {
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct hrtimer timer;
		i915_reg_t reg_set;
		i915_reg_t reg_ack;
	} fw_domain[FW_DOMAIN_ID_COUNT];

	struct {
		unsigned int count;

		int saved_mmio_check;
		int saved_mmio_debug;
	} user_forcewake;

	int unclaimed_mmio_check;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
	for (tmp__ = (mask__); \
	     tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)

#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
	for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
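
/*
 * Usage sketch (illustrative only, not taken from the driver): tmp__ is
 * caller-provided scratch storage for the mask being walked, so a typical
 * loop over all initialised domains looks like
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp;
 *
 *	for_each_fw_domain(domain, dev_priv, tmp)
 *		domain->wake_count = 0;
 */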

void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
void intel_uncore_init(struct drm_i915_private *dev_priv);
void intel_uncore_prune(struct drm_i915_private *dev_priv);
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);

u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
void assert_forcewakes_active(struct drm_i915_private *dev_priv,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
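
/*
 * Usage sketch (illustrative only): the __locked variants are meant for
 * callers that already hold uncore.lock and use the lockless
 * I915_READ_FW()/I915_WRITE_FW() accessors; reg and bit below stand in for
 * whatever register and value the caller needs.
 *
 *	enum forcewake_domains fw;
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(dev_priv, fw);
 *	I915_WRITE_FW(reg, I915_READ_FW(reg) | bit);
 *	intel_uncore_forcewake_put__locked(dev_priv, fw);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */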

void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);

int __intel_wait_for_register(struct drm_i915_private *dev_priv,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
					 timeout_ms, NULL);
}
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
			       i915_reg_t reg,
			       u32 mask,
			       u32 value,
			       unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
					    2, timeout_ms, NULL);
}

#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))

#endif /* !__INTEL_UNCORE_H__ */