/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__

#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/types.h>

#include "i915_reg_defs.h"

struct drm_device;
struct drm_i915_private;
struct intel_runtime_pm;
struct intel_uncore;
struct intel_gt;

struct intel_uncore_mmio_debug {
	spinlock_t lock; /* lock is also taken in irq contexts. */
	int unclaimed_mmio_check;
	int saved_mmio_check;
	u32 suspend_count;
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_GT,	/* also includes blitter engine */
	FW_DOMAIN_ID_MEDIA,
	FW_DOMAIN_ID_MEDIA_VDBOX0,
	FW_DOMAIN_ID_MEDIA_VDBOX1,
	FW_DOMAIN_ID_MEDIA_VDBOX2,
	FW_DOMAIN_ID_MEDIA_VDBOX3,
	FW_DOMAIN_ID_MEDIA_VDBOX4,
	FW_DOMAIN_ID_MEDIA_VDBOX5,
	FW_DOMAIN_ID_MEDIA_VDBOX6,
	FW_DOMAIN_ID_MEDIA_VDBOX7,
	FW_DOMAIN_ID_MEDIA_VEBOX0,
	FW_DOMAIN_ID_MEDIA_VEBOX1,
	FW_DOMAIN_ID_MEDIA_VEBOX2,
	FW_DOMAIN_ID_MEDIA_VEBOX3,
	FW_DOMAIN_ID_GSC,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER	= BIT(FW_DOMAIN_ID_RENDER),
	FORCEWAKE_GT		= BIT(FW_DOMAIN_ID_GT),
	FORCEWAKE_MEDIA		= BIT(FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_MEDIA_VDBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX0),
	FORCEWAKE_MEDIA_VDBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
	FORCEWAKE_MEDIA_VDBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
	FORCEWAKE_MEDIA_VDBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
	FORCEWAKE_MEDIA_VDBOX4	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX4),
	FORCEWAKE_MEDIA_VDBOX5	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX5),
	FORCEWAKE_MEDIA_VDBOX6	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX6),
	FORCEWAKE_MEDIA_VDBOX7	= BIT(FW_DOMAIN_ID_MEDIA_VDBOX7),
	FORCEWAKE_MEDIA_VEBOX0	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
	FORCEWAKE_MEDIA_VEBOX1	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
	FORCEWAKE_MEDIA_VEBOX2	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX2),
	FORCEWAKE_MEDIA_VEBOX3	= BIT(FW_DOMAIN_ID_MEDIA_VEBOX3),
	FORCEWAKE_GSC		= BIT(FW_DOMAIN_ID_GSC),

	FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
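
/*
 * Each domain is a single bit, so requests covering several power wells can
 * be combined with bitwise OR, e.g. (illustrative only):
 *
 *	enum forcewake_domains fw = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;
 *
 * FORCEWAKE_ALL is simply the mask with every possible domain bit set.
 */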

struct intel_uncore_fw_get {
	void (*force_wake_get)(struct intel_uncore *uncore,
			       enum forcewake_domains domains);
};

struct intel_uncore_funcs {
	enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
						  i915_reg_t r);
	enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
						   i915_reg_t r);

	u8 (*mmio_readb)(struct intel_uncore *uncore,
			 i915_reg_t r, bool trace);
	u16 (*mmio_readw)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u32 (*mmio_readl)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);
	u64 (*mmio_readq)(struct intel_uncore *uncore,
			  i915_reg_t r, bool trace);

	void (*mmio_writeb)(struct intel_uncore *uncore,
			    i915_reg_t r, u8 val, bool trace);
	void (*mmio_writew)(struct intel_uncore *uncore,
			    i915_reg_t r, u16 val, bool trace);
	void (*mmio_writel)(struct intel_uncore *uncore,
			    i915_reg_t r, u32 val, bool trace);
};

struct intel_forcewake_range {
	u32 start;
	u32 end;

	enum forcewake_domains domains;
};

/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
struct i915_range {
	u32 start;
	u32 end;
};

struct intel_uncore {
	void __iomem *regs;

	struct drm_i915_private *i915;
	struct intel_gt *gt;
	struct intel_runtime_pm *rpm;

	spinlock_t lock; /* lock is also taken in irq contexts. */

	/*
	 * Do we need to apply an additional offset to reach the beginning
	 * of the basic non-engine GT registers (referred to as "GSI" on
	 * newer platforms, or "GT block" on older platforms)?  If so, we'll
	 * track that here and apply it transparently to registers in the
	 * appropriate range to maintain compatibility with our existing
	 * register definitions and GT code.
	 */
	u32 gsi_offset;

	unsigned int flags;
#define UNCORE_HAS_FORCEWAKE		BIT(0)
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED	BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED	BIT(2)
#define UNCORE_HAS_FIFO			BIT(3)
#define UNCORE_NEEDS_FLR_ON_FINI	BIT(4)

	const struct intel_forcewake_range *fw_domains_table;
	unsigned int fw_domains_table_entries;

	/*
	 * Shadowed registers are special cases where we can safely write
	 * to the register *without* grabbing forcewake.
	 */
	const struct i915_range *shadowed_reg_table;
	unsigned int shadowed_reg_table_entries;

	struct notifier_block pmic_bus_access_nb;
	const struct intel_uncore_fw_get *fw_get_funcs;
	struct intel_uncore_funcs funcs;

	unsigned int fifo_count;

	enum forcewake_domains fw_domains;
	enum forcewake_domains fw_domains_active;
	enum forcewake_domains fw_domains_timer;
	enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */

	struct intel_uncore_forcewake_domain {
		struct intel_uncore *uncore;
		enum forcewake_domain_id id;
		enum forcewake_domains mask;
		unsigned int wake_count;
		bool active;
		struct hrtimer timer;
		u32 __iomem *reg_set;
		u32 __iomem *reg_ack;
	} *fw_domain[FW_DOMAIN_ID_COUNT];

	unsigned int user_forcewake_count;

	struct intel_uncore_mmio_debug *debug;
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
	for (tmp__ = (mask__); tmp__ ;) \
		for_each_if(domain__ = (uncore__)->fw_domain[__mask_next_bit(tmp__)])

#define for_each_fw_domain(domain__, uncore__, tmp__) \
	for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
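
/*
 * A minimal usage sketch for the iterators above (hypothetical debug helper,
 * not part of this header): walk every initialised domain and report its
 * wake count.
 *
 *	struct intel_uncore_forcewake_domain *domain;
 *	unsigned int tmp;
 *
 *	for_each_fw_domain(domain, uncore, tmp)
 *		pr_debug("%s: wake_count %u\n",
 *			 intel_uncore_forcewake_domain_to_str(domain->id),
 *			 domain->wake_count);
 */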

static inline bool
intel_uncore_has_forcewake(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FORCEWAKE;
}

static inline bool
intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
}

static inline bool
intel_uncore_has_fifo(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_HAS_FIFO;
}

static inline bool
intel_uncore_needs_flr_on_fini(const struct intel_uncore *uncore)
{
	return uncore->flags & UNCORE_NEEDS_FLR_ON_FINI;
}

static inline bool
intel_uncore_set_flr_on_fini(struct intel_uncore *uncore)
{
	return uncore->flags |= UNCORE_NEEDS_FLR_ON_FINI;
}
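
/*
 * These predicates gate platform-dependent paths; e.g. a caller probing for
 * unclaimed accesses might do (sketch; check_for_unclaimed_mmio() is named
 * here for illustration only):
 *
 *	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
 *		check_for_unclaimed_mmio(uncore);
 */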

void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
			     struct intel_gt *gt);
int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr);
int intel_uncore_init_mmio(struct intel_uncore *uncore);
void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
					  struct intel_gt *gt);
bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
void intel_uncore_cleanup_mmio(struct intel_uncore *uncore);
void intel_uncore_fini_mmio(struct drm_device *dev, void *data);
void intel_uncore_suspend(struct intel_uncore *uncore);
void intel_uncore_resume_early(struct intel_uncore *uncore);
void intel_uncore_runtime_resume(struct intel_uncore *uncore);

void assert_forcewakes_inactive(struct intel_uncore *uncore);
void assert_forcewakes_active(struct intel_uncore *uncore,
			      enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
			       i915_reg_t reg, unsigned int op);
#define FW_REG_READ  (1)
#define FW_REG_WRITE (2)

void intel_uncore_forcewake_get(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
				  enum forcewake_domains fw_domains);

/*
 * Like above but the caller must manage the uncore.lock itself.
 * Must be used with intel_uncore_read_fw() and friends.
 */
void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
					enum forcewake_domains domains);

void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);

int __intel_wait_for_register(struct intel_uncore *uncore,
			      i915_reg_t reg,
			      u32 mask,
			      u32 value,
			      unsigned int fast_timeout_us,
			      unsigned int slow_timeout_ms,
			      u32 *out_value);
static inline int
intel_wait_for_register(struct intel_uncore *uncore,
			i915_reg_t reg,
			u32 mask,
			u32 value,
			unsigned int timeout_ms)
{
	return __intel_wait_for_register(uncore, reg, mask, value, 2,
					 timeout_ms, NULL);
}
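
/*
 * The wait is "poll reg until (reg & mask) == value, or give up after
 * timeout_ms". A reset handshake might look like this (sketch; the register
 * and bit names are illustrative):
 *
 *	err = intel_wait_for_register(uncore, GEN6_GDRST,
 *				      GEN6_GRDOM_RENDER, 0, 500);
 *	if (err)
 *		... reset bit never cleared, handle the timeout ...
 */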

int __intel_wait_for_register_fw(struct intel_uncore *uncore,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value);
static inline int
intel_wait_for_register_fw(struct intel_uncore *uncore,
			   i915_reg_t reg,
			   u32 mask,
			   u32 value,
			   unsigned int timeout_ms)
{
	return __intel_wait_for_register_fw(uncore, reg, mask, value,
					    2, timeout_ms, NULL);
}

#define IS_GSI_REG(reg) ((reg) < 0x40000)

/* register access functions */
#define __raw_read(x__, s__) \
static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
					    i915_reg_t reg) \
{ \
	u32 offset = i915_mmio_reg_offset(reg); \
	if (IS_GSI_REG(offset)) \
		offset += uncore->gsi_offset; \
	return read##s__(uncore->regs + offset); \
}

#define __raw_write(x__, s__) \
static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
					   i915_reg_t reg, u##x__ val) \
{ \
	u32 offset = i915_mmio_reg_offset(reg); \
	if (IS_GSI_REG(offset)) \
		offset += uncore->gsi_offset; \
	write##s__(val, uncore->regs + offset); \
}
__raw_read(8, b)
__raw_read(16, w)
__raw_read(32, l)
__raw_read(64, q)

__raw_write(8, b)
__raw_write(16, w)
__raw_write(32, l)
__raw_write(64, q)

#undef __raw_read
#undef __raw_write

#define __uncore_read(name__, x__, s__, trace__) \
static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
					   i915_reg_t reg) \
{ \
	return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
}

#define __uncore_write(name__, x__, s__, trace__) \
static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
					 i915_reg_t reg, u##x__ val) \
{ \
	uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
}

__uncore_read(read8, 8, b, true)
__uncore_read(read16, 16, w, true)
__uncore_read(read, 32, l, true)
__uncore_read(read16_notrace, 16, w, false)
__uncore_read(read_notrace, 32, l, false)

__uncore_write(write8, 8, b, true)
__uncore_write(write16, 16, w, true)
__uncore_write(write, 32, l, true)
__uncore_write(write_notrace, 32, l, false)

/*
 * Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support intel_uncore_write64,
 * or uncore->funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
__uncore_read(read64, 64, q, true)
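
/*
 * Where a 64-bit counter can tear between its two halves, prefer the
 * intel_uncore_read64_2x32() helper further down, which re-reads the upper
 * half until it is stable. Sketch (register names illustrative):
 *
 *	u64 ts = intel_uncore_read64_2x32(uncore,
 *					  RING_TIMESTAMP(base),
 *					  RING_TIMESTAMP_UDW(base));
 */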

#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))

#undef __uncore_read
#undef __uncore_write

/*
 * These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&uncore->lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&uncore->lock);
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
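
/*
 * Putting the pieces together, a complete critical section might look like
 * this (sketch; reg and SOME_BIT are caller-provided/illustrative):
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_write_fw(uncore, reg, val | SOME_BIT);
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */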

static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
				   i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read(uncore, reg);
	val = (old & ~clear) | set;
	intel_uncore_write(uncore, reg, val);
	return old;
}
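
/*
 * Read-modify-write in one call: bits in @clear are dropped, bits in @set
 * are applied, and the pre-modification value is returned. For example
 * (illustrative bit names):
 *
 *	intel_uncore_rmw(uncore, reg, MODE_A | MODE_B, MODE_C);
 *
 * clears MODE_A and MODE_B while setting MODE_C, leaving all other bits
 * untouched.
 */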

static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
				       i915_reg_t reg, u32 clear, u32 set)
{
	u32 old, val;

	old = intel_uncore_read_fw(uncore, reg);
	val = (old & ~clear) | set;
	if (val != old)
		intel_uncore_write_fw(uncore, reg, val);
}

static inline u64
intel_uncore_read64_2x32(struct intel_uncore *uncore,
			 i915_reg_t lower_reg, i915_reg_t upper_reg)
{
	u32 upper, lower, old_upper, loop = 0;
	enum forcewake_domains fw_domains;
	unsigned long flags;

	fw_domains = intel_uncore_forcewake_for_reg(uncore, lower_reg,
						    FW_REG_READ);

	fw_domains |= intel_uncore_forcewake_for_reg(uncore, upper_reg,
						     FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	upper = intel_uncore_read_fw(uncore, upper_reg);
	do {
		old_upper = upper;
		lower = intel_uncore_read_fw(uncore, lower_reg);
		upper = intel_uncore_read_fw(uncore, upper_reg);
	} while (upper != old_upper && loop++ < 2);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return (u64)upper << 32 | lower;
}
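
/*
 * The re-read loop above guards against the lower half wrapping (and
 * carrying into the upper half) between the two 32-bit reads: if the upper
 * half changed, the pair is sampled again, giving up after a few attempts
 * and returning whatever was last read.
 */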

static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore,
						i915_reg_t reg, u32 val,
						u32 mask, u32 expected_val)
{
	u32 reg_val;

	intel_uncore_write(uncore, reg, val);
	reg_val = intel_uncore_read(uncore, reg);

	return (reg_val & mask) != expected_val ? -EINVAL : 0;
}
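
/*
 * Write, then read back and check that (readback & mask) == expected_val,
 * returning -EINVAL on mismatch. Useful for registers where writes may be
 * silently dropped, e.g. (sketch with illustrative names):
 *
 *	if (intel_uncore_write_and_verify(uncore, reg, enable_bits,
 *					  enable_bits, enable_bits))
 *		... the write did not stick, report or retry ...
 */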

static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
{
	return uncore->regs;
}

/*
 * The raw_reg_{read,write} macros are intended as a micro-optimization for
 * interrupt handlers so that the pointer indirection on uncore->regs can
 * be computed once (and presumably cached in a register) instead of generating
 * extra load instructions for each MMIO access.
 *
 * Given that these macros are only intended for non-GSI interrupt registers
 * (and the goal is to avoid extra instructions generated by the compiler),
 * these macros do not account for uncore->gsi_offset.  Any caller that needs
 * to use these macros on a GSI register is responsible for adding the
 * appropriate GSI offset to the 'base' parameter.
 */
#define raw_reg_read(base, reg) \
	readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
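
/*
 * Typical interrupt-handler usage (sketch; the IIR register name is
 * illustrative), hoisting the uncore->regs load out of the hot path:
 *
 *	void __iomem * const regs = intel_uncore_regs(uncore);
 *	u32 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
 *
 *	if (iir)
 *		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 */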

#endif /* !__INTEL_UNCORE_H__ */