/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include <drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"
#include "clearstate_si.h"
#include "radeon_ucode.h"


MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");

MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
MODULE_FIRMWARE("radeon/tahiti_me.bin");
MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");

MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");

MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
MODULE_FIRMWARE("radeon/pitcairn_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");

MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");

MODULE_FIRMWARE("radeon/verde_pfp.bin");
MODULE_FIRMWARE("radeon/verde_me.bin");
MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
MODULE_FIRMWARE("radeon/verde_k_smc.bin");

MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");

MODULE_FIRMWARE("radeon/oland_pfp.bin");
MODULE_FIRMWARE("radeon/oland_me.bin");
MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");

MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");

MODULE_FIRMWARE("radeon/hainan_pfp.bin");
MODULE_FIRMWARE("radeon/hainan_me.bin");
MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

MODULE_FIRMWARE("radeon/si58_mc.bin");

static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
					 bool enable);
static void si_init_pg(struct radeon_device *rdev);
static void si_init_cg(struct radeon_device *rdev);
static void si_fini_pg(struct radeon_device *rdev);
static void si_fini_cg(struct radeon_device *rdev);
static void si_rlc_stop(struct radeon_device *rdev);

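/*
 * RLC save/restore register list for Verde. Reading the entries, they
 * appear to come in pairs: the first word packs a GRBM_GFX_INDEX-style
 * selector in the high 16 bits (0x8000/0x8040/0x8001/0x8041 seem to pick
 * individual SE/SH instances, 0x9c00 broadcast) and the register's dword
 * offset in the low 16 bits; the second word is the value seeded into the
 * save/restore buffer. The lone 0x3 below breaks the pairing and looks
 * like a count field; this interpretation is a best guess, as the layout
 * is not documented here.
 */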
static const u32 verde_rlc_save_restore_register_list[] =
{
	(0x8000 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x98f4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe80 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89bc >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c1c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x98f0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xe7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9148 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9150 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x897c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8d8c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac54 >> 2),
	0x00000000,
	0x3,
	(0x9c00 << 16) | (0x98f8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9910 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9914 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9918 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x991c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9920 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9924 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9928 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x992c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9930 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9934 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9938 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x993c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9940 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9944 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9948 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x994c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9950 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9954 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9958 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x995c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9960 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9964 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9968 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x996c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9970 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9974 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9978 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x997c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9980 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9984 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9988 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x998c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c04 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c08 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9b7c >> 2),
	0x00000000,
	(0x8000 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8040 << 16) | (0xe84 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x89c0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8040 << 16) | (0x914c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x8c20 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9354 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9060 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9364 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9100 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x913c >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e0 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e4 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x90e8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8bcc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b24 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e50 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8c0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e58 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8e5c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9508 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x950c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9494 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac0c >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xae00 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0xac08 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88d4 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88c8 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x88cc >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x89b0 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8b10 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x8a14 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9830 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9834 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9838 >> 2),
	0x00000000,
	(0x9c00 << 16) | (0x9a10 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8000 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8001 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8040 << 16) | (0x9874 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9870 >> 2),
	0x00000000,
	(0x8041 << 16) | (0x9874 >> 2),
	0x00000000,
	0x00000000
};

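/*
 * "Golden" register tables. Each entry is an {offset, and_mask, or_value}
 * triple consumed by radeon_program_register_sequence(), which effectively
 * does:
 *
 *	tmp = RREG32(reg);
 *	tmp &= ~and_mask;
 *	tmp |= or_value;
 *	WREG32(reg, tmp);
 *
 * (an and_mask of 0xffffffff just writes or_value outright).
 */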
static const u32 tahiti_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4,
	0xf4a8, 0xffffffff, 0x00000000
};

static const u32 tahiti_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x277c, 0x00000003, 0x000007ff,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x00000200, 0x000002fb,
	0xac10, 0xffffffff, 0x0000543b,
	0xac0c, 0xffffffff, 0xa9210876,
	0x88d0, 0xffffffff, 0x000fff40,
	0x88d4, 0x0000001f, 0x00000010,
	0x1410, 0x20000000, 0x20fffed8,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 tahiti_golden_registers2[] =
{
	0xc64, 0x00000001, 0x00000001
};

static const u32 pitcairn_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601004,
	0xc47c, 0xffffffff, 0x10102020,
	0xc488, 0xffffffff, 0x01000020,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000a4
};

static const u32 pitcairn_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x2a00126a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f7,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x32761054,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 verde_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x033f1005,
	0xc47c, 0xffffffff, 0x10808020,
	0xc488, 0xffffffff, 0x00800008,
	0xc314, 0xffffffff, 0x00001000,
	0xc30c, 0xffffffff, 0x80010014
};

static const u32 verde_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x28350, 0x3f3f3fff, 0x0000124a,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9100, 0x07ffffff, 0x03000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e88, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x8e84, 0x01ff1f3f, 0x00000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac14, 0x000003ff, 0x00000003,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0xac0c, 0xffffffff, 0x00001032,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 oland_golden_rlc_registers[] =
{
	0xc424, 0xffffffff, 0x00601005,
	0xc47c, 0xffffffff, 0x10104040,
	0xc488, 0xffffffff, 0x0100000a,
	0xc314, 0xffffffff, 0x00000800,
	0xc30c, 0xffffffff, 0x800000f4
};

static const u32 oland_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xc78, 0x00000080, 0x00000000,
	0xd030, 0x000300c0, 0x00800040,
	0xd830, 0x000300c0, 0x00800040,
	0x5bb0, 0x000000f0, 0x00000070,
	0x5bc0, 0x00200000, 0x50100000,
	0x7030, 0x31000311, 0x00000011,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000082,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x07ffffff, 0x03000000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f3,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 hainan_golden_registers[] =
{
	0x9a10, 0x00010000, 0x00018208,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0x0002021c, 0x00020200,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd030, 0x000300c0, 0x00800040,
	0xd8c0, 0xff000fff, 0x00000100,
	0xd830, 0x000300c0, 0x00800040,
	0x2ae4, 0x00073ffe, 0x000022a2,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0xffffffff, 0x00ffffff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x4e000000,
	0x28350, 0x3f3f3fff, 0x00000000,
	0x30, 0x000000ff, 0x0040,
	0x34, 0x00000040, 0x00004040,
	0x9100, 0x03e00000, 0x03600000,
	0x9060, 0x0000007f, 0x00000020,
	0x9508, 0x00010000, 0x00010000,
	0xac14, 0x000003ff, 0x000000f1,
	0xac10, 0xffffffff, 0x00000000,
	0xac0c, 0xffffffff, 0x00003210,
	0x88d4, 0x0000001f, 0x00000010,
	0x15c0, 0x000c0fc0, 0x000c0400
};

static const u32 hainan_golden_registers2[] =
{
	0x98f8, 0xffffffff, 0x02010001
};

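/*
 * Per-family medium/coarse grain clock gating (MGCG/CGCG) init sequences,
 * in the same {offset, mask, value} triple format as the golden register
 * tables above.
 */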
static const u32 tahiti_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x91d8, 0xffffffff, 0x00070006,
	0x91dc, 0xffffffff, 0x00090008,
	0x91e0, 0xffffffff, 0x0000000c,
	0x91e4, 0xffffffff, 0x000b000a,
	0x91e8, 0xffffffff, 0x000e000d,
	0x91ec, 0xffffffff, 0x00080007,
	0x91f0, 0xffffffff, 0x000a0009,
	0x91f4, 0xffffffff, 0x0000000d,
	0x91f8, 0xffffffff, 0x000c000b,
	0x91fc, 0xffffffff, 0x000f000e,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9264, 0xffffffff, 0x000e000d,
	0x9268, 0xffffffff, 0x0010000f,
	0x926c, 0xffffffff, 0x00000013,
	0x9270, 0xffffffff, 0x00120011,
	0x9274, 0xffffffff, 0x00150014,
	0x9278, 0xffffffff, 0x000f000e,
	0x927c, 0xffffffff, 0x00110010,
	0x9280, 0xffffffff, 0x00000014,
	0x9284, 0xffffffff, 0x00130012,
	0x9288, 0xffffffff, 0x00160015,
	0x928c, 0xffffffff, 0x0010000f,
	0x9290, 0xffffffff, 0x00120011,
	0x9294, 0xffffffff, 0x00000015,
	0x9298, 0xffffffff, 0x00140013,
	0x929c, 0xffffffff, 0x00170016,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 pitcairn_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 verde_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x9200, 0xffffffff, 0x00090008,
	0x9204, 0xffffffff, 0x000b000a,
	0x9208, 0xffffffff, 0x000c000f,
	0x920c, 0xffffffff, 0x000e000d,
	0x9210, 0xffffffff, 0x00110010,
	0x9214, 0xffffffff, 0x000a0009,
	0x9218, 0xffffffff, 0x000c000b,
	0x921c, 0xffffffff, 0x0000000f,
	0x9220, 0xffffffff, 0x000e000d,
	0x9224, 0xffffffff, 0x00110010,
	0x9228, 0xffffffff, 0x000b000a,
	0x922c, 0xffffffff, 0x000d000c,
	0x9230, 0xffffffff, 0x00000010,
	0x9234, 0xffffffff, 0x000f000e,
	0x9238, 0xffffffff, 0x00120011,
	0x923c, 0xffffffff, 0x000c000b,
	0x9240, 0xffffffff, 0x000e000d,
	0x9244, 0xffffffff, 0x00000011,
	0x9248, 0xffffffff, 0x0010000f,
	0x924c, 0xffffffff, 0x00130012,
	0x9250, 0xffffffff, 0x000d000c,
	0x9254, 0xffffffff, 0x000f000e,
	0x9258, 0xffffffff, 0x00100013,
	0x925c, 0xffffffff, 0x00120011,
	0x9260, 0xffffffff, 0x00150014,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 oland_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x102c, 0x00000101, 0x00000000,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x55e4, 0xff000fff, 0x00000100,
	0x55e8, 0x00000001, 0x00000001,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

static const u32 hainan_mgcg_cgcg_init[] =
{
	0xc400, 0xffffffff, 0xfffffffc,
	0x802c, 0xffffffff, 0xe0000000,
	0x9a60, 0xffffffff, 0x00000100,
	0x92a4, 0xffffffff, 0x00000100,
	0xc164, 0xffffffff, 0x00000100,
	0x9774, 0xffffffff, 0x00000100,
	0x8984, 0xffffffff, 0x06000100,
	0x8a18, 0xffffffff, 0x00000100,
	0x92a0, 0xffffffff, 0x00000100,
	0xc380, 0xffffffff, 0x00000100,
	0x8b28, 0xffffffff, 0x00000100,
	0x9144, 0xffffffff, 0x00000100,
	0x8d88, 0xffffffff, 0x00000100,
	0x8d8c, 0xffffffff, 0x00000100,
	0x9030, 0xffffffff, 0x00000100,
	0x9034, 0xffffffff, 0x00000100,
	0x9038, 0xffffffff, 0x00000100,
	0x903c, 0xffffffff, 0x00000100,
	0xad80, 0xffffffff, 0x00000100,
	0xac54, 0xffffffff, 0x00000100,
	0x897c, 0xffffffff, 0x06000100,
	0x9868, 0xffffffff, 0x00000100,
	0x9510, 0xffffffff, 0x00000100,
	0xaf04, 0xffffffff, 0x00000100,
	0xae04, 0xffffffff, 0x00000100,
	0x949c, 0xffffffff, 0x00000100,
	0x802c, 0xffffffff, 0xe0000000,
	0x9160, 0xffffffff, 0x00010000,
	0x9164, 0xffffffff, 0x00030002,
	0x9168, 0xffffffff, 0x00040007,
	0x916c, 0xffffffff, 0x00060005,
	0x9170, 0xffffffff, 0x00090008,
	0x9174, 0xffffffff, 0x00020001,
	0x9178, 0xffffffff, 0x00040003,
	0x917c, 0xffffffff, 0x00000007,
	0x9180, 0xffffffff, 0x00060005,
	0x9184, 0xffffffff, 0x00090008,
	0x9188, 0xffffffff, 0x00030002,
	0x918c, 0xffffffff, 0x00050004,
	0x9190, 0xffffffff, 0x00000008,
	0x9194, 0xffffffff, 0x00070006,
	0x9198, 0xffffffff, 0x000a0009,
	0x919c, 0xffffffff, 0x00040003,
	0x91a0, 0xffffffff, 0x00060005,
	0x91a4, 0xffffffff, 0x00000009,
	0x91a8, 0xffffffff, 0x00080007,
	0x91ac, 0xffffffff, 0x000b000a,
	0x91b0, 0xffffffff, 0x00050004,
	0x91b4, 0xffffffff, 0x00070006,
	0x91b8, 0xffffffff, 0x0008000b,
	0x91bc, 0xffffffff, 0x000a0009,
	0x91c0, 0xffffffff, 0x000d000c,
	0x91c4, 0xffffffff, 0x00060005,
	0x91c8, 0xffffffff, 0x00080007,
	0x91cc, 0xffffffff, 0x0000000b,
	0x91d0, 0xffffffff, 0x000a0009,
	0x91d4, 0xffffffff, 0x000d000c,
	0x9150, 0xffffffff, 0x96940200,
	0x8708, 0xffffffff, 0x00900100,
	0xc478, 0xffffffff, 0x00000080,
	0xc404, 0xffffffff, 0x0020003f,
	0x30, 0xffffffff, 0x0000001c,
	0x34, 0x000f0000, 0x000f0000,
	0x160c, 0xffffffff, 0x00000100,
	0x1024, 0xffffffff, 0x00000100,
	0x20a8, 0xffffffff, 0x00000104,
	0x264c, 0x000c0000, 0x000c0000,
	0x2648, 0x000c0000, 0x000c0000,
	0x2f50, 0x00000001, 0x00000001,
	0x30cc, 0xc0000fff, 0x00000104,
	0xc1e4, 0x00000001, 0x00000001,
	0xd0c0, 0xfffffff0, 0x00000100,
	0xd8c0, 0xfffffff0, 0x00000100
};

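/*
 * Power gating init sequence for Verde, again as {offset, mask, value}
 * triples. The repeated 0x3500/0x3504 and 0x3538/0x353c writes look like
 * index/data pairs loading a PG state table; the exact layout is not
 * documented, so treat this as opaque hardware init data.
 */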
static const u32 verde_pg_init[] =
{
	0x353c, 0xffffffff, 0x40000,
	0x3538, 0xffffffff, 0x200010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x7007,
	0x3538, 0xffffffff, 0x300010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x400000,
	0x3538, 0xffffffff, 0x100010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x120200,
	0x3538, 0xffffffff, 0x500010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x1e1e16,
	0x3538, 0xffffffff, 0x600010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x171f1e,
	0x3538, 0xffffffff, 0x700010ff,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x353c, 0xffffffff, 0x0,
	0x3538, 0xffffffff, 0x9ff,
	0x3500, 0xffffffff, 0x0,
	0x3504, 0xffffffff, 0x10000800,
	0x3504, 0xffffffff, 0xf,
	0x3504, 0xffffffff, 0xf,
	0x3500, 0xffffffff, 0x4,
	0x3504, 0xffffffff, 0x1000051e,
	0x3504, 0xffffffff, 0xffff,
	0x3504, 0xffffffff, 0xffff,
	0x3500, 0xffffffff, 0x8,
	0x3504, 0xffffffff, 0x80500,
	0x3500, 0xffffffff, 0x12,
	0x3504, 0xffffffff, 0x9050c,
	0x3500, 0xffffffff, 0x1d,
	0x3504, 0xffffffff, 0xb052c,
	0x3500, 0xffffffff, 0x2a,
	0x3504, 0xffffffff, 0x1053e,
	0x3500, 0xffffffff, 0x2d,
	0x3504, 0xffffffff, 0x10546,
	0x3500, 0xffffffff, 0x30,
	0x3504, 0xffffffff, 0xa054e,
	0x3500, 0xffffffff, 0x3c,
	0x3504, 0xffffffff, 0x1055f,
	0x3500, 0xffffffff, 0x3f,
	0x3504, 0xffffffff, 0x10567,
	0x3500, 0xffffffff, 0x42,
	0x3504, 0xffffffff, 0x1056f,
	0x3500, 0xffffffff, 0x45,
	0x3504, 0xffffffff, 0x10572,
	0x3500, 0xffffffff, 0x48,
	0x3504, 0xffffffff, 0x20575,
	0x3500, 0xffffffff, 0x4c,
	0x3504, 0xffffffff, 0x190801,
	0x3500, 0xffffffff, 0x67,
	0x3504, 0xffffffff, 0x1082a,
	0x3500, 0xffffffff, 0x6a,
	0x3504, 0xffffffff, 0x1b082d,
	0x3500, 0xffffffff, 0x87,
	0x3504, 0xffffffff, 0x310851,
	0x3500, 0xffffffff, 0xba,
	0x3504, 0xffffffff, 0x891,
	0x3500, 0xffffffff, 0xbc,
	0x3504, 0xffffffff, 0x893,
	0x3500, 0xffffffff, 0xbe,
	0x3504, 0xffffffff, 0x20895,
	0x3500, 0xffffffff, 0xc2,
	0x3504, 0xffffffff, 0x20899,
	0x3500, 0xffffffff, 0xc6,
	0x3504, 0xffffffff, 0x2089d,
	0x3500, 0xffffffff, 0xca,
	0x3504, 0xffffffff, 0x8a1,
	0x3500, 0xffffffff, 0xcc,
	0x3504, 0xffffffff, 0x8a3,
	0x3500, 0xffffffff, 0xce,
	0x3504, 0xffffffff, 0x308a5,
	0x3500, 0xffffffff, 0xd3,
	0x3504, 0xffffffff, 0x6d08cd,
	0x3500, 0xffffffff, 0x142,
	0x3504, 0xffffffff, 0x2000095a,
	0x3504, 0xffffffff, 0x1,
	0x3500, 0xffffffff, 0x144,
	0x3504, 0xffffffff, 0x301f095b,
	0x3500, 0xffffffff, 0x165,
	0x3504, 0xffffffff, 0xc094d,
	0x3500, 0xffffffff, 0x173,
	0x3504, 0xffffffff, 0xf096d,
	0x3500, 0xffffffff, 0x184,
	0x3504, 0xffffffff, 0x15097f,
	0x3500, 0xffffffff, 0x19b,
	0x3504, 0xffffffff, 0xc0998,
	0x3500, 0xffffffff, 0x1a9,
	0x3504, 0xffffffff, 0x409a7,
	0x3500, 0xffffffff, 0x1af,
	0x3504, 0xffffffff, 0xcdc,
	0x3500, 0xffffffff, 0x1b1,
	0x3504, 0xffffffff, 0x800,
	0x3508, 0xffffffff, 0x6c9b2000,
	0x3510, 0xfc00, 0x2000,
	0x3544, 0xffffffff, 0xfc0,
	0x28d4, 0x00000100, 0x100
};

static void si_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_TAHITI:
		radeon_program_register_sequence(rdev,
						 tahiti_golden_registers,
						 (const u32)ARRAY_SIZE(tahiti_golden_registers));
		radeon_program_register_sequence(rdev,
						 tahiti_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(tahiti_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 tahiti_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tahiti_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 tahiti_golden_registers2,
						 (const u32)ARRAY_SIZE(tahiti_golden_registers2));
		break;
	case CHIP_PITCAIRN:
		radeon_program_register_sequence(rdev,
						 pitcairn_golden_registers,
						 (const u32)ARRAY_SIZE(pitcairn_golden_registers));
		radeon_program_register_sequence(rdev,
						 pitcairn_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(pitcairn_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 pitcairn_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
		break;
	case CHIP_VERDE:
		radeon_program_register_sequence(rdev,
						 verde_golden_registers,
						 (const u32)ARRAY_SIZE(verde_golden_registers));
		radeon_program_register_sequence(rdev,
						 verde_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(verde_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 verde_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(verde_mgcg_cgcg_init));
		radeon_program_register_sequence(rdev,
						 verde_pg_init,
						 (const u32)ARRAY_SIZE(verde_pg_init));
		break;
	case CHIP_OLAND:
		radeon_program_register_sequence(rdev,
						 oland_golden_registers,
						 (const u32)ARRAY_SIZE(oland_golden_registers));
		radeon_program_register_sequence(rdev,
						 oland_golden_rlc_registers,
						 (const u32)ARRAY_SIZE(oland_golden_rlc_registers));
		radeon_program_register_sequence(rdev,
						 oland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
		break;
	case CHIP_HAINAN:
		radeon_program_register_sequence(rdev,
						 hainan_golden_registers,
						 (const u32)ARRAY_SIZE(hainan_golden_registers));
		radeon_program_register_sequence(rdev,
						 hainan_golden_registers2,
						 (const u32)ARRAY_SIZE(hainan_golden_registers2));
		radeon_program_register_sequence(rdev,
						 hainan_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * si_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 */
int si_get_allowed_info_register(struct radeon_device *rdev,
				 u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS2:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}
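/*
 * This whitelist backs the RADEON_INFO_READ_REG query of the RADEON_INFO
 * ioctl (at least in the Linux-derived trees); anything not listed is
 * refused with -EINVAL rather than read back, so userspace never gets
 * arbitrary register access through this path.
 */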

/* PCIe reference clock in the driver's 10 kHz units (i.e. 100 MHz); TCLK is thus 10 MHz */
#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

/**
 * si_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (SI).
 */
u32 si_get_xclk(struct radeon_device *rdev)
{
	u32 reference_clock = rdev->clock.spll.reference_freq;
	u32 tmp;

	tmp = RREG32(CG_CLKPIN_CNTL_2);
	if (tmp & MUX_TCLK_TO_XCLK)
		return TCLK;

	tmp = RREG32(CG_CLKPIN_CNTL);
	if (tmp & XTALIN_DIVIDE)
		return reference_clock / 4;

	return reference_clock;
}

/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

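	/*
	 * CTF_TEMP is a 9-bit reading plus what appears to be an overflow
	 * bit: values with bit 9 set are clamped to 255 degrees C below.
	 */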
	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = (actual_temp * 1000);

	return actual_temp;
}

#define TAHITI_IO_MC_REGS_SIZE 36

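/*
 * Per-family MC "IO debug" setup tables, stored as {MC_SEQ_IO_DEBUG_INDEX,
 * MC_SEQ_IO_DEBUG_DATA} pairs and programmed by si_mc_load_microcode()
 * right before the MC ucode itself. All SI variants share the Tahiti
 * table size; only the final 0x9f entry differs between families.
 */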
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a77400}
};

static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a47400}
};

static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a37400}
};

static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a17730}
};

static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
	{0x0000006f, 0x03044000},
	{0x00000070, 0x0480c018},
	{0x00000071, 0x00000040},
	{0x00000072, 0x01000000},
	{0x00000074, 0x000000ff},
	{0x00000075, 0x00143400},
	{0x00000076, 0x08ec0800},
	{0x00000077, 0x040000cc},
	{0x00000079, 0x00000000},
	{0x0000007a, 0x21000409},
	{0x0000007c, 0x00000000},
	{0x0000007d, 0xe8000000},
	{0x0000007e, 0x044408a8},
	{0x0000007f, 0x00000003},
	{0x00000080, 0x00000000},
	{0x00000081, 0x01000000},
	{0x00000082, 0x02000000},
	{0x00000083, 0x00000000},
	{0x00000084, 0xe3f3e4f4},
	{0x00000085, 0x00052024},
	{0x00000087, 0x00000000},
	{0x00000088, 0x66036603},
	{0x00000089, 0x01000000},
	{0x0000008b, 0x1c0a0000},
	{0x0000008c, 0xff010000},
	{0x0000008e, 0xffffefff},
	{0x0000008f, 0xfff3efff},
	{0x00000090, 0xfff3efbf},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00a07730}
};

/* ucode loading */
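/*
 * Two firmware layouts are handled below: newer packaged images
 * (rdev->new_fw) carry a parsed header with a little-endian payload and
 * embedded IO-debug pairs, while legacy blobs are raw big-endian ucode
 * paired with the static io_mc_regs tables above.
 */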
int si_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data = NULL;
	const __le32 *new_fw_data = NULL;
	u32 running;
	u32 *io_mc_regs = NULL;
	const __le32 *new_io_mc_regs = NULL;
	int i, regs_size, ucode_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	if (rdev->new_fw) {
		const struct mc_firmware_header_v1_0 *hdr =
			(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;

		radeon_ucode_print_mc_hdr(&hdr->header);
		regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
		new_io_mc_regs = (const __le32 *)
			(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		new_fw_data = (const __le32 *)
			(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	} else {
		ucode_size = rdev->mc_fw->datasize / 4;

		switch (rdev->family) {
		case CHIP_TAHITI:
			io_mc_regs = (u32 *)&tahiti_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_PITCAIRN:
			io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_VERDE:
		default:
			io_mc_regs = (u32 *)&verde_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_OLAND:
			io_mc_regs = (u32 *)&oland_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		case CHIP_HAINAN:
			io_mc_regs = (u32 *)&hainan_io_mc_regs;
			regs_size = TAHITI_IO_MC_REGS_SIZE;
			break;
		}
		fw_data = (const __be32 *)rdev->mc_fw->data;
	}

	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			if (rdev->new_fw) {
				WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
				WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
			} else {
				WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
				WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
			}
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++) {
			if (rdev->new_fw)
				WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
			else
				WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
		}

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
				break;
			udelay(1);
		}
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
				break;
			udelay(1);
		}
	}

	return 0;
}

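/*
 * Firmware fetching. The lowercase names (e.g. "tahiti_pfp") are the
 * newer packaged images and are tried first; on failure we fall back to
 * the legacy uppercase names and size-check the raw blob instead. On
 * DragonFly both are resolved through the radeonkmsfw_* names built by
 * ksnprintf() below.
 */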
static int si_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *new_chip_name;
	size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size, mc2_req_size;
	char fw_name[30];
	int err;
	int new_fw = 0;
	bool new_smc = false;	/* board revision needs the updated "_k_smc" SMC image */
	bool si58_fw = false;	/* MC_SEQ_MISC0 rev 0x58 memory needs si58_mc.bin */
	bool banks2_fw = false;	/* one Hainan variant needs banks_k_2_smc.bin */

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_TAHITI:
		chip_name = "TAHITI";
		new_chip_name = "tahiti";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		mc2_req_size = TAHITI_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TAHITI_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_PITCAIRN:
		chip_name = "PITCAIRN";
		if ((rdev->pdev->revision == 0x81) &&
		    ((rdev->pdev->device == 0x6810) ||
		     (rdev->pdev->device == 0x6811)))
			new_smc = true;
		new_chip_name = "pitcairn";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		mc2_req_size = PITCAIRN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(PITCAIRN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_VERDE:
		chip_name = "VERDE";
		if (((rdev->pdev->device == 0x6820) &&
		     ((rdev->pdev->revision == 0x81) ||
		      (rdev->pdev->revision == 0x83))) ||
		    ((rdev->pdev->device == 0x6821) &&
		     ((rdev->pdev->revision == 0x83) ||
		      (rdev->pdev->revision == 0x87))) ||
		    ((rdev->pdev->revision == 0x87) &&
		     ((rdev->pdev->device == 0x6823) ||
		      (rdev->pdev->device == 0x682b))))
			new_smc = true;
		new_chip_name = "verde";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = SI_MC_UCODE_SIZE * 4;
		mc2_req_size = VERDE_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(VERDE_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_OLAND:
		chip_name = "OLAND";
		if (((rdev->pdev->revision == 0x81) &&
		     ((rdev->pdev->device == 0x6600) ||
		      (rdev->pdev->device == 0x6604) ||
		      (rdev->pdev->device == 0x6605) ||
		      (rdev->pdev->device == 0x6610))) ||
		    ((rdev->pdev->revision == 0x83) &&
		     (rdev->pdev->device == 0x6610)))
			new_smc = true;
		new_chip_name = "oland";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(OLAND_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_HAINAN:
		chip_name = "HAINAN";
		if (((rdev->pdev->revision == 0x81) &&
		     (rdev->pdev->device == 0x6660)) ||
		    ((rdev->pdev->revision == 0x83) &&
		     ((rdev->pdev->device == 0x6660) ||
		      (rdev->pdev->device == 0x6663) ||
		      (rdev->pdev->device == 0x6665) ||
		      (rdev->pdev->device == 0x6667))))
			new_smc = true;
		else if ((rdev->pdev->revision == 0xc3) &&
			 (rdev->pdev->device == 0x6665))
			banks2_fw = true;
		new_chip_name = "hainan";
		pfp_req_size = SI_PFP_UCODE_SIZE * 4;
		me_req_size = SI_PM4_UCODE_SIZE * 4;
		ce_req_size = SI_CE_UCODE_SIZE * 4;
		rlc_req_size = SI_RLC_UCODE_SIZE * 4;
		mc_req_size = mc2_req_size = OLAND_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(HAINAN_SMC_UCODE_SIZE, 4);
		break;
	default: BUG();
	}

	/* this memory configuration requires special firmware */
	if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
		si58_fw = true;

	DRM_INFO("Loading %s Microcode\n", new_chip_name);

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", new_chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
		err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->pfp_fw->datasize != pfp_req_size) {
			printk(KERN_ERR
			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
			       rdev->pfp_fw->datasize, fw_name);
			err = -EINVAL;
			goto out;
		}
	} else {
		err = radeon_ucode_validate(rdev->pfp_fw);
		if (err) {
			pr_err("si_cp: validation failed for firmware \"%s\"\n",
			       fw_name);
			goto out;
		} else {
			new_fw++;
		}
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", new_chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
		err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->me_fw->datasize != me_req_size) {
			printk(KERN_ERR
			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
			       rdev->me_fw->datasize, fw_name);
			err = -EINVAL;
		}
	} else {
		err = radeon_ucode_validate(rdev->me_fw);
		if (err) {
1787 			printk(KERN_ERR
1788 			       "si_cp: validation failed for firmware \"%s\"\n",
1789 			       fw_name);
1790 			goto out;
1791 		} else {
1792 			new_fw++;
1793 		}
1794 	}
1795 
1796 	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_ce", new_chip_name);
1797 	err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1798 	if (err) {
1799 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_ce", chip_name);
1800 		err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
1801 		if (err)
1802 			goto out;
1803 		if (rdev->ce_fw->datasize != ce_req_size) {
1804 			printk(KERN_ERR
1805 			       "si_cp: Bogus length %zu in firmware \"%s\"\n",
1806 			       rdev->ce_fw->datasize, fw_name);
1807 			err = -EINVAL;
1808 		}
1809 	} else {
1810 		err = radeon_ucode_validate(rdev->ce_fw);
1811 		if (err) {
1812 			printk(KERN_ERR
1813 			       "si_cp: validation failed for firmware \"%s\"\n",
1814 			       fw_name);
1815 			goto out;
1816 		} else {
1817 			new_fw++;
1818 		}
1819 	}
1820 
1821 	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", new_chip_name);
1822 	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1823 	if (err) {
1824 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", chip_name);
1825 		err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
1826 		if (err)
1827 			goto out;
1828 		if (rdev->rlc_fw->datasize != rlc_req_size) {
1829 			printk(KERN_ERR
1830 			       "si_rlc: Bogus length %zu in firmware \"%s\"\n",
1831 			       rdev->rlc_fw->datasize, fw_name);
1832 			err = -EINVAL;
1833 		}
1834 	} else {
1835 		err = radeon_ucode_validate(rdev->rlc_fw);
1836 		if (err) {
1837 			printk(KERN_ERR
1838 			       "si_cp: validation failed for firmware \"%s\"\n",
1839 			       fw_name);
1840 			goto out;
1841 		} else {
1842 			new_fw++;
1843 		}
1844 	}
1845 
1846 	if (si58_fw)
1847 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_si58_mc");
1848 	else
1849 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", new_chip_name);
1850 	err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1851 	if (err) {
1852 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc2", chip_name);
1853 		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1854 		if (err) {
1855 			ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_mc", chip_name);
1856 			err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1857 			if (err)
1858 				goto out;
1859 		}
1860 		if ((rdev->mc_fw->datasize != mc_req_size) &&
1861 		    (rdev->mc_fw->datasize != mc2_req_size)) {
1862 			printk(KERN_ERR
1863 			       "si_mc: Bogus length %zu in firmware \"%s\"\n",
1864 			       rdev->mc_fw->datasize, fw_name);
1865 			err = -EINVAL;
1866 		}
1867 		DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->datasize);
1868 	} else {
1869 		err = radeon_ucode_validate(rdev->mc_fw);
1870 		if (err) {
1871 			pr_err("si_cp: validation failed for firmware \"%s\"\n",
1872 			       fw_name);
1873 			goto out;
1874 		} else {
1875 			new_fw++;
1876 		}
1877 	}
1878 
1879 	if (banks2_fw)
1880 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_banks_k_2_smc");
1881 	else if (new_smc)
1882 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_k_smc", new_chip_name);
1883 	else
1884 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", new_chip_name);
1885 	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1886 	if (err) {
1887 		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", chip_name);
1888 		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1889 		if (err) {
1890 			printk(KERN_ERR
1891 			       "smc: error loading firmware \"%s\"\n",
1892 			       fw_name);
1893 			release_firmware(rdev->smc_fw);
1894 			rdev->smc_fw = NULL;
1895 			err = 0;
1896 		} else if (rdev->smc_fw->datasize != smc_req_size) {
1897 			printk(KERN_ERR
1898 			       "si_smc: Bogus length %zu in firmware \"%s\"\n",
1899 			       rdev->smc_fw->datasize, fw_name);
1900 			err = -EINVAL;
1901 		}
1902 	} else {
1903 		err = radeon_ucode_validate(rdev->smc_fw);
1904 		if (err) {
1905 			printk(KERN_ERR
1906 			       "si_cp: validation failed for firmware \"%s\"\n",
1907 			       fw_name);
1908 			goto out;
1909 		} else {
1910 			new_fw++;
1911 		}
1912 	}
1913 
1914 	if (new_fw == 0) {
1915 		rdev->new_fw = false;
1916 	} else if (new_fw < 6) {
1917 		pr_err("si_fw: mixing new and old firmware!\n");
1918 		err = -EINVAL;
1919 	} else {
1920 		rdev->new_fw = true;
1921 	}
1922 out:
1923 	if (err) {
1924 		if (err != -EINVAL)
1925 			pr_err("si_cp: Failed to load firmware \"%s\"\n",
1926 			       fw_name);
1927 		release_firmware(rdev->pfp_fw);
1928 		rdev->pfp_fw = NULL;
1929 		release_firmware(rdev->me_fw);
1930 		rdev->me_fw = NULL;
1931 		release_firmware(rdev->ce_fw);
1932 		rdev->ce_fw = NULL;
1933 		release_firmware(rdev->rlc_fw);
1934 		rdev->rlc_fw = NULL;
1935 		release_firmware(rdev->mc_fw);
1936 		rdev->mc_fw = NULL;
1937 		release_firmware(rdev->smc_fw);
1938 		rdev->smc_fw = NULL;
1939 	}
1940 	return err;
1941 }
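/*
 * Six images are requested above: pfp, me, ce, rlc, mc and smc.  For
 * each one the new-format (lowercase) name is tried first and checked
 * with radeon_ucode_validate(); on failure the legacy (uppercase) name
 * is requested and only its exact byte length is verified.  new_fw
 * counts the validated new-format images, so 0 means an all-legacy set,
 * 6 an all-new set, and anything in between is rejected as a mix.  A
 * missing SMC image is tolerated (rdev->smc_fw is left NULL and err is
 * cleared); failure on any of the others is fatal.
 */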
1942 
1943 /**
1944  * si_fini_microcode - drop the firmware image references
1945  *
1946  * @rdev: radeon_device pointer
1947  *
1948  * Drop the pfp, me, rlc, mc, ce and smc firmware image references.
1949  * Called at driver shutdown.
1950  */
1951 static void si_fini_microcode(struct radeon_device *rdev)
1952 {
1953 	release_firmware(rdev->pfp_fw);
1954 	rdev->pfp_fw = NULL;
1955 	release_firmware(rdev->me_fw);
1956 	rdev->me_fw = NULL;
1957 	release_firmware(rdev->rlc_fw);
1958 	rdev->rlc_fw = NULL;
1959 	release_firmware(rdev->mc_fw);
1960 	rdev->mc_fw = NULL;
1961 	release_firmware(rdev->smc_fw);
1962 	rdev->smc_fw = NULL;
1963 	release_firmware(rdev->ce_fw);
1964 	rdev->ce_fw = NULL;
1965 }
1966 
1967 /* watermark setup */
1968 static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1969 				   struct radeon_crtc *radeon_crtc,
1970 				   struct drm_display_mode *mode,
1971 				   struct drm_display_mode *other_mode)
1972 {
1973 	u32 tmp, buffer_alloc, i;
1974 	u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1975 	/*
1976 	 * Line Buffer Setup
1977 	 * There are 3 line buffers, each one shared by 2 display controllers.
1978 	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
1979 	 * the display controllers.  The paritioning is done via one of four
1980 	 * preset allocations specified in bits 21:20:
1981 	 *  0 - half lb
1982 	 *  2 - whole lb, other crtc must be disabled
1983 	 */
1984 	/* this can get tricky if we have two large displays on a paired group
1985 	 * of crtcs.  Ideally for multiple large displays we'd assign them to
1986 	 * non-linked crtcs for maximum line buffer allocation.
1987 	 */
1988 	if (radeon_crtc->base.enabled && mode) {
1989 		if (other_mode) {
1990 			tmp = 0; /* 1/2 */
1991 			buffer_alloc = 1;
1992 		} else {
1993 			tmp = 2; /* whole */
1994 			buffer_alloc = 2;
1995 		}
1996 	} else {
1997 		tmp = 0;
1998 		buffer_alloc = 0;
1999 	}
2000 
2001 	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
2002 	       DC_LB_MEMORY_CONFIG(tmp));
2003 
2004 	WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
2005 	       DMIF_BUFFERS_ALLOCATED(buffer_alloc));
2006 	for (i = 0; i < rdev->usec_timeout; i++) {
2007 		if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
2008 		    DMIF_BUFFERS_ALLOCATED_COMPLETED)
2009 			break;
2010 		udelay(1);
2011 	}
2012 
2013 	if (radeon_crtc->base.enabled && mode) {
2014 		switch (tmp) {
2015 		case 0:
2016 		default:
2017 			return 4096 * 2;
2018 		case 2:
2019 			return 8192 * 2;
2020 		}
2021 	}
2022 
2023 	/* controller not enabled, so no lb used */
2024 	return 0;
2025 }
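/*
 * Illustrative outcome of the logic above: when both heads of a linked
 * pair are active, each gets half of the shared line buffer and the
 * function returns 4096 * 2; a head whose partner is disabled gets the
 * whole buffer (8192 * 2); a disabled head gets none.  The function also
 * programs the DMIF buffer allocation for the pipe and polls for its
 * completion for up to rdev->usec_timeout microseconds.
 */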
2026 
2027 static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
2028 {
2029 	u32 tmp = RREG32(MC_SHARED_CHMAP);
2030 
2031 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
2032 	case 0:
2033 	default:
2034 		return 1;
2035 	case 1:
2036 		return 2;
2037 	case 2:
2038 		return 4;
2039 	case 3:
2040 		return 8;
2041 	case 4:
2042 		return 3;
2043 	case 5:
2044 		return 6;
2045 	case 6:
2046 		return 10;
2047 	case 7:
2048 		return 12;
2049 	case 8:
2050 		return 16;
2051 	}
2052 }
2053 
2054 struct dce6_wm_params {
2055 	u32 dram_channels; /* number of dram channels */
2056 	u32 yclk;          /* bandwidth per dram data pin in kHz */
2057 	u32 sclk;          /* engine clock in kHz */
2058 	u32 disp_clk;      /* display clock in kHz */
2059 	u32 src_width;     /* viewport width */
2060 	u32 active_time;   /* active display time in ns */
2061 	u32 blank_time;    /* blank time in ns */
2062 	bool interlaced;    /* mode is interlaced */
2063 	fixed20_12 vsc;    /* vertical scale ratio */
2064 	u32 num_heads;     /* number of active crtcs */
2065 	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
2066 	u32 lb_size;       /* line buffer allocated to pipe */
2067 	u32 vtaps;         /* vertical scaler taps */
2068 };
2069 
2070 static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
2071 {
2072 	/* Calculate raw DRAM Bandwidth */
2073 	fixed20_12 dram_efficiency; /* 0.7 */
2074 	fixed20_12 yclk, dram_channels, bandwidth;
2075 	fixed20_12 a;
2076 
2077 	a.full = dfixed_const(1000);
2078 	yclk.full = dfixed_const(wm->yclk);
2079 	yclk.full = dfixed_div(yclk, a);
2080 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2081 	a.full = dfixed_const(10);
2082 	dram_efficiency.full = dfixed_const(7);
2083 	dram_efficiency.full = dfixed_div(dram_efficiency, a);
2084 	bandwidth.full = dfixed_mul(dram_channels, yclk);
2085 	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
2086 
2087 	return dfixed_trunc(bandwidth);
2088 }
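/*
 * Worked example with illustrative values: yclk = 1000000 kHz (1 GHz per
 * pin) and 8 dram channels gives (8 * 4) * (1000000 / 1000) * 0.7 =
 * 22400, i.e. roughly 22.4 GB/s of raw DRAM bandwidth after the 0.7
 * efficiency factor.
 */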
2089 
2090 static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2091 {
2092 	/* Calculate DRAM Bandwidth and the part allocated to display. */
2093 	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
2094 	fixed20_12 yclk, dram_channels, bandwidth;
2095 	fixed20_12 a;
2096 
2097 	a.full = dfixed_const(1000);
2098 	yclk.full = dfixed_const(wm->yclk);
2099 	yclk.full = dfixed_div(yclk, a);
2100 	dram_channels.full = dfixed_const(wm->dram_channels * 4);
2101 	a.full = dfixed_const(10);
2102 	disp_dram_allocation.full = dfixed_const(3); /* XXX worst-case value 0.3 */
2103 	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
2104 	bandwidth.full = dfixed_mul(dram_channels, yclk);
2105 	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
2106 
2107 	return dfixed_trunc(bandwidth);
2108 }
2109 
2110 static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
2111 {
2112 	/* Calculate the display Data return Bandwidth */
2113 	fixed20_12 return_efficiency; /* 0.8 */
2114 	fixed20_12 sclk, bandwidth;
2115 	fixed20_12 a;
2116 
2117 	a.full = dfixed_const(1000);
2118 	sclk.full = dfixed_const(wm->sclk);
2119 	sclk.full = dfixed_div(sclk, a);
2120 	a.full = dfixed_const(10);
2121 	return_efficiency.full = dfixed_const(8);
2122 	return_efficiency.full = dfixed_div(return_efficiency, a);
2123 	a.full = dfixed_const(32);
2124 	bandwidth.full = dfixed_mul(a, sclk);
2125 	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
2126 
2127 	return dfixed_trunc(bandwidth);
2128 }
2129 
2130 static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
2131 {
2132 	return 32;
2133 }
2134 
2135 static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
2136 {
2137 	/* Calculate the DMIF Request Bandwidth */
2138 	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
2139 	fixed20_12 disp_clk, sclk, bandwidth;
2140 	fixed20_12 a, b1, b2;
2141 	u32 min_bandwidth;
2142 
2143 	a.full = dfixed_const(1000);
2144 	disp_clk.full = dfixed_const(wm->disp_clk);
2145 	disp_clk.full = dfixed_div(disp_clk, a);
2146 	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
2147 	b1.full = dfixed_mul(a, disp_clk);
2148 
2149 	a.full = dfixed_const(1000);
2150 	sclk.full = dfixed_const(wm->sclk);
2151 	sclk.full = dfixed_div(sclk, a);
2152 	a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
2153 	b2.full = dfixed_mul(a, sclk);
2154 
2155 	a.full = dfixed_const(10);
2156 	disp_clk_request_efficiency.full = dfixed_const(8);
2157 	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
2158 
2159 	min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
2160 
2161 	a.full = dfixed_const(min_bandwidth);
2162 	bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
2163 
2164 	return dfixed_trunc(bandwidth);
2165 }
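/*
 * The math above takes the smaller of two request-rate limits - half a
 * 32-byte request per display clock (b1) and one full request per engine
 * clock (b2) - and applies the 0.8 efficiency factor.  With illustrative
 * values disp_clk = 150 MHz and sclk = 800 MHz: b1 = 16 * 150 = 2400,
 * b2 = 32 * 800 = 25600, so the result is 2400 * 0.8 = 1920 MB/s.
 */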
2166 
2167 static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
2168 {
2169 	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
2170 	u32 dram_bandwidth = dce6_dram_bandwidth(wm);
2171 	u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
2172 	u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
2173 
2174 	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
2175 }
2176 
2177 static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
2178 {
2179 	/* Calculate the display mode Average Bandwidth
2180 	 * DisplayMode should contain the source and destination dimensions,
2181 	 * timing, etc.
2182 	 */
2183 	fixed20_12 bpp;
2184 	fixed20_12 line_time;
2185 	fixed20_12 src_width;
2186 	fixed20_12 bandwidth;
2187 	fixed20_12 a;
2188 
2189 	a.full = dfixed_const(1000);
2190 	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
2191 	line_time.full = dfixed_div(line_time, a);
2192 	bpp.full = dfixed_const(wm->bytes_per_pixel);
2193 	src_width.full = dfixed_const(wm->src_width);
2194 	bandwidth.full = dfixed_mul(src_width, bpp);
2195 	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
2196 	bandwidth.full = dfixed_div(bandwidth, line_time);
2197 
2198 	return dfixed_trunc(bandwidth);
2199 }
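/*
 * Illustrative numbers for the formula above (src_width *
 * bytes_per_pixel * vsc / line_time): a 1920-pixel-wide, 32bpp source
 * with vsc = 1 and a 14.8 us line time needs about 1920 * 4 / 14.8 ~=
 * 519 MB/s on average.
 */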
2200 
2201 static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
2202 {
2203 	/* First calculate the latency in ns */
2204 	u32 mc_latency = 2000; /* 2000 ns. */
2205 	u32 available_bandwidth = dce6_available_bandwidth(wm);
2206 	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
2207 	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
2208 	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
2209 	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
2210 		(wm->num_heads * cursor_line_pair_return_time);
2211 	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
2212 	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
2213 	u32 tmp, dmif_size = 12288;
2214 	fixed20_12 a, b, c;
2215 
2216 	if (wm->num_heads == 0)
2217 		return 0;
2218 
2219 	a.full = dfixed_const(2);
2220 	b.full = dfixed_const(1);
2221 	if ((wm->vsc.full > a.full) ||
2222 	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
2223 	    (wm->vtaps >= 5) ||
2224 	    ((wm->vsc.full >= a.full) && wm->interlaced))
2225 		max_src_lines_per_dst_line = 4;
2226 	else
2227 		max_src_lines_per_dst_line = 2;
2228 
2229 	a.full = dfixed_const(available_bandwidth);
2230 	b.full = dfixed_const(wm->num_heads);
2231 	a.full = dfixed_div(a, b);
2232 	tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
2233 	tmp = min(dfixed_trunc(a), tmp);
2234 
2235 	lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
2236 
2237 	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
2238 	b.full = dfixed_const(1000);
2239 	c.full = dfixed_const(lb_fill_bw);
2240 	b.full = dfixed_div(c, b);
2241 	a.full = dfixed_div(a, b);
2242 	line_fill_time = dfixed_trunc(a);
2243 
2244 	if (line_fill_time < wm->active_time)
2245 		return latency;
2246 	else
2247 		return latency + (line_fill_time - wm->active_time);
2248 
2249 }
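/*
 * Rough worked example for the latency sum above, using illustrative
 * inputs: available_bandwidth = 20000 MB/s and num_heads = 2 give
 * worst_chunk_return_time = 512 * 8 * 1000 / 20000 = 204 ns and
 * cursor_line_pair_return_time = 128 * 4 * 1000 / 20000 = 25 ns, so
 * other_heads_data_return_time = 3 * 204 + 2 * 25 = 662 ns; with
 * disp_clk = 150000 kHz, dc_latency = 40000000 / 150000 = 266 ns, for a
 * total of 2000 + 662 + 266 = 2928 ns.  If the line buffer cannot be
 * refilled within the active display time, the shortfall is added on
 * top of that latency.
 */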
2250 
2251 static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
2252 {
2253 	if (dce6_average_bandwidth(wm) <=
2254 	    (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
2255 		return true;
2256 	else
2257 		return false;
2258 }
2259 
2260 static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
2261 {
2262 	if (dce6_average_bandwidth(wm) <=
2263 	    (dce6_available_bandwidth(wm) / wm->num_heads))
2264 		return true;
2265 	else
2266 		return false;
2267 }
2268 
2269 static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
2270 {
2271 	u32 lb_partitions = wm->lb_size / wm->src_width;
2272 	u32 line_time = wm->active_time + wm->blank_time;
2273 	u32 latency_tolerant_lines;
2274 	u32 latency_hiding;
2275 	fixed20_12 a;
2276 
2277 	a.full = dfixed_const(1);
2278 	if (wm->vsc.full > a.full)
2279 		latency_tolerant_lines = 1;
2280 	else {
2281 		if (lb_partitions <= (wm->vtaps + 1))
2282 			latency_tolerant_lines = 1;
2283 		else
2284 			latency_tolerant_lines = 2;
2285 	}
2286 
2287 	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
2288 
2289 	if (dce6_latency_watermark(wm) <= latency_hiding)
2290 		return true;
2291 	else
2292 		return false;
2293 }
2294 
2295 static void dce6_program_watermarks(struct radeon_device *rdev,
2296 					 struct radeon_crtc *radeon_crtc,
2297 					 u32 lb_size, u32 num_heads)
2298 {
2299 	struct drm_display_mode *mode = &radeon_crtc->base.mode;
2300 	struct dce6_wm_params wm_low, wm_high;
2301 	u32 dram_channels;
2302 	u32 active_time;
2303 	u32 line_time = 0;
2304 	u32 latency_watermark_a = 0, latency_watermark_b = 0;
2305 	u32 priority_a_mark = 0, priority_b_mark = 0;
2306 	u32 priority_a_cnt = PRIORITY_OFF;
2307 	u32 priority_b_cnt = PRIORITY_OFF;
2308 	u32 tmp, arb_control3;
2309 	fixed20_12 a, b, c;
2310 
2311 	if (radeon_crtc->base.enabled && num_heads && mode) {
2312 		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
2313 					    (u32)mode->clock);
2314 		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
2315 					  (u32)mode->clock);
2316 		line_time = min(line_time, (u32)65535);
2317 		priority_a_cnt = 0;
2318 		priority_b_cnt = 0;
2319 
2320 		if (rdev->family == CHIP_ARUBA)
2321 			dram_channels = evergreen_get_number_of_dram_channels(rdev);
2322 		else
2323 			dram_channels = si_get_number_of_dram_channels(rdev);
2324 
2325 		/* watermark for high clocks */
2326 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2327 			wm_high.yclk =
2328 				radeon_dpm_get_mclk(rdev, false) * 10;
2329 			wm_high.sclk =
2330 				radeon_dpm_get_sclk(rdev, false) * 10;
2331 		} else {
2332 			wm_high.yclk = rdev->pm.current_mclk * 10;
2333 			wm_high.sclk = rdev->pm.current_sclk * 10;
2334 		}
2335 
2336 		wm_high.disp_clk = mode->clock;
2337 		wm_high.src_width = mode->crtc_hdisplay;
2338 		wm_high.active_time = active_time;
2339 		wm_high.blank_time = line_time - wm_high.active_time;
2340 		wm_high.interlaced = false;
2341 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2342 			wm_high.interlaced = true;
2343 		wm_high.vsc = radeon_crtc->vsc;
2344 		wm_high.vtaps = 1;
2345 		if (radeon_crtc->rmx_type != RMX_OFF)
2346 			wm_high.vtaps = 2;
2347 		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
2348 		wm_high.lb_size = lb_size;
2349 		wm_high.dram_channels = dram_channels;
2350 		wm_high.num_heads = num_heads;
2351 
2352 		/* watermark for low clocks */
2353 		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
2354 			wm_low.yclk =
2355 				radeon_dpm_get_mclk(rdev, true) * 10;
2356 			wm_low.sclk =
2357 				radeon_dpm_get_sclk(rdev, true) * 10;
2358 		} else {
2359 			wm_low.yclk = rdev->pm.current_mclk * 10;
2360 			wm_low.sclk = rdev->pm.current_sclk * 10;
2361 		}
2362 
2363 		wm_low.disp_clk = mode->clock;
2364 		wm_low.src_width = mode->crtc_hdisplay;
2365 		wm_low.active_time = active_time;
2366 		wm_low.blank_time = line_time - wm_low.active_time;
2367 		wm_low.interlaced = false;
2368 		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2369 			wm_low.interlaced = true;
2370 		wm_low.vsc = radeon_crtc->vsc;
2371 		wm_low.vtaps = 1;
2372 		if (radeon_crtc->rmx_type != RMX_OFF)
2373 			wm_low.vtaps = 2;
2374 		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
2375 		wm_low.lb_size = lb_size;
2376 		wm_low.dram_channels = dram_channels;
2377 		wm_low.num_heads = num_heads;
2378 
2379 		/* set for high clocks */
2380 		latency_watermark_a = min(dce6_latency_watermark(&wm_high), (u32)65535);
2381 		/* set for low clocks */
2382 		latency_watermark_b = min(dce6_latency_watermark(&wm_low), (u32)65535);
2383 
2384 		/* possibly force display priority to high */
2385 		/* should really do this at mode validation time... */
2386 		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
2387 		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_high) ||
2388 		    !dce6_check_latency_hiding(&wm_high) ||
2389 		    (rdev->disp_priority == 2)) {
2390 			DRM_DEBUG_KMS("force priority to high\n");
2391 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
2392 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
2393 		}
2394 		if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
2395 		    !dce6_average_bandwidth_vs_available_bandwidth(&wm_low) ||
2396 		    !dce6_check_latency_hiding(&wm_low) ||
2397 		    (rdev->disp_priority == 2)) {
2398 			DRM_DEBUG_KMS("force priority to high\n");
2399 			priority_a_cnt |= PRIORITY_ALWAYS_ON;
2400 			priority_b_cnt |= PRIORITY_ALWAYS_ON;
2401 		}
2402 
2403 		a.full = dfixed_const(1000);
2404 		b.full = dfixed_const(mode->clock);
2405 		b.full = dfixed_div(b, a);
2406 		c.full = dfixed_const(latency_watermark_a);
2407 		c.full = dfixed_mul(c, b);
2408 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2409 		c.full = dfixed_div(c, a);
2410 		a.full = dfixed_const(16);
2411 		c.full = dfixed_div(c, a);
2412 		priority_a_mark = dfixed_trunc(c);
2413 		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
2414 
2415 		a.full = dfixed_const(1000);
2416 		b.full = dfixed_const(mode->clock);
2417 		b.full = dfixed_div(b, a);
2418 		c.full = dfixed_const(latency_watermark_b);
2419 		c.full = dfixed_mul(c, b);
2420 		c.full = dfixed_mul(c, radeon_crtc->hsc);
2421 		c.full = dfixed_div(c, a);
2422 		a.full = dfixed_const(16);
2423 		c.full = dfixed_div(c, a);
2424 		priority_b_mark = dfixed_trunc(c);
2425 		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
2426 
2427 		/* Save number of lines the linebuffer leads before the scanout */
2428 		radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
2429 	}
2430 
2431 	/* select wm A */
2432 	arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2433 	tmp = arb_control3;
2434 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2435 	tmp |= LATENCY_WATERMARK_MASK(1);
2436 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2437 	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2438 	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
2439 		LATENCY_HIGH_WATERMARK(line_time)));
2440 	/* select wm B */
2441 	tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
2442 	tmp &= ~LATENCY_WATERMARK_MASK(3);
2443 	tmp |= LATENCY_WATERMARK_MASK(2);
2444 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
2445 	WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
2446 	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
2447 		LATENCY_HIGH_WATERMARK(line_time)));
2448 	/* restore original selection */
2449 	WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
2450 
2451 	/* write the priority marks */
2452 	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
2453 	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
2454 
2455 	/* save values for DPM */
2456 	radeon_crtc->line_time = line_time;
2457 	radeon_crtc->wm_high = latency_watermark_a;
2458 	radeon_crtc->wm_low = latency_watermark_b;
2459 }
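/*
 * The priority mark computed above works out to
 * latency(ns) * pixel_clock(MHz) * hsc / 1000 / 16, i.e. the number of
 * 16-pixel groups the head fetches during one latency window.  With
 * illustrative values of a 3000 ns watermark, a 150 MHz pixel clock and
 * hsc = 1, that is 3000 * 150 / 1000 / 16 = 28 groups.
 */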
2460 
2461 void dce6_bandwidth_update(struct radeon_device *rdev)
2462 {
2463 	struct drm_display_mode *mode0 = NULL;
2464 	struct drm_display_mode *mode1 = NULL;
2465 	u32 num_heads = 0, lb_size;
2466 	int i;
2467 
2468 	if (!rdev->mode_info.mode_config_initialized)
2469 		return;
2470 
2471 	radeon_update_display_priority(rdev);
2472 
2473 	for (i = 0; i < rdev->num_crtc; i++) {
2474 		if (rdev->mode_info.crtcs[i]->base.enabled)
2475 			num_heads++;
2476 	}
2477 	for (i = 0; i < rdev->num_crtc; i += 2) {
2478 		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
2479 		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
2480 		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
2481 		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
2482 		lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
2483 		dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
2484 	}
2485 }
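/*
 * Note that the loop above walks the crtcs in linked pairs (i, i + 1):
 * each pair shares one line buffer, so both heads have their line buffer
 * allocation and watermarks reprogrammed together on every update.
 */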
2486 
2487 /*
2488  * Core functions
2489  */
2490 static void si_tiling_mode_table_init(struct radeon_device *rdev)
2491 {
2492 	u32 *tile = rdev->config.si.tile_mode_array;
2493 	const u32 num_tile_mode_states =
2494 			ARRAY_SIZE(rdev->config.si.tile_mode_array);
2495 	u32 reg_offset, split_equal_to_row_size;
2496 
2497 	switch (rdev->config.si.mem_row_size_in_kb) {
2498 	case 1:
2499 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
2500 		break;
2501 	case 2:
2502 	default:
2503 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
2504 		break;
2505 	case 4:
2506 		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
2507 		break;
2508 	}
2509 
2510 	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2511 		tile[reg_offset] = 0;
2512 
2513 	switch (rdev->family) {
2514 	case CHIP_TAHITI:
2515 	case CHIP_PITCAIRN:
2516 		/* non-AA compressed depth or any compressed stencil */
2517 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2518 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2519 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2520 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2521 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2522 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2523 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2524 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2525 		/* 2xAA/4xAA compressed depth only */
2526 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2527 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2528 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2529 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2530 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2531 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2532 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2533 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2534 		/* 8xAA compressed depth only */
2535 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2536 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2537 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2538 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2539 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2540 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2541 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2542 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2543 		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2544 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2545 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2546 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2547 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2548 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2549 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2550 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2551 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2552 		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2553 		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2554 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2555 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2556 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2557 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2558 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2559 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2560 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2561 		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2562 		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2563 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2564 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2565 			   TILE_SPLIT(split_equal_to_row_size) |
2566 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2567 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2568 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2569 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2570 		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2571 		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2572 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2573 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2574 			   TILE_SPLIT(split_equal_to_row_size) |
2575 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2576 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2577 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2578 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2579 		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2580 		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2581 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2582 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2583 			   TILE_SPLIT(split_equal_to_row_size) |
2584 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2585 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2586 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2587 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2588 		/* 1D and 1D Array Surfaces */
2589 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2590 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2591 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2592 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2593 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2594 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2595 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2596 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2597 		/* Displayable maps. */
2598 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2599 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2600 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2601 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2602 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2603 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2604 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2605 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2606 		/* Display 8bpp. */
2607 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2608 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2609 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2610 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2611 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2612 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2613 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2614 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2615 		/* Display 16bpp. */
2616 		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2617 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2618 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2619 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2620 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2621 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2622 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2623 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2624 		/* Display 32bpp. */
2625 		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2626 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2627 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2628 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2629 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2630 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2631 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2632 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2633 		/* Thin. */
2634 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2635 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2636 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2637 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2638 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2639 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2640 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2641 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2642 		/* Thin 8 bpp. */
2643 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2644 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2645 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2646 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2647 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2648 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2649 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2650 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2651 		/* Thin 16 bpp. */
2652 		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2653 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2654 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2655 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2656 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2657 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2658 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2659 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2660 		/* Thin 32 bpp. */
2661 		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2662 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2663 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2664 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2665 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2666 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2667 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2668 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2669 		/* Thin 64 bpp. */
2670 		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2671 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2672 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2673 			   TILE_SPLIT(split_equal_to_row_size) |
2674 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2675 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2676 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2677 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2678 		/* 8 bpp PRT. */
2679 		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2680 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2681 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2682 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2683 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2684 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2685 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2686 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2687 		/* 16 bpp PRT */
2688 		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2689 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2690 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2691 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2692 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2693 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2694 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2695 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2696 		/* 32 bpp PRT */
2697 		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2698 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2699 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2700 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2701 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2702 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2703 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2704 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2705 		/* 64 bpp PRT */
2706 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2707 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2708 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2709 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2710 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2711 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2712 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2713 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2714 		/* 128 bpp PRT */
2715 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2716 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2717 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2718 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2719 			   NUM_BANKS(ADDR_SURF_8_BANK) |
2720 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2721 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2722 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2723 
2724 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2725 			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2726 		break;
2727 
2728 	case CHIP_VERDE:
2729 	case CHIP_OLAND:
2730 	case CHIP_HAINAN:
2731 		/* non-AA compressed depth or any compressed stencil */
2732 		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2733 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2734 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2735 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2736 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2737 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2738 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2739 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2740 		/* 2xAA/4xAA compressed depth only */
2741 		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2742 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2743 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2744 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2745 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2746 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2747 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2748 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2749 		/* 8xAA compressed depth only */
2750 		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2751 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2752 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2753 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2754 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2755 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2756 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2757 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2758 		/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
2759 		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2760 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2761 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2762 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
2763 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2764 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2765 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2766 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2767 		/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
2768 		tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2769 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2770 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2771 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2772 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2773 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2774 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2775 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2776 		/* Uncompressed 16bpp depth - and stencil buffer allocated with it */
2777 		tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2778 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2779 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2780 			   TILE_SPLIT(split_equal_to_row_size) |
2781 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2782 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2783 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2784 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2785 		/* Uncompressed 32bpp depth - and stencil buffer allocated with it */
2786 		tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2787 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2788 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2789 			   TILE_SPLIT(split_equal_to_row_size) |
2790 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2791 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2792 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2793 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2794 		/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
2795 		tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2796 			   MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
2797 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2798 			   TILE_SPLIT(split_equal_to_row_size) |
2799 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2800 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2801 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2802 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2803 		/* 1D and 1D Array Surfaces */
2804 		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
2805 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2806 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2807 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2808 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2809 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2810 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2811 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2812 		/* Displayable maps. */
2813 		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2814 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2815 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2816 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2817 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2818 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2819 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2820 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2821 		/* Display 8bpp. */
2822 		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2823 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2824 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2825 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2826 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2827 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2828 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2829 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2830 		/* Display 16bpp. */
2831 		tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2832 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2833 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2834 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2835 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2836 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2837 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2838 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2839 		/* Display 32bpp. */
2840 		tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2841 			   MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
2842 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2843 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2844 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2845 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2846 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2847 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2848 		/* Thin. */
2849 		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
2850 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2851 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2852 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
2853 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2854 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2855 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2856 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2857 		/* Thin 8 bpp. */
2858 		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2859 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2860 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2861 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2862 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2863 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2864 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2865 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2866 		/* Thin 16 bpp. */
2867 		tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2868 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2869 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2870 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2871 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2872 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2873 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2874 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2875 		/* Thin 32 bpp. */
2876 		tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2877 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2878 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2879 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2880 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2881 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2882 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2883 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2884 		/* Thin 64 bpp. */
2885 		tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2886 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2887 			   PIPE_CONFIG(ADDR_SURF_P4_8x16) |
2888 			   TILE_SPLIT(split_equal_to_row_size) |
2889 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2890 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2891 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2892 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2893 		/* 8 bpp PRT. */
2894 		tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2895 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2896 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2897 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2898 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2899 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
2900 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2901 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2902 		/* 16 bpp PRT */
2903 		tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2904 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2905 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2906 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2907 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2908 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2909 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
2910 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
2911 		/* 32 bpp PRT */
2912 		tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2913 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2914 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2915 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
2916 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2917 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2918 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
2919 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2920 		/* 64 bpp PRT */
2921 		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2922 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2923 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2924 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
2925 			   NUM_BANKS(ADDR_SURF_16_BANK) |
2926 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2927 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2928 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
2929 		/* 128 bpp PRT */
2930 		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
2931 			   MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
2932 			   PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
2933 			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
2934 			   NUM_BANKS(ADDR_SURF_8_BANK) |
2935 			   BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
2936 			   BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
2937 			   MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
2938 
2939 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
2940 			WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]);
2941 		break;
2942 
2943 	default:
2944 		DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
2945 	}
2946 }
2947 
2948 static void si_select_se_sh(struct radeon_device *rdev,
2949 			    u32 se_num, u32 sh_num)
2950 {
2951 	u32 data = INSTANCE_BROADCAST_WRITES;
2952 
2953 	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
2954 		data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
2955 	else if (se_num == 0xffffffff)
2956 		data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
2957 	else if (sh_num == 0xffffffff)
2958 		data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
2959 	else
2960 		data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
2961 	WREG32(GRBM_GFX_INDEX, data);
2962 }
2963 
2964 static u32 si_create_bitmask(u32 bit_width)
2965 {
2966 	u32 i, mask = 0;
2967 
2968 	for (i = 0; i < bit_width; i++) {
2969 		mask <<= 1;
2970 		mask |= 1;
2971 	}
2972 	return mask;
2973 }
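/*
 * si_create_bitmask() simply builds a mask with the bit_width low bits
 * set, e.g. bit_width = 5 yields 0x1f.  A hypothetical loop-free
 * equivalent, shown for illustration only:
 */
#if 0	/* illustrative sketch; not part of the driver */
static u32 si_create_bitmask_alt(u32 bit_width)
{
	return (bit_width >= 32) ? 0xffffffffu : ((1u << bit_width) - 1);
}
#endif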
2974 
2975 static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
2976 {
2977 	u32 data, mask;
2978 
2979 	data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
2980 	if (data & 1)
2981 		data &= INACTIVE_CUS_MASK;
2982 	else
2983 		data = 0;
2984 	data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
2985 
2986 	data >>= INACTIVE_CUS_SHIFT;
2987 
2988 	mask = si_create_bitmask(cu_per_sh);
2989 
2990 	return ~data & mask;
2991 }
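/*
 * The result above is a per-SH bitmap of usable compute units for the
 * currently selected SE/SH: the fuse mask (CC_GC_SHADER_ARRAY_CONFIG)
 * and the user/driver mask (GC_USER_SHADER_ARRAY_CONFIG) are OR'd,
 * shifted down to bit 0, inverted, and clipped to cu_per_sh bits, so a
 * set bit means that CU is active.
 */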
2992 
2993 static void si_setup_spi(struct radeon_device *rdev,
2994 			 u32 se_num, u32 sh_per_se,
2995 			 u32 cu_per_sh)
2996 {
2997 	int i, j, k;
2998 	u32 data, mask, active_cu;
2999 
3000 	for (i = 0; i < se_num; i++) {
3001 		for (j = 0; j < sh_per_se; j++) {
3002 			si_select_se_sh(rdev, i, j);
3003 			data = RREG32(SPI_STATIC_THREAD_MGMT_3);
3004 			active_cu = si_get_cu_enabled(rdev, cu_per_sh);
3005 
3006 			mask = 1;
3007 			for (k = 0; k < 16; k++) {
3008 				mask <<= k;
3009 				if (active_cu & mask) {
3010 					data &= ~mask;
3011 					WREG32(SPI_STATIC_THREAD_MGMT_3, data);
3012 					break;
3013 				}
3014 			}
3015 		}
3016 	}
3017 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3018 }
3019 
3020 static u32 si_get_rb_disabled(struct radeon_device *rdev,
3021 			      u32 max_rb_num_per_se,
3022 			      u32 sh_per_se)
3023 {
3024 	u32 data, mask;
3025 
3026 	data = RREG32(CC_RB_BACKEND_DISABLE);
3027 	if (data & 1)
3028 		data &= BACKEND_DISABLE_MASK;
3029 	else
3030 		data = 0;
3031 	data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
3032 
3033 	data >>= BACKEND_DISABLE_SHIFT;
3034 
3035 	mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
3036 
3037 	return data & mask;
3038 }
3039 
3040 static void si_setup_rb(struct radeon_device *rdev,
3041 			u32 se_num, u32 sh_per_se,
3042 			u32 max_rb_num_per_se)
3043 {
3044 	int i, j;
3045 	u32 data, mask;
3046 	u32 disabled_rbs = 0;
3047 	u32 enabled_rbs = 0;
3048 
3049 	for (i = 0; i < se_num; i++) {
3050 		for (j = 0; j < sh_per_se; j++) {
3051 			si_select_se_sh(rdev, i, j);
3052 			data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
3053 			disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
3054 		}
3055 	}
3056 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3057 
3058 	mask = 1;
3059 	for (i = 0; i < max_rb_num_per_se * se_num; i++) {
3060 		if (!(disabled_rbs & mask))
3061 			enabled_rbs |= mask;
3062 		mask <<= 1;
3063 	}
3064 
3065 	rdev->config.si.backend_enable_mask = enabled_rbs;
3066 
3067 	for (i = 0; i < se_num; i++) {
3068 		si_select_se_sh(rdev, i, 0xffffffff);
3069 		data = 0;
3070 		for (j = 0; j < sh_per_se; j++) {
3071 			switch (enabled_rbs & 3) {
3072 			case 1:
3073 				data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
3074 				break;
3075 			case 2:
3076 				data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
3077 				break;
3078 			case 3:
3079 			default:
3080 				data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
3081 				break;
3082 			}
3083 			enabled_rbs >>= 2;
3084 		}
3085 		WREG32(PA_SC_RASTER_CONFIG, data);
3086 	}
3087 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3088 }
3089 
3090 static void si_gpu_init(struct radeon_device *rdev)
3091 {
3092 	u32 gb_addr_config = 0;
3093 	u32 mc_shared_chmap, mc_arb_ramcfg;
3094 	u32 sx_debug_1;
3095 	u32 hdp_host_path_cntl;
3096 	u32 tmp;
3097 	int i, j;
3098 
3099 	switch (rdev->family) {
3100 	case CHIP_TAHITI:
3101 		rdev->config.si.max_shader_engines = 2;
3102 		rdev->config.si.max_tile_pipes = 12;
3103 		rdev->config.si.max_cu_per_sh = 8;
3104 		rdev->config.si.max_sh_per_se = 2;
3105 		rdev->config.si.max_backends_per_se = 4;
3106 		rdev->config.si.max_texture_channel_caches = 12;
3107 		rdev->config.si.max_gprs = 256;
3108 		rdev->config.si.max_gs_threads = 32;
3109 		rdev->config.si.max_hw_contexts = 8;
3110 
3111 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3112 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3113 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3114 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3115 		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
3116 		break;
3117 	case CHIP_PITCAIRN:
3118 		rdev->config.si.max_shader_engines = 2;
3119 		rdev->config.si.max_tile_pipes = 8;
3120 		rdev->config.si.max_cu_per_sh = 5;
3121 		rdev->config.si.max_sh_per_se = 2;
3122 		rdev->config.si.max_backends_per_se = 4;
3123 		rdev->config.si.max_texture_channel_caches = 8;
3124 		rdev->config.si.max_gprs = 256;
3125 		rdev->config.si.max_gs_threads = 32;
3126 		rdev->config.si.max_hw_contexts = 8;
3127 
3128 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3129 		rdev->config.si.sc_prim_fifo_size_backend = 0x100;
3130 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3131 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3132 		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
3133 		break;
3134 	case CHIP_VERDE:
3135 	default:
3136 		rdev->config.si.max_shader_engines = 1;
3137 		rdev->config.si.max_tile_pipes = 4;
3138 		rdev->config.si.max_cu_per_sh = 5;
3139 		rdev->config.si.max_sh_per_se = 2;
3140 		rdev->config.si.max_backends_per_se = 4;
3141 		rdev->config.si.max_texture_channel_caches = 4;
3142 		rdev->config.si.max_gprs = 256;
3143 		rdev->config.si.max_gs_threads = 32;
3144 		rdev->config.si.max_hw_contexts = 8;
3145 
3146 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3147 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3148 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3149 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3150 		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
3151 		break;
3152 	case CHIP_OLAND:
3153 		rdev->config.si.max_shader_engines = 1;
3154 		rdev->config.si.max_tile_pipes = 4;
3155 		rdev->config.si.max_cu_per_sh = 6;
3156 		rdev->config.si.max_sh_per_se = 1;
3157 		rdev->config.si.max_backends_per_se = 2;
3158 		rdev->config.si.max_texture_channel_caches = 4;
3159 		rdev->config.si.max_gprs = 256;
3160 		rdev->config.si.max_gs_threads = 16;
3161 		rdev->config.si.max_hw_contexts = 8;
3162 
3163 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3164 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3165 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3166 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3167 		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
3168 		break;
3169 	case CHIP_HAINAN:
3170 		rdev->config.si.max_shader_engines = 1;
3171 		rdev->config.si.max_tile_pipes = 4;
3172 		rdev->config.si.max_cu_per_sh = 5;
3173 		rdev->config.si.max_sh_per_se = 1;
3174 		rdev->config.si.max_backends_per_se = 1;
3175 		rdev->config.si.max_texture_channel_caches = 2;
3176 		rdev->config.si.max_gprs = 256;
3177 		rdev->config.si.max_gs_threads = 16;
3178 		rdev->config.si.max_hw_contexts = 8;
3179 
3180 		rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
3181 		rdev->config.si.sc_prim_fifo_size_backend = 0x40;
3182 		rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
3183 		rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
3184 		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
3185 		break;
3186 	}
3187 
3188 	/* Initialize HDP */
3189 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
3190 		WREG32((0x2c14 + j), 0x00000000);
3191 		WREG32((0x2c18 + j), 0x00000000);
3192 		WREG32((0x2c1c + j), 0x00000000);
3193 		WREG32((0x2c20 + j), 0x00000000);
3194 		WREG32((0x2c24 + j), 0x00000000);
3195 	}
3196 
3197 	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
3198 	WREG32(SRBM_INT_CNTL, 1);
3199 	WREG32(SRBM_INT_ACK, 1);
3200 
3201 	evergreen_fix_pci_max_read_req_size(rdev);
3202 
3203 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
3204 
3205 	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
3206 	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
3207 
3208 	rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
3209 	rdev->config.si.mem_max_burst_length_bytes = 256;
3210 	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
3211 	rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
3212 	if (rdev->config.si.mem_row_size_in_kb > 4)
3213 		rdev->config.si.mem_row_size_in_kb = 4;
3214 	/* XXX use MC settings? */
3215 	rdev->config.si.shader_engine_tile_size = 32;
3216 	rdev->config.si.num_gpus = 1;
3217 	rdev->config.si.multi_gpu_tile_size = 64;
3218 
3219 	/* fix up row size */
3220 	gb_addr_config &= ~ROW_SIZE_MASK;
3221 	switch (rdev->config.si.mem_row_size_in_kb) {
3222 	case 1:
3223 	default:
3224 		gb_addr_config |= ROW_SIZE(0);
3225 		break;
3226 	case 2:
3227 		gb_addr_config |= ROW_SIZE(1);
3228 		break;
3229 	case 4:
3230 		gb_addr_config |= ROW_SIZE(2);
3231 		break;
3232 	}
3233 
3234 	/* setup tiling info dword.  gb_addr_config is not adequate since it does
3235 	 * not have bank info, so create a custom tiling dword.
3236 	 * bits 3:0   num_pipes
3237 	 * bits 7:4   num_banks
3238 	 * bits 11:8  group_size
3239 	 * bits 15:12 row_size
3240 	 */
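	/*
	 * Worked example (illustrative): a Tahiti board with 12 tile pipes
	 * lands in the "8 and up" bucket below (3), sixteen banks encode
	 * as 2 and 4KB rows as ROW_SIZE(2), giving
	 *   tile_config = 3 | (2 << 4) | (interleave << 8) | (2 << 12)
	 * with the interleave field copied out of gb_addr_config.
	 */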
3241 	rdev->config.si.tile_config = 0;
3242 	switch (rdev->config.si.num_tile_pipes) {
3243 	case 1:
3244 		rdev->config.si.tile_config |= (0 << 0);
3245 		break;
3246 	case 2:
3247 		rdev->config.si.tile_config |= (1 << 0);
3248 		break;
3249 	case 4:
3250 		rdev->config.si.tile_config |= (2 << 0);
3251 		break;
3252 	case 8:
3253 	default:
3254 		/* XXX what about 12? */
3255 		rdev->config.si.tile_config |= (3 << 0);
3256 		break;
3257 	}
3258 	switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
3259 	case 0: /* four banks */
3260 		rdev->config.si.tile_config |= 0 << 4;
3261 		break;
3262 	case 1: /* eight banks */
3263 		rdev->config.si.tile_config |= 1 << 4;
3264 		break;
3265 	case 2: /* sixteen banks */
3266 	default:
3267 		rdev->config.si.tile_config |= 2 << 4;
3268 		break;
3269 	}
3270 	rdev->config.si.tile_config |=
3271 		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
3272 	rdev->config.si.tile_config |=
3273 		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
3274 
3275 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
3276 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
3277 	WREG32(DMIF_ADDR_CALC, gb_addr_config);
3278 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
3279 	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
3280 	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
3281 	if (rdev->has_uvd) {
3282 		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
3283 		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
3284 		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
3285 	}
3286 
3287 	si_tiling_mode_table_init(rdev);
3288 
3289 	si_setup_rb(rdev, rdev->config.si.max_shader_engines,
3290 		    rdev->config.si.max_sh_per_se,
3291 		    rdev->config.si.max_backends_per_se);
3292 
3293 	si_setup_spi(rdev, rdev->config.si.max_shader_engines,
3294 		     rdev->config.si.max_sh_per_se,
3295 		     rdev->config.si.max_cu_per_sh);
3296 
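	/* total up the active (unharvested) CUs across every SE/SH pair */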
3297 	rdev->config.si.active_cus = 0;
3298 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
3299 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
3300 			rdev->config.si.active_cus +=
3301 				hweight32(si_get_cu_active_bitmap(rdev, i, j));
3302 		}
3303 	}
3304 
3305 	/* set HW defaults for 3D engine */
3306 	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
3307 				     ROQ_IB2_START(0x2b)));
3308 	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3309 
3310 	sx_debug_1 = RREG32(SX_DEBUG_1);
3311 	WREG32(SX_DEBUG_1, sx_debug_1);
3312 
3313 	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
3314 
3315 	WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
3316 				 SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
3317 				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
3318 				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
3319 
3320 	WREG32(VGT_NUM_INSTANCES, 1);
3321 
3322 	WREG32(CP_PERFMON_CNTL, 0);
3323 
3324 	WREG32(SQ_CONFIG, 0);
3325 
3326 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
3327 					  FORCE_EOV_MAX_REZ_CNT(255)));
3328 
3329 	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
3330 	       AUTO_INVLD_EN(ES_AND_GS_AUTO));
3331 
3332 	WREG32(VGT_GS_VERTEX_REUSE, 16);
3333 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
3334 
3335 	WREG32(CB_PERFCOUNTER0_SELECT0, 0);
3336 	WREG32(CB_PERFCOUNTER0_SELECT1, 0);
3337 	WREG32(CB_PERFCOUNTER1_SELECT0, 0);
3338 	WREG32(CB_PERFCOUNTER1_SELECT1, 0);
3339 	WREG32(CB_PERFCOUNTER2_SELECT0, 0);
3340 	WREG32(CB_PERFCOUNTER2_SELECT1, 0);
3341 	WREG32(CB_PERFCOUNTER3_SELECT0, 0);
3342 	WREG32(CB_PERFCOUNTER3_SELECT1, 0);
3343 
3344 	tmp = RREG32(HDP_MISC_CNTL);
3345 	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
3346 	WREG32(HDP_MISC_CNTL, tmp);
3347 
3348 	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
3349 	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
3350 
3351 	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3352 
3353 	udelay(50);
3354 }
3355 
3356 /*
3357  * GPU scratch registers helpers function.
3358  */
3359 static void si_scratch_init(struct radeon_device *rdev)
3360 {
3361 	int i;
3362 
3363 	rdev->scratch.num_reg = 7;
3364 	rdev->scratch.reg_base = SCRATCH_REG0;
3365 	for (i = 0; i < rdev->scratch.num_reg; i++) {
3366 		rdev->scratch.free[i] = true;
3367 		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
3368 	}
3369 }
3370 
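/**
 * si_fence_ring_emit - emit a fence on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches (SI).
 */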
3371 void si_fence_ring_emit(struct radeon_device *rdev,
3372 			struct radeon_fence *fence)
3373 {
3374 	struct radeon_ring *ring = &rdev->ring[fence->ring];
3375 	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3376 
3377 	/* flush read cache over gart */
3378 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3379 	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3380 	radeon_ring_write(ring, 0);
3381 	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3382 	radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3383 			  PACKET3_TC_ACTION_ENA |
3384 			  PACKET3_SH_KCACHE_ACTION_ENA |
3385 			  PACKET3_SH_ICACHE_ACTION_ENA);
3386 	radeon_ring_write(ring, 0xFFFFFFFF);
3387 	radeon_ring_write(ring, 0);
3388 	radeon_ring_write(ring, 10); /* poll interval */
3389 	/* EVENT_WRITE_EOP - flush caches, send int */
3390 	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
3391 	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
3392 	radeon_ring_write(ring, lower_32_bits(addr));
3393 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
3394 	radeon_ring_write(ring, fence->seq);
3395 	radeon_ring_write(ring, 0);
3396 }
3397 
3398 /*
3399  * IB stuff
3400  */
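/**
 * si_ring_ib_execute - emit an indirect buffer on the gfx ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Emits an INDIRECT_BUFFER packet for the IB; const IBs are preceded
 * by a SWITCH_BUFFER packet to keep the CE and DE in sync, and normal
 * IBs are followed by a read-cache flush for their vm id.
 */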
3401 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3402 {
3403 	struct radeon_ring *ring = &rdev->ring[ib->ring];
3404 	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
3405 	u32 header;
3406 
3407 	if (ib->is_const_ib) {
3408 		/* set switch buffer packet before const IB */
3409 		radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3410 		radeon_ring_write(ring, 0);
3411 
3412 		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
3413 	} else {
3414 		u32 next_rptr;
3415 		if (ring->rptr_save_reg) {
3416 			next_rptr = ring->wptr + 3 + 4 + 8;
3417 			radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3418 			radeon_ring_write(ring, ((ring->rptr_save_reg -
3419 						  PACKET3_SET_CONFIG_REG_START) >> 2));
3420 			radeon_ring_write(ring, next_rptr);
3421 		} else if (rdev->wb.enabled) {
3422 			next_rptr = ring->wptr + 5 + 4 + 8;
3423 			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3424 			radeon_ring_write(ring, (1 << 8));
3425 			radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3426 			radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
3427 			radeon_ring_write(ring, next_rptr);
3428 		}
3429 
3430 		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3431 	}
3432 
3433 	radeon_ring_write(ring, header);
3434 	radeon_ring_write(ring,
3435 #ifdef __BIG_ENDIAN
3436 			  (2 << 0) |
3437 #endif
3438 			  (ib->gpu_addr & 0xFFFFFFFC));
3439 	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3440 	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
3441 
3442 	if (!ib->is_const_ib) {
3443 		/* flush read cache over gart for this vmid */
3444 		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3445 		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3446 		radeon_ring_write(ring, vm_id);
3447 		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3448 		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3449 				  PACKET3_TC_ACTION_ENA |
3450 				  PACKET3_SH_KCACHE_ACTION_ENA |
3451 				  PACKET3_SH_ICACHE_ACTION_ENA);
3452 		radeon_ring_write(ring, 0xFFFFFFFF);
3453 		radeon_ring_write(ring, 0);
3454 		radeon_ring_write(ring, 10); /* poll interval */
3455 	}
3456 }
3457 
3458 /*
3459  * CP.
3460  */
3461 static void si_cp_enable(struct radeon_device *rdev, bool enable)
3462 {
3463 	if (enable)
3464 		WREG32(CP_ME_CNTL, 0);
3465 	else {
3466 		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3467 			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
3468 		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
3469 		WREG32(SCRATCH_UMSK, 0);
3470 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3471 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3472 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3473 	}
3474 	udelay(50);
3475 }
3476 
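/**
 * si_cp_load_microcode - load the CP microcode
 *
 * @rdev: radeon_device pointer
 *
 * Halts the CP and writes the PFP, CE and ME images through their
 * indexed data ports, handling both the new header-wrapped
 * little-endian firmware layout and the legacy big-endian one.
 * Returns 0 on success, -EINVAL if any image is missing.
 */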
3477 static int si_cp_load_microcode(struct radeon_device *rdev)
3478 {
3479 	int i;
3480 
3481 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
3482 		return -EINVAL;
3483 
3484 	si_cp_enable(rdev, false);
3485 
3486 	if (rdev->new_fw) {
3487 		const struct gfx_firmware_header_v1_0 *pfp_hdr =
3488 			(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
3489 		const struct gfx_firmware_header_v1_0 *ce_hdr =
3490 			(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
3491 		const struct gfx_firmware_header_v1_0 *me_hdr =
3492 			(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
3493 		const __le32 *fw_data;
3494 		u32 fw_size;
3495 
3496 		radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
3497 		radeon_ucode_print_gfx_hdr(&ce_hdr->header);
3498 		radeon_ucode_print_gfx_hdr(&me_hdr->header);
3499 
3500 		/* PFP */
3501 		fw_data = (const __le32 *)
3502 			(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
3503 		fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
3504 		WREG32(CP_PFP_UCODE_ADDR, 0);
3505 		for (i = 0; i < fw_size; i++)
3506 			WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
3507 		WREG32(CP_PFP_UCODE_ADDR, 0);
3508 
3509 		/* CE */
3510 		fw_data = (const __le32 *)
3511 			(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
3512 		fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
3513 		WREG32(CP_CE_UCODE_ADDR, 0);
3514 		for (i = 0; i < fw_size; i++)
3515 			WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
3516 		WREG32(CP_CE_UCODE_ADDR, 0);
3517 
3518 		/* ME */
		fw_data = (const __le32 *)
3520 			(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
3521 		fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
3522 		WREG32(CP_ME_RAM_WADDR, 0);
3523 		for (i = 0; i < fw_size; i++)
3524 			WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
3525 		WREG32(CP_ME_RAM_WADDR, 0);
3526 	} else {
3527 		const __be32 *fw_data;
3528 
3529 		/* PFP */
3530 		fw_data = (const __be32 *)rdev->pfp_fw->data;
3531 		WREG32(CP_PFP_UCODE_ADDR, 0);
3532 		for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
3533 			WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
3534 		WREG32(CP_PFP_UCODE_ADDR, 0);
3535 
3536 		/* CE */
3537 		fw_data = (const __be32 *)rdev->ce_fw->data;
3538 		WREG32(CP_CE_UCODE_ADDR, 0);
3539 		for (i = 0; i < SI_CE_UCODE_SIZE; i++)
3540 			WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
3541 		WREG32(CP_CE_UCODE_ADDR, 0);
3542 
3543 		/* ME */
3544 		fw_data = (const __be32 *)rdev->me_fw->data;
3545 		WREG32(CP_ME_RAM_WADDR, 0);
3546 		for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
3547 			WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
3548 		WREG32(CP_ME_RAM_WADDR, 0);
3549 	}
3550 
3551 	WREG32(CP_PFP_UCODE_ADDR, 0);
3552 	WREG32(CP_CE_UCODE_ADDR, 0);
3553 	WREG32(CP_ME_RAM_WADDR, 0);
3554 	WREG32(CP_ME_RAM_RADDR, 0);
3555 	return 0;
3556 }
3557 
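/**
 * si_cp_start - initialize and start the CP rings
 *
 * @rdev: radeon_device pointer
 *
 * Emits ME_INITIALIZE and the CE partition bases, enables the CP,
 * replays the golden clear state once on the gfx ring and clears the
 * compute context state on both compute rings.
 * Returns 0 on success, error on failure.
 */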
3558 static int si_cp_start(struct radeon_device *rdev)
3559 {
3560 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3561 	int r, i;
3562 
3563 	r = radeon_ring_lock(rdev, ring, 7 + 4);
3564 	if (r) {
3565 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3566 		return r;
3567 	}
3568 	/* init the CP */
3569 	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
3570 	radeon_ring_write(ring, 0x1);
3571 	radeon_ring_write(ring, 0x0);
3572 	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
3573 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
3574 	radeon_ring_write(ring, 0);
3575 	radeon_ring_write(ring, 0);
3576 
3577 	/* init the CE partitions */
3578 	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
3579 	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
3580 	radeon_ring_write(ring, 0xc000);
3581 	radeon_ring_write(ring, 0xe000);
3582 	radeon_ring_unlock_commit(rdev, ring, false);
3583 
3584 	si_cp_enable(rdev, true);
3585 
3586 	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
3587 	if (r) {
3588 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
3589 		return r;
3590 	}
3591 
3592 	/* setup clear context state */
3593 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3594 	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
3595 
3596 	for (i = 0; i < si_default_size; i++)
3597 		radeon_ring_write(ring, si_default_state[i]);
3598 
3599 	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
3600 	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
3601 
3602 	/* set clear context state */
3603 	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
3604 	radeon_ring_write(ring, 0);
3605 
3606 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
3607 	radeon_ring_write(ring, 0x00000316);
3608 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
3609 	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
3610 
3611 	radeon_ring_unlock_commit(rdev, ring, false);
3612 
3613 	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
3614 		ring = &rdev->ring[i];
		r = radeon_ring_lock(rdev, ring, 2);
		if (r) {
			DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
			return r;
		}

3617 		/* clear the compute context state */
3618 		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
3619 		radeon_ring_write(ring, 0);
3620 
3621 		radeon_ring_unlock_commit(rdev, ring, false);
3622 	}
3623 
3624 	return 0;
3625 }
3626 
3627 static void si_cp_fini(struct radeon_device *rdev)
3628 {
3629 	struct radeon_ring *ring;
3630 	si_cp_enable(rdev, false);
3631 
3632 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3633 	radeon_ring_fini(rdev, ring);
3634 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3635 
3636 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3637 	radeon_ring_fini(rdev, ring);
3638 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3639 
3640 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3641 	radeon_ring_fini(rdev, ring);
3642 	radeon_scratch_free(rdev, ring->rptr_save_reg);
3643 }
3644 
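/**
 * si_cp_resume - set up and start the CP ring buffers
 *
 * @rdev: radeon_device pointer
 *
 * Programs size, pointers and rptr writeback address for the gfx ring
 * and both compute rings, starts the CP and ring-tests each ring; the
 * compute rings are optional, so only a gfx ring failure is fatal.
 * Returns 0 on success, error on failure.
 */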
3645 static int si_cp_resume(struct radeon_device *rdev)
3646 {
3647 	struct radeon_ring *ring;
3648 	u32 tmp;
3649 	u32 rb_bufsz;
3650 	int r;
3651 
3652 	si_enable_gui_idle_interrupt(rdev, false);
3653 
3654 	WREG32(CP_SEM_WAIT_TIMER, 0x0);
3655 	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
3656 
3657 	/* Set the write pointer delay */
3658 	WREG32(CP_RB_WPTR_DELAY, 0);
3659 
3660 	WREG32(CP_DEBUG, 0);
3661 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
3662 
3663 	/* ring 0 - compute and gfx */
3664 	/* Set ring buffer size */
3665 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3666 	rb_bufsz = order_base_2(ring->ring_size / 8);
3667 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3668 #ifdef __BIG_ENDIAN
3669 	tmp |= BUF_SWAP_32BIT;
3670 #endif
3671 	WREG32(CP_RB0_CNTL, tmp);
3672 
3673 	/* Initialize the ring buffer's read and write pointers */
3674 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
3675 	ring->wptr = 0;
3676 	WREG32(CP_RB0_WPTR, ring->wptr);
3677 
3678 	/* set the wb address whether it's enabled or not */
3679 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
3680 	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
3681 
3682 	if (rdev->wb.enabled)
3683 		WREG32(SCRATCH_UMSK, 0xff);
3684 	else {
3685 		tmp |= RB_NO_UPDATE;
3686 		WREG32(SCRATCH_UMSK, 0);
3687 	}
3688 
3689 	mdelay(1);
3690 	WREG32(CP_RB0_CNTL, tmp);
3691 
3692 	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
3693 
	/* ring1 - compute only */
3695 	/* Set ring buffer size */
3696 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
3697 	rb_bufsz = order_base_2(ring->ring_size / 8);
3698 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3699 #ifdef __BIG_ENDIAN
3700 	tmp |= BUF_SWAP_32BIT;
3701 #endif
3702 	WREG32(CP_RB1_CNTL, tmp);
3703 
3704 	/* Initialize the ring buffer's read and write pointers */
3705 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
3706 	ring->wptr = 0;
3707 	WREG32(CP_RB1_WPTR, ring->wptr);
3708 
3709 	/* set the wb address whether it's enabled or not */
3710 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
3711 	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
3712 
3713 	mdelay(1);
3714 	WREG32(CP_RB1_CNTL, tmp);
3715 
3716 	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
3717 
3718 	/* ring2 - compute only */
3719 	/* Set ring buffer size */
3720 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
3721 	rb_bufsz = order_base_2(ring->ring_size / 8);
3722 	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
3723 #ifdef __BIG_ENDIAN
3724 	tmp |= BUF_SWAP_32BIT;
3725 #endif
3726 	WREG32(CP_RB2_CNTL, tmp);
3727 
3728 	/* Initialize the ring buffer's read and write pointers */
3729 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
3730 	ring->wptr = 0;
3731 	WREG32(CP_RB2_WPTR, ring->wptr);
3732 
3733 	/* set the wb address whether it's enabled or not */
3734 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
3735 	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
3736 
3737 	mdelay(1);
3738 	WREG32(CP_RB2_CNTL, tmp);
3739 
3740 	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
3741 
3742 	/* start the rings */
3743 	si_cp_start(rdev);
3744 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
3745 	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
3746 	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
3747 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
3748 	if (r) {
3749 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
3750 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3751 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3752 		return r;
3753 	}
3754 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
3755 	if (r) {
3756 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3757 	}
3758 	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
3759 	if (r) {
3760 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3761 	}
3762 
3763 	si_enable_gui_idle_interrupt(rdev, true);
3764 
3765 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
3766 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
3767 
3768 	return 0;
3769 }
3770 
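/**
 * si_gpu_check_soft_reset - check which blocks are busy
 *
 * @rdev: radeon_device pointer
 *
 * Check which blocks are busy and return the relevant reset
 * mask to be used by si_gpu_soft_reset().
 * Returns a mask of the blocks to be reset.
 */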
3771 u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
3772 {
3773 	u32 reset_mask = 0;
3774 	u32 tmp;
3775 
3776 	/* GRBM_STATUS */
3777 	tmp = RREG32(GRBM_STATUS);
3778 	if (tmp & (PA_BUSY | SC_BUSY |
3779 		   BCI_BUSY | SX_BUSY |
3780 		   TA_BUSY | VGT_BUSY |
3781 		   DB_BUSY | CB_BUSY |
3782 		   GDS_BUSY | SPI_BUSY |
3783 		   IA_BUSY | IA_BUSY_NO_DMA))
3784 		reset_mask |= RADEON_RESET_GFX;
3785 
3786 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
3787 		   CP_BUSY | CP_COHERENCY_BUSY))
3788 		reset_mask |= RADEON_RESET_CP;
3789 
3790 	if (tmp & GRBM_EE_BUSY)
3791 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
3792 
3793 	/* GRBM_STATUS2 */
3794 	tmp = RREG32(GRBM_STATUS2);
3795 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
3796 		reset_mask |= RADEON_RESET_RLC;
3797 
3798 	/* DMA_STATUS_REG 0 */
3799 	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
3800 	if (!(tmp & DMA_IDLE))
3801 		reset_mask |= RADEON_RESET_DMA;
3802 
3803 	/* DMA_STATUS_REG 1 */
3804 	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
3805 	if (!(tmp & DMA_IDLE))
3806 		reset_mask |= RADEON_RESET_DMA1;
3807 
3808 	/* SRBM_STATUS2 */
3809 	tmp = RREG32(SRBM_STATUS2);
3810 	if (tmp & DMA_BUSY)
3811 		reset_mask |= RADEON_RESET_DMA;
3812 
3813 	if (tmp & DMA1_BUSY)
3814 		reset_mask |= RADEON_RESET_DMA1;
3815 
3816 	/* SRBM_STATUS */
3817 	tmp = RREG32(SRBM_STATUS);
3818 
3819 	if (tmp & IH_BUSY)
3820 		reset_mask |= RADEON_RESET_IH;
3821 
3822 	if (tmp & SEM_BUSY)
3823 		reset_mask |= RADEON_RESET_SEM;
3824 
3825 	if (tmp & GRBM_RQ_PENDING)
3826 		reset_mask |= RADEON_RESET_GRBM;
3827 
3828 	if (tmp & VMC_BUSY)
3829 		reset_mask |= RADEON_RESET_VMC;
3830 
3831 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
3832 		   MCC_BUSY | MCD_BUSY))
3833 		reset_mask |= RADEON_RESET_MC;
3834 
3835 	if (evergreen_is_display_hung(rdev))
3836 		reset_mask |= RADEON_RESET_DISPLAY;
3837 
3838 	/* VM_L2_STATUS */
3839 	tmp = RREG32(VM_L2_STATUS);
3840 	if (tmp & L2_BUSY)
3841 		reset_mask |= RADEON_RESET_VMC;
3842 
	/* Skip MC reset as it's most likely not hung, just busy */
3844 	if (reset_mask & RADEON_RESET_MC) {
3845 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
3846 		reset_mask &= ~RADEON_RESET_MC;
3847 	}
3848 
3849 	return reset_mask;
3850 }
3851 
3852 static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
3853 {
3854 	struct evergreen_mc_save save;
3855 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
3856 	u32 tmp;
3857 
3858 	if (reset_mask == 0)
3859 		return;
3860 
3861 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
3862 
3863 	evergreen_print_gpu_status_regs(rdev);
3864 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
3865 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3866 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3867 		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3868 
3869 	/* disable PG/CG */
3870 	si_fini_pg(rdev);
3871 	si_fini_cg(rdev);
3872 
3873 	/* stop the rlc */
3874 	si_rlc_stop(rdev);
3875 
3876 	/* Disable CP parsing/prefetching */
3877 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
3878 
3879 	if (reset_mask & RADEON_RESET_DMA) {
3880 		/* dma0 */
3881 		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
3882 		tmp &= ~DMA_RB_ENABLE;
3883 		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
3884 	}
3885 	if (reset_mask & RADEON_RESET_DMA1) {
3886 		/* dma1 */
3887 		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
3888 		tmp &= ~DMA_RB_ENABLE;
3889 		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
3890 	}
3891 
3892 	udelay(50);
3893 
3894 	evergreen_mc_stop(rdev, &save);
3895 	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
3897 	}
3898 
3899 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
3900 		grbm_soft_reset = SOFT_RESET_CB |
3901 			SOFT_RESET_DB |
3902 			SOFT_RESET_GDS |
3903 			SOFT_RESET_PA |
3904 			SOFT_RESET_SC |
3905 			SOFT_RESET_BCI |
3906 			SOFT_RESET_SPI |
3907 			SOFT_RESET_SX |
3908 			SOFT_RESET_TC |
3909 			SOFT_RESET_TA |
3910 			SOFT_RESET_VGT |
3911 			SOFT_RESET_IA;
3912 	}
3913 
3914 	if (reset_mask & RADEON_RESET_CP) {
3915 		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
3916 
3917 		srbm_soft_reset |= SOFT_RESET_GRBM;
3918 	}
3919 
3920 	if (reset_mask & RADEON_RESET_DMA)
3921 		srbm_soft_reset |= SOFT_RESET_DMA;
3922 
3923 	if (reset_mask & RADEON_RESET_DMA1)
3924 		srbm_soft_reset |= SOFT_RESET_DMA1;
3925 
3926 	if (reset_mask & RADEON_RESET_DISPLAY)
3927 		srbm_soft_reset |= SOFT_RESET_DC;
3928 
3929 	if (reset_mask & RADEON_RESET_RLC)
3930 		grbm_soft_reset |= SOFT_RESET_RLC;
3931 
3932 	if (reset_mask & RADEON_RESET_SEM)
3933 		srbm_soft_reset |= SOFT_RESET_SEM;
3934 
3935 	if (reset_mask & RADEON_RESET_IH)
3936 		srbm_soft_reset |= SOFT_RESET_IH;
3937 
3938 	if (reset_mask & RADEON_RESET_GRBM)
3939 		srbm_soft_reset |= SOFT_RESET_GRBM;
3940 
3941 	if (reset_mask & RADEON_RESET_VMC)
3942 		srbm_soft_reset |= SOFT_RESET_VMC;
3943 
3944 	if (reset_mask & RADEON_RESET_MC)
3945 		srbm_soft_reset |= SOFT_RESET_MC;
3946 
3947 	if (grbm_soft_reset) {
3948 		tmp = RREG32(GRBM_SOFT_RESET);
3949 		tmp |= grbm_soft_reset;
3950 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
3951 		WREG32(GRBM_SOFT_RESET, tmp);
3952 		tmp = RREG32(GRBM_SOFT_RESET);
3953 
3954 		udelay(50);
3955 
3956 		tmp &= ~grbm_soft_reset;
3957 		WREG32(GRBM_SOFT_RESET, tmp);
3958 		tmp = RREG32(GRBM_SOFT_RESET);
3959 	}
3960 
3961 	if (srbm_soft_reset) {
3962 		tmp = RREG32(SRBM_SOFT_RESET);
3963 		tmp |= srbm_soft_reset;
3964 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
3965 		WREG32(SRBM_SOFT_RESET, tmp);
3966 		tmp = RREG32(SRBM_SOFT_RESET);
3967 
3968 		udelay(50);
3969 
3970 		tmp &= ~srbm_soft_reset;
3971 		WREG32(SRBM_SOFT_RESET, tmp);
3972 		tmp = RREG32(SRBM_SOFT_RESET);
3973 	}
3974 
3975 	/* Wait a little for things to settle down */
3976 	udelay(50);
3977 
3978 	evergreen_mc_resume(rdev, &save);
3979 	udelay(50);
3980 
3981 	evergreen_print_gpu_status_regs(rdev);
3982 }
3983 
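/*
 * Clock bypass helpers for the PCI config reset path: the SPLL is
 * switched to bypass and then powered down so that (presumably) the
 * reset completes on a stable reference clock rather than a live PLL.
 */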
3984 static void si_set_clk_bypass_mode(struct radeon_device *rdev)
3985 {
3986 	u32 tmp, i;
3987 
3988 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
3989 	tmp |= SPLL_BYPASS_EN;
3990 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
3991 
3992 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
3993 	tmp |= SPLL_CTLREQ_CHG;
3994 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
3995 
3996 	for (i = 0; i < rdev->usec_timeout; i++) {
3997 		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
3998 			break;
3999 		udelay(1);
4000 	}
4001 
4002 	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
4003 	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
4004 	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
4005 
4006 	tmp = RREG32(MPLL_CNTL_MODE);
4007 	tmp &= ~MPLL_MCLK_SEL;
4008 	WREG32(MPLL_CNTL_MODE, tmp);
4009 }
4010 
4011 static void si_spll_powerdown(struct radeon_device *rdev)
4012 {
4013 	u32 tmp;
4014 
4015 	tmp = RREG32(SPLL_CNTL_MODE);
4016 	tmp |= SPLL_SW_DIR_CONTROL;
4017 	WREG32(SPLL_CNTL_MODE, tmp);
4018 
4019 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
4020 	tmp |= SPLL_RESET;
4021 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
4022 
4023 	tmp = RREG32(CG_SPLL_FUNC_CNTL);
4024 	tmp |= SPLL_SLEEP;
4025 	WREG32(CG_SPLL_FUNC_CNTL, tmp);
4026 
4027 	tmp = RREG32(SPLL_CNTL_MODE);
4028 	tmp &= ~SPLL_SW_DIR_CONTROL;
4029 	WREG32(SPLL_CNTL_MODE, tmp);
4030 }
4031 
4032 static void si_gpu_pci_config_reset(struct radeon_device *rdev)
4033 {
4034 	struct evergreen_mc_save save;
4035 	u32 tmp, i;
4036 
4037 	dev_info(rdev->dev, "GPU pci config reset\n");
4038 
4039 	/* disable dpm? */
4040 
4041 	/* disable cg/pg */
4042 	si_fini_pg(rdev);
4043 	si_fini_cg(rdev);
4044 
4045 	/* Disable CP parsing/prefetching */
4046 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
4047 	/* dma0 */
4048 	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
4049 	tmp &= ~DMA_RB_ENABLE;
4050 	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
4051 	/* dma1 */
4052 	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
4053 	tmp &= ~DMA_RB_ENABLE;
4054 	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
4055 	/* XXX other engines? */
4056 
4057 	/* halt the rlc, disable cp internal ints */
4058 	si_rlc_stop(rdev);
4059 
4060 	udelay(50);
4061 
4062 	/* disable mem access */
4063 	evergreen_mc_stop(rdev, &save);
4064 	if (evergreen_mc_wait_for_idle(rdev)) {
4065 		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4066 	}
4067 
4068 	/* set mclk/sclk to bypass */
4069 	si_set_clk_bypass_mode(rdev);
4070 	/* powerdown spll */
4071 	si_spll_powerdown(rdev);
4072 	/* disable BM */
4073 	pci_clear_master(rdev->pdev);
4074 	/* reset */
4075 	radeon_pci_config_reset(rdev);
4076 	/* wait for asic to come out of reset */
4077 	for (i = 0; i < rdev->usec_timeout; i++) {
4078 		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
4079 			break;
4080 		udelay(1);
4081 	}
4082 }
4083 
4084 int si_asic_reset(struct radeon_device *rdev, bool hard)
4085 {
4086 	u32 reset_mask;
4087 
4088 	if (hard) {
4089 		si_gpu_pci_config_reset(rdev);
4090 		return 0;
4091 	}
4092 
4093 	reset_mask = si_gpu_check_soft_reset(rdev);
4094 
4095 	if (reset_mask)
4096 		r600_set_bios_scratch_engine_hung(rdev, true);
4097 
4098 	/* try soft reset */
4099 	si_gpu_soft_reset(rdev, reset_mask);
4100 
4101 	reset_mask = si_gpu_check_soft_reset(rdev);
4102 
4103 	/* try pci config reset */
4104 	if (reset_mask && radeon_hard_reset)
4105 		si_gpu_pci_config_reset(rdev);
4106 
4107 	reset_mask = si_gpu_check_soft_reset(rdev);
4108 
4109 	if (!reset_mask)
4110 		r600_set_bios_scratch_engine_hung(rdev, false);
4111 
4112 	return 0;
4113 }
4114 
4115 /**
4116  * si_gfx_is_lockup - Check if the GFX engine is locked up
4117  *
4118  * @rdev: radeon_device pointer
4119  * @ring: radeon_ring structure holding ring information
4120  *
4121  * Check if the GFX engine is locked up.
4122  * Returns true if the engine appears to be locked up, false if not.
4123  */
4124 bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
4125 {
4126 	u32 reset_mask = si_gpu_check_soft_reset(rdev);
4127 
4128 	if (!(reset_mask & (RADEON_RESET_GFX |
4129 			    RADEON_RESET_COMPUTE |
4130 			    RADEON_RESET_CP))) {
4131 		radeon_ring_lockup_update(rdev, ring);
4132 		return false;
4133 	}
4134 	return radeon_ring_test_lockup(rdev, ring);
4135 }
4136 
4137 /* MC */
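/**
 * si_mc_program - program the memory controller FB location
 *
 * @rdev: radeon_device pointer
 *
 * Stops MC access with the display blacked out, programs the VRAM and
 * system aperture registers, then resumes the MC and shuts off the VGA
 * renderer so it cannot overwrite our objects.
 */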
4138 static void si_mc_program(struct radeon_device *rdev)
4139 {
4140 	struct evergreen_mc_save save;
4141 	u32 tmp;
4142 	int i, j;
4143 
4144 	/* Initialize HDP */
4145 	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
4146 		WREG32((0x2c14 + j), 0x00000000);
4147 		WREG32((0x2c18 + j), 0x00000000);
4148 		WREG32((0x2c1c + j), 0x00000000);
4149 		WREG32((0x2c20 + j), 0x00000000);
4150 		WREG32((0x2c24 + j), 0x00000000);
4151 	}
4152 	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
4153 
4154 	evergreen_mc_stop(rdev, &save);
4155 	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4157 	}
4158 	if (!ASIC_IS_NODCE(rdev))
4159 		/* Lockout access through VGA aperture*/
4160 		WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
4161 	/* Update configuration */
4162 	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
4163 	       rdev->mc.vram_start >> 12);
4164 	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
4165 	       rdev->mc.vram_end >> 12);
4166 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
4167 	       rdev->vram_scratch.gpu_addr >> 12);
4168 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
4169 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
4170 	WREG32(MC_VM_FB_LOCATION, tmp);
4171 	/* XXX double check these! */
4172 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
4173 	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
4174 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
4175 	WREG32(MC_VM_AGP_BASE, 0);
4176 	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
4177 	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
4178 	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out !\n");
4180 	}
4181 	evergreen_mc_resume(rdev, &save);
4182 	if (!ASIC_IS_NODCE(rdev)) {
4183 		/* we need to own VRAM, so turn off the VGA renderer here
4184 		 * to stop it overwriting our objects */
4185 		rv515_vga_render_disable(rdev);
4186 	}
4187 }
4188 
4189 void si_vram_gtt_location(struct radeon_device *rdev,
4190 			  struct radeon_mc *mc)
4191 {
4192 	if (mc->mc_vram_size > 0xFFC0000000ULL) {
4193 		/* leave room for at least 1024M GTT */
4194 		dev_warn(rdev->dev, "limiting VRAM\n");
4195 		mc->real_vram_size = 0xFFC0000000ULL;
4196 		mc->mc_vram_size = 0xFFC0000000ULL;
4197 	}
4198 	radeon_vram_location(rdev, &rdev->mc, 0);
4199 	rdev->mc.gtt_base_align = 0;
4200 	radeon_gtt_location(rdev, mc);
4201 }
4202 
4203 static int si_mc_init(struct radeon_device *rdev)
4204 {
4205 	u32 tmp;
4206 	int chansize, numchan;
4207 
	/* Get VRAM information */
4209 	rdev->mc.vram_is_ddr = true;
4210 	tmp = RREG32(MC_ARB_RAMCFG);
4211 	if (tmp & CHANSIZE_OVERRIDE) {
4212 		chansize = 16;
4213 	} else if (tmp & CHANSIZE_MASK) {
4214 		chansize = 64;
4215 	} else {
4216 		chansize = 32;
4217 	}
4218 	tmp = RREG32(MC_SHARED_CHMAP);
4219 	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
4220 	case 0:
4221 	default:
4222 		numchan = 1;
4223 		break;
4224 	case 1:
4225 		numchan = 2;
4226 		break;
4227 	case 2:
4228 		numchan = 4;
4229 		break;
4230 	case 3:
4231 		numchan = 8;
4232 		break;
4233 	case 4:
4234 		numchan = 3;
4235 		break;
4236 	case 5:
4237 		numchan = 6;
4238 		break;
4239 	case 6:
4240 		numchan = 10;
4241 		break;
4242 	case 7:
4243 		numchan = 12;
4244 		break;
4245 	case 8:
4246 		numchan = 16;
4247 		break;
4248 	}
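	/* e.g. a 384-bit Tahiti board reads back 12 channels x 32 bits (illustrative) */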
4249 	rdev->mc.vram_width = numchan * chansize;
	/* Could the aperture size report 0? */
4251 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
4252 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
4253 	/* size in MB on si */
4254 	tmp = RREG32(CONFIG_MEMSIZE);
4255 	/* some boards may have garbage in the upper 16 bits */
4256 	if (tmp & 0xffff0000) {
4257 		DRM_INFO("Probable bad vram size: 0x%08x\n", tmp);
4258 		if (tmp & 0xffff)
4259 			tmp &= 0xffff;
4260 	}
4261 	rdev->mc.mc_vram_size = tmp * 1024ULL * 1024ULL;
4262 	rdev->mc.real_vram_size = rdev->mc.mc_vram_size;
4263 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
4264 	si_vram_gtt_location(rdev, &rdev->mc);
4265 	radeon_update_bandwidth_info(rdev);
4266 
4267 	return 0;
4268 }
4269 
4270 /*
4271  * GART
4272  */
4273 void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
4274 {
4275 	/* flush hdp cache */
4276 	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
4277 
4278 	/* bits 0-15 are the VM contexts0-15 */
4279 	WREG32(VM_INVALIDATE_REQUEST, 1);
4280 }
4281 
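/**
 * si_pcie_gart_enable - set up the PCIE GART
 *
 * @rdev: radeon_device pointer
 *
 * Pins the GART table in VRAM, programs the L1 TLB and L2 cache, maps
 * context 0 linearly over the GTT aperture and points the remaining
 * fifteen contexts at their saved page tables with protection fault
 * interrupts enabled.
 * Returns 0 on success, error on failure.
 */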
4282 static int si_pcie_gart_enable(struct radeon_device *rdev)
4283 {
4284 	int r, i;
4285 
4286 	if (rdev->gart.robj == NULL) {
4287 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
4288 		return -EINVAL;
4289 	}
4290 	r = radeon_gart_table_vram_pin(rdev);
4291 	if (r)
4292 		return r;
4293 	/* Setup TLB control */
4294 	WREG32(MC_VM_MX_L1_TLB_CNTL,
4295 	       (0xA << 7) |
4296 	       ENABLE_L1_TLB |
4297 	       ENABLE_L1_FRAGMENT_PROCESSING |
4298 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4299 	       ENABLE_ADVANCED_DRIVER_MODEL |
4300 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4301 	/* Setup L2 cache */
4302 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
4303 	       ENABLE_L2_FRAGMENT_PROCESSING |
4304 	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4305 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4306 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
4307 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
4308 	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
4309 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4310 	       BANK_SELECT(4) |
4311 	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
4312 	/* setup context0 */
4313 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
4314 	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
4315 	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
4316 	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
4317 			(u32)(rdev->dummy_page.addr >> 12));
4318 	WREG32(VM_CONTEXT0_CNTL2, 0);
4319 	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
4320 				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
4321 
4322 	WREG32(0x15D4, 0);
4323 	WREG32(0x15D8, 0);
4324 	WREG32(0x15DC, 0);
4325 
4326 	/* empty context1-15 */
4327 	/* set vm size, must be a multiple of 4 */
4328 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
4329 	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
4330 	/* Assign the pt base to something valid for now; the pts used for
4331 	 * the VMs are determined by the application and setup and assigned
4332 	 * on the fly in the vm part of radeon_gart.c
4333 	 */
4334 	for (i = 1; i < 16; i++) {
4335 		if (i < 8)
4336 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
4337 			       rdev->vm_manager.saved_table_addr[i]);
4338 		else
4339 			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
4340 			       rdev->vm_manager.saved_table_addr[i]);
4341 	}
4342 
4343 	/* enable context1-15 */
4344 	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
4345 	       (u32)(rdev->dummy_page.addr >> 12));
4346 	WREG32(VM_CONTEXT1_CNTL2, 4);
4347 	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
4348 				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
4349 				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4350 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4351 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4352 				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
4353 				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
4354 				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
4355 				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
4356 				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
4357 				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
4358 				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
4359 				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
4360 				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
4361 
4362 	si_pcie_gart_tlb_flush(rdev);
4363 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
4364 		 (unsigned)(rdev->mc.gtt_size >> 20),
4365 		 (unsigned long long)rdev->gart.table_addr);
4366 	rdev->gart.ready = true;
4367 	return 0;
4368 }
4369 
4370 static void si_pcie_gart_disable(struct radeon_device *rdev)
4371 {
4372 	unsigned i;
4373 
4374 	for (i = 1; i < 16; ++i) {
4375 		uint32_t reg;
4376 		if (i < 8)
4377 			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2);
4378 		else
4379 			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2);
4380 		rdev->vm_manager.saved_table_addr[i] = RREG32(reg);
4381 	}
4382 
4383 	/* Disable all tables */
4384 	WREG32(VM_CONTEXT0_CNTL, 0);
4385 	WREG32(VM_CONTEXT1_CNTL, 0);
4386 	/* Setup TLB control */
4387 	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
4388 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
4389 	/* Setup L2 cache */
4390 	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
4391 	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
4392 	       EFFECTIVE_L2_QUEUE_SIZE(7) |
4393 	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
4394 	WREG32(VM_L2_CNTL2, 0);
4395 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
4396 	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
4397 	radeon_gart_table_vram_unpin(rdev);
4398 }
4399 
4400 static void si_pcie_gart_fini(struct radeon_device *rdev)
4401 {
4402 	si_pcie_gart_disable(rdev);
4403 	radeon_gart_table_vram_free(rdev);
4404 	radeon_gart_fini(rdev);
4405 }
4406 
4407 /* vm parser */
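/*
 * The VM command-stream parser validates IBs submitted by userspace on
 * the VM rings: register writes are only allowed into context/shader
 * space or an explicit whitelist of config registers, and only a fixed
 * set of PACKET3 opcodes is accepted per engine.
 */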
4408 static bool si_vm_reg_valid(u32 reg)
4409 {
4410 	/* context regs are fine */
4411 	if (reg >= 0x28000)
4412 		return true;
4413 
4414 	/* shader regs are also fine */
4415 	if (reg >= 0xB000 && reg < 0xC000)
4416 		return true;
4417 
4418 	/* check config regs */
4419 	switch (reg) {
4420 	case GRBM_GFX_INDEX:
4421 	case CP_STRMOUT_CNTL:
4422 	case VGT_VTX_VECT_EJECT_REG:
4423 	case VGT_CACHE_INVALIDATION:
4424 	case VGT_ESGS_RING_SIZE:
4425 	case VGT_GSVS_RING_SIZE:
4426 	case VGT_GS_VERTEX_REUSE:
4427 	case VGT_PRIMITIVE_TYPE:
4428 	case VGT_INDEX_TYPE:
4429 	case VGT_NUM_INDICES:
4430 	case VGT_NUM_INSTANCES:
4431 	case VGT_TF_RING_SIZE:
4432 	case VGT_HS_OFFCHIP_PARAM:
4433 	case VGT_TF_MEMORY_BASE:
4434 	case PA_CL_ENHANCE:
4435 	case PA_SU_LINE_STIPPLE_VALUE:
4436 	case PA_SC_LINE_STIPPLE_STATE:
4437 	case PA_SC_ENHANCE:
4438 	case SQC_CACHES:
4439 	case SPI_STATIC_THREAD_MGMT_1:
4440 	case SPI_STATIC_THREAD_MGMT_2:
4441 	case SPI_STATIC_THREAD_MGMT_3:
4442 	case SPI_PS_MAX_WAVE_ID:
4443 	case SPI_CONFIG_CNTL:
4444 	case SPI_CONFIG_CNTL_1:
4445 	case TA_CNTL_AUX:
4446 	case TA_CS_BC_BASE_ADDR:
4447 		return true;
4448 	default:
4449 		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
4450 		return false;
4451 	}
4452 }
4453 
4454 static int si_vm_packet3_ce_check(struct radeon_device *rdev,
4455 				  u32 *ib, struct radeon_cs_packet *pkt)
4456 {
4457 	switch (pkt->opcode) {
4458 	case PACKET3_NOP:
4459 	case PACKET3_SET_BASE:
4460 	case PACKET3_SET_CE_DE_COUNTERS:
4461 	case PACKET3_LOAD_CONST_RAM:
4462 	case PACKET3_WRITE_CONST_RAM:
4463 	case PACKET3_WRITE_CONST_RAM_OFFSET:
4464 	case PACKET3_DUMP_CONST_RAM:
4465 	case PACKET3_INCREMENT_CE_COUNTER:
4466 	case PACKET3_WAIT_ON_DE_COUNTER:
4467 	case PACKET3_CE_WRITE:
4468 		break;
4469 	default:
4470 		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
4471 		return -EINVAL;
4472 	}
4473 	return 0;
4474 }
4475 
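/*
 * CP_DMA can source from or write to register space; when the SAS or
 * DAS command bits select registers, everything touched (the single
 * start register if SAIC/DAIC is set, otherwise the whole range
 * covered by the transfer count) must pass si_vm_reg_valid().
 */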
4476 static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
4477 {
4478 	u32 start_reg, reg, i;
4479 	u32 command = ib[idx + 4];
4480 	u32 info = ib[idx + 1];
4481 	u32 idx_value = ib[idx];
4482 	if (command & PACKET3_CP_DMA_CMD_SAS) {
4483 		/* src address space is register */
4484 		if (((info & 0x60000000) >> 29) == 0) {
4485 			start_reg = idx_value << 2;
4486 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
4487 				reg = start_reg;
4488 				if (!si_vm_reg_valid(reg)) {
4489 					DRM_ERROR("CP DMA Bad SRC register\n");
4490 					return -EINVAL;
4491 				}
4492 			} else {
4493 				for (i = 0; i < (command & 0x1fffff); i++) {
4494 					reg = start_reg + (4 * i);
4495 					if (!si_vm_reg_valid(reg)) {
4496 						DRM_ERROR("CP DMA Bad SRC register\n");
4497 						return -EINVAL;
4498 					}
4499 				}
4500 			}
4501 		}
4502 	}
4503 	if (command & PACKET3_CP_DMA_CMD_DAS) {
4504 		/* dst address space is register */
4505 		if (((info & 0x00300000) >> 20) == 0) {
4506 			start_reg = ib[idx + 2];
4507 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
4508 				reg = start_reg;
4509 				if (!si_vm_reg_valid(reg)) {
4510 					DRM_ERROR("CP DMA Bad DST register\n");
4511 					return -EINVAL;
4512 				}
4513 			} else {
4514 				for (i = 0; i < (command & 0x1fffff); i++) {
4515 					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg)) {
4517 						DRM_ERROR("CP DMA Bad DST register\n");
4518 						return -EINVAL;
4519 					}
4520 				}
4521 			}
4522 		}
4523 	}
4524 	return 0;
4525 }
4526 
4527 static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
4528 				   u32 *ib, struct radeon_cs_packet *pkt)
4529 {
4530 	int r;
4531 	u32 idx = pkt->idx + 1;
4532 	u32 idx_value = ib[idx];
4533 	u32 start_reg, end_reg, reg, i;
4534 
4535 	switch (pkt->opcode) {
4536 	case PACKET3_NOP:
4537 	case PACKET3_SET_BASE:
4538 	case PACKET3_CLEAR_STATE:
4539 	case PACKET3_INDEX_BUFFER_SIZE:
4540 	case PACKET3_DISPATCH_DIRECT:
4541 	case PACKET3_DISPATCH_INDIRECT:
4542 	case PACKET3_ALLOC_GDS:
4543 	case PACKET3_WRITE_GDS_RAM:
4544 	case PACKET3_ATOMIC_GDS:
4545 	case PACKET3_ATOMIC:
4546 	case PACKET3_OCCLUSION_QUERY:
4547 	case PACKET3_SET_PREDICATION:
4548 	case PACKET3_COND_EXEC:
4549 	case PACKET3_PRED_EXEC:
4550 	case PACKET3_DRAW_INDIRECT:
4551 	case PACKET3_DRAW_INDEX_INDIRECT:
4552 	case PACKET3_INDEX_BASE:
4553 	case PACKET3_DRAW_INDEX_2:
4554 	case PACKET3_CONTEXT_CONTROL:
4555 	case PACKET3_INDEX_TYPE:
4556 	case PACKET3_DRAW_INDIRECT_MULTI:
4557 	case PACKET3_DRAW_INDEX_AUTO:
4558 	case PACKET3_DRAW_INDEX_IMMD:
4559 	case PACKET3_NUM_INSTANCES:
4560 	case PACKET3_DRAW_INDEX_MULTI_AUTO:
4561 	case PACKET3_STRMOUT_BUFFER_UPDATE:
4562 	case PACKET3_DRAW_INDEX_OFFSET_2:
4563 	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
4564 	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
4565 	case PACKET3_MPEG_INDEX:
4566 	case PACKET3_WAIT_REG_MEM:
4567 	case PACKET3_MEM_WRITE:
4568 	case PACKET3_PFP_SYNC_ME:
4569 	case PACKET3_SURFACE_SYNC:
4570 	case PACKET3_EVENT_WRITE:
4571 	case PACKET3_EVENT_WRITE_EOP:
4572 	case PACKET3_EVENT_WRITE_EOS:
4573 	case PACKET3_SET_CONTEXT_REG:
4574 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
4575 	case PACKET3_SET_SH_REG:
4576 	case PACKET3_SET_SH_REG_OFFSET:
4577 	case PACKET3_INCREMENT_DE_COUNTER:
4578 	case PACKET3_WAIT_ON_CE_COUNTER:
4579 	case PACKET3_WAIT_ON_AVAIL_BUFFER:
4580 	case PACKET3_ME_WRITE:
4581 		break;
4582 	case PACKET3_COPY_DATA:
4583 		if ((idx_value & 0xf00) == 0) {
4584 			reg = ib[idx + 3] * 4;
4585 			if (!si_vm_reg_valid(reg))
4586 				return -EINVAL;
4587 		}
4588 		break;
4589 	case PACKET3_WRITE_DATA:
4590 		if ((idx_value & 0xf00) == 0) {
4591 			start_reg = ib[idx + 1] * 4;
4592 			if (idx_value & 0x10000) {
4593 				if (!si_vm_reg_valid(start_reg))
4594 					return -EINVAL;
4595 			} else {
4596 				for (i = 0; i < (pkt->count - 2); i++) {
4597 					reg = start_reg + (4 * i);
4598 					if (!si_vm_reg_valid(reg))
4599 						return -EINVAL;
4600 				}
4601 			}
4602 		}
4603 		break;
4604 	case PACKET3_COND_WRITE:
4605 		if (idx_value & 0x100) {
4606 			reg = ib[idx + 5] * 4;
4607 			if (!si_vm_reg_valid(reg))
4608 				return -EINVAL;
4609 		}
4610 		break;
4611 	case PACKET3_COPY_DW:
4612 		if (idx_value & 0x2) {
4613 			reg = ib[idx + 3] * 4;
4614 			if (!si_vm_reg_valid(reg))
4615 				return -EINVAL;
4616 		}
4617 		break;
4618 	case PACKET3_SET_CONFIG_REG:
4619 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
4620 		end_reg = 4 * pkt->count + start_reg - 4;
4621 		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
4622 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
4623 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
4624 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
4625 			return -EINVAL;
4626 		}
4627 		for (i = 0; i < pkt->count; i++) {
4628 			reg = start_reg + (4 * i);
4629 			if (!si_vm_reg_valid(reg))
4630 				return -EINVAL;
4631 		}
4632 		break;
4633 	case PACKET3_CP_DMA:
4634 		r = si_vm_packet3_cp_dma_check(ib, idx);
4635 		if (r)
4636 			return r;
4637 		break;
4638 	default:
4639 		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
4640 		return -EINVAL;
4641 	}
4642 	return 0;
4643 }
4644 
4645 static int si_vm_packet3_compute_check(struct radeon_device *rdev,
4646 				       u32 *ib, struct radeon_cs_packet *pkt)
4647 {
4648 	int r;
4649 	u32 idx = pkt->idx + 1;
4650 	u32 idx_value = ib[idx];
4651 	u32 start_reg, reg, i;
4652 
4653 	switch (pkt->opcode) {
4654 	case PACKET3_NOP:
4655 	case PACKET3_SET_BASE:
4656 	case PACKET3_CLEAR_STATE:
4657 	case PACKET3_DISPATCH_DIRECT:
4658 	case PACKET3_DISPATCH_INDIRECT:
4659 	case PACKET3_ALLOC_GDS:
4660 	case PACKET3_WRITE_GDS_RAM:
4661 	case PACKET3_ATOMIC_GDS:
4662 	case PACKET3_ATOMIC:
4663 	case PACKET3_OCCLUSION_QUERY:
4664 	case PACKET3_SET_PREDICATION:
4665 	case PACKET3_COND_EXEC:
4666 	case PACKET3_PRED_EXEC:
4667 	case PACKET3_CONTEXT_CONTROL:
4668 	case PACKET3_STRMOUT_BUFFER_UPDATE:
4669 	case PACKET3_WAIT_REG_MEM:
4670 	case PACKET3_MEM_WRITE:
4671 	case PACKET3_PFP_SYNC_ME:
4672 	case PACKET3_SURFACE_SYNC:
4673 	case PACKET3_EVENT_WRITE:
4674 	case PACKET3_EVENT_WRITE_EOP:
4675 	case PACKET3_EVENT_WRITE_EOS:
4676 	case PACKET3_SET_CONTEXT_REG:
4677 	case PACKET3_SET_CONTEXT_REG_INDIRECT:
4678 	case PACKET3_SET_SH_REG:
4679 	case PACKET3_SET_SH_REG_OFFSET:
4680 	case PACKET3_INCREMENT_DE_COUNTER:
4681 	case PACKET3_WAIT_ON_CE_COUNTER:
4682 	case PACKET3_WAIT_ON_AVAIL_BUFFER:
4683 	case PACKET3_ME_WRITE:
4684 		break;
4685 	case PACKET3_COPY_DATA:
4686 		if ((idx_value & 0xf00) == 0) {
4687 			reg = ib[idx + 3] * 4;
4688 			if (!si_vm_reg_valid(reg))
4689 				return -EINVAL;
4690 		}
4691 		break;
4692 	case PACKET3_WRITE_DATA:
4693 		if ((idx_value & 0xf00) == 0) {
4694 			start_reg = ib[idx + 1] * 4;
4695 			if (idx_value & 0x10000) {
4696 				if (!si_vm_reg_valid(start_reg))
4697 					return -EINVAL;
4698 			} else {
4699 				for (i = 0; i < (pkt->count - 2); i++) {
4700 					reg = start_reg + (4 * i);
4701 					if (!si_vm_reg_valid(reg))
4702 						return -EINVAL;
4703 				}
4704 			}
4705 		}
4706 		break;
4707 	case PACKET3_COND_WRITE:
4708 		if (idx_value & 0x100) {
4709 			reg = ib[idx + 5] * 4;
4710 			if (!si_vm_reg_valid(reg))
4711 				return -EINVAL;
4712 		}
4713 		break;
4714 	case PACKET3_COPY_DW:
4715 		if (idx_value & 0x2) {
4716 			reg = ib[idx + 3] * 4;
4717 			if (!si_vm_reg_valid(reg))
4718 				return -EINVAL;
4719 		}
4720 		break;
4721 	case PACKET3_CP_DMA:
4722 		r = si_vm_packet3_cp_dma_check(ib, idx);
4723 		if (r)
4724 			return r;
4725 		break;
4726 	default:
4727 		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
4728 		return -EINVAL;
4729 	}
4730 	return 0;
4731 }
4732 
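/**
 * si_ib_parse - validate an IB for a VM submission
 *
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Walks the IB, rejecting type-0 packets outright and dispatching each
 * type-3 packet to the CE, gfx or compute checker; on failure the whole
 * IB is dumped with the offending dword flagged.
 * Returns 0 on success, -EINVAL on error.
 */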
4733 int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
4734 {
4735 	int ret = 0;
4736 	u32 idx = 0, i;
4737 	struct radeon_cs_packet pkt;
4738 
4739 	do {
4740 		pkt.idx = idx;
4741 		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
4742 		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
4743 		pkt.one_reg_wr = 0;
4744 		switch (pkt.type) {
4745 		case RADEON_PACKET_TYPE0:
4746 			dev_err(rdev->dev, "Packet0 not allowed!\n");
4747 			ret = -EINVAL;
4748 			break;
4749 		case RADEON_PACKET_TYPE2:
4750 			idx += 1;
4751 			break;
4752 		case RADEON_PACKET_TYPE3:
4753 			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
4754 			if (ib->is_const_ib)
4755 				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
4756 			else {
4757 				switch (ib->ring) {
4758 				case RADEON_RING_TYPE_GFX_INDEX:
4759 					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
4760 					break;
4761 				case CAYMAN_RING_TYPE_CP1_INDEX:
4762 				case CAYMAN_RING_TYPE_CP2_INDEX:
4763 					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
4764 					break;
4765 				default:
4766 					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
4767 					ret = -EINVAL;
4768 					break;
4769 				}
4770 			}
4771 			idx += pkt.count + 2;
4772 			break;
4773 		default:
4774 			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
4775 			ret = -EINVAL;
4776 			break;
4777 		}
4778 		if (ret) {
4779 			for (i = 0; i < ib->length_dw; i++) {
4780 				if (i == idx)
4781 					printk("\t0x%08x <---\n", ib->ptr[i]);
4782 				else
4783 					printk("\t0x%08x\n", ib->ptr[i]);
4784 			}
4785 			break;
4786 		}
4787 	} while (idx < ib->length_dw);
4788 
4789 	return ret;
4790 }
4791 
4792 /*
4793  * vm
4794  */
4795 int si_vm_init(struct radeon_device *rdev)
4796 {
4797 	/* number of VMs */
4798 	rdev->vm_manager.nvm = 16;
4799 	/* base offset of vram pages */
4800 	rdev->vm_manager.vram_base_offset = 0;
4801 
4802 	return 0;
4803 }
4804 
4805 void si_vm_fini(struct radeon_device *rdev)
4806 {
4807 }
4808 
4809 /**
4810  * si_vm_decode_fault - print human readable fault info
4811  *
4812  * @rdev: radeon_device pointer
4813  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
4814  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
4815  *
4816  * Print human readable fault information (SI).
4817  */
4818 static void si_vm_decode_fault(struct radeon_device *rdev,
4819 			       u32 status, u32 addr)
4820 {
4821 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
4822 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
4823 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
4824 	char *block;
4825 
4826 	if (rdev->family == CHIP_TAHITI) {
4827 		switch (mc_id) {
4828 		case 160:
4829 		case 144:
4830 		case 96:
4831 		case 80:
4832 		case 224:
4833 		case 208:
4834 		case 32:
4835 		case 16:
4836 			block = "CB";
4837 			break;
4838 		case 161:
4839 		case 145:
4840 		case 97:
4841 		case 81:
4842 		case 225:
4843 		case 209:
4844 		case 33:
4845 		case 17:
4846 			block = "CB_FMASK";
4847 			break;
4848 		case 162:
4849 		case 146:
4850 		case 98:
4851 		case 82:
4852 		case 226:
4853 		case 210:
4854 		case 34:
4855 		case 18:
4856 			block = "CB_CMASK";
4857 			break;
4858 		case 163:
4859 		case 147:
4860 		case 99:
4861 		case 83:
4862 		case 227:
4863 		case 211:
4864 		case 35:
4865 		case 19:
4866 			block = "CB_IMMED";
4867 			break;
4868 		case 164:
4869 		case 148:
4870 		case 100:
4871 		case 84:
4872 		case 228:
4873 		case 212:
4874 		case 36:
4875 		case 20:
4876 			block = "DB";
4877 			break;
4878 		case 165:
4879 		case 149:
4880 		case 101:
4881 		case 85:
4882 		case 229:
4883 		case 213:
4884 		case 37:
4885 		case 21:
4886 			block = "DB_HTILE";
4887 			break;
4888 		case 167:
4889 		case 151:
4890 		case 103:
4891 		case 87:
4892 		case 231:
4893 		case 215:
4894 		case 39:
4895 		case 23:
4896 			block = "DB_STEN";
4897 			break;
4898 		case 72:
4899 		case 68:
4900 		case 64:
4901 		case 8:
4902 		case 4:
4903 		case 0:
4904 		case 136:
4905 		case 132:
4906 		case 128:
4907 		case 200:
4908 		case 196:
4909 		case 192:
4910 			block = "TC";
4911 			break;
4912 		case 112:
4913 		case 48:
4914 			block = "CP";
4915 			break;
4916 		case 49:
4917 		case 177:
4918 		case 50:
4919 		case 178:
4920 			block = "SH";
4921 			break;
4922 		case 53:
4923 		case 190:
4924 			block = "VGT";
4925 			break;
4926 		case 117:
4927 			block = "IH";
4928 			break;
4929 		case 51:
4930 		case 115:
4931 			block = "RLC";
4932 			break;
4933 		case 119:
4934 		case 183:
4935 			block = "DMA0";
4936 			break;
4937 		case 61:
4938 			block = "DMA1";
4939 			break;
4940 		case 248:
4941 		case 120:
4942 			block = "HDP";
4943 			break;
4944 		default:
4945 			block = "unknown";
4946 			break;
4947 		}
4948 	} else {
4949 		switch (mc_id) {
4950 		case 32:
4951 		case 16:
4952 		case 96:
4953 		case 80:
4954 		case 160:
4955 		case 144:
4956 		case 224:
4957 		case 208:
4958 			block = "CB";
4959 			break;
4960 		case 33:
4961 		case 17:
4962 		case 97:
4963 		case 81:
4964 		case 161:
4965 		case 145:
4966 		case 225:
4967 		case 209:
4968 			block = "CB_FMASK";
4969 			break;
4970 		case 34:
4971 		case 18:
4972 		case 98:
4973 		case 82:
4974 		case 162:
4975 		case 146:
4976 		case 226:
4977 		case 210:
4978 			block = "CB_CMASK";
4979 			break;
4980 		case 35:
4981 		case 19:
4982 		case 99:
4983 		case 83:
4984 		case 163:
4985 		case 147:
4986 		case 227:
4987 		case 211:
4988 			block = "CB_IMMED";
4989 			break;
4990 		case 36:
4991 		case 20:
4992 		case 100:
4993 		case 84:
4994 		case 164:
4995 		case 148:
4996 		case 228:
4997 		case 212:
4998 			block = "DB";
4999 			break;
5000 		case 37:
5001 		case 21:
5002 		case 101:
5003 		case 85:
5004 		case 165:
5005 		case 149:
5006 		case 229:
5007 		case 213:
5008 			block = "DB_HTILE";
5009 			break;
5010 		case 39:
5011 		case 23:
5012 		case 103:
5013 		case 87:
5014 		case 167:
5015 		case 151:
5016 		case 231:
5017 		case 215:
5018 			block = "DB_STEN";
5019 			break;
5020 		case 72:
5021 		case 68:
5022 		case 8:
5023 		case 4:
5024 		case 136:
5025 		case 132:
5026 		case 200:
5027 		case 196:
5028 			block = "TC";
5029 			break;
5030 		case 112:
5031 		case 48:
5032 			block = "CP";
5033 			break;
5034 		case 49:
5035 		case 177:
5036 		case 50:
5037 		case 178:
5038 			block = "SH";
5039 			break;
5040 		case 53:
5041 			block = "VGT";
5042 			break;
5043 		case 117:
5044 			block = "IH";
5045 			break;
5046 		case 51:
5047 		case 115:
5048 			block = "RLC";
5049 			break;
5050 		case 119:
5051 		case 183:
5052 			block = "DMA0";
5053 			break;
5054 		case 61:
5055 			block = "DMA1";
5056 			break;
5057 		case 248:
5058 		case 120:
5059 			block = "HDP";
5060 			break;
5061 		default:
5062 			block = "unknown";
5063 			break;
5064 		}
5065 	}
5066 
5067 	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
5068 	       protections, vmid, addr,
5069 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
5070 	       block, mc_id);
5071 }
5072 
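/**
 * si_vm_flush - flush the VM page table using the CP (SI)
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring pointer
 * @vm_id: VM id to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for the requested VM context,
 * flush the HDP cache, request a TLB invalidate for this VM id and
 * wait for the invalidate to complete before syncing the PFP to
 * the ME (SI).
 */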
5073 void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5074 		 unsigned vm_id, uint64_t pd_addr)
5075 {
5076 	/* write new base address */
5077 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5078 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5079 				 WRITE_DATA_DST_SEL(0)));
5080 
5081 	if (vm_id < 8) {
5082 		radeon_ring_write(ring,
5083 				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
5084 	} else {
5085 		radeon_ring_write(ring,
5086 				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
5087 	}
5088 	radeon_ring_write(ring, 0);
5089 	radeon_ring_write(ring, pd_addr >> 12);
5090 
5091 	/* flush hdp cache */
5092 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5093 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5094 				 WRITE_DATA_DST_SEL(0)));
5095 	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
5096 	radeon_ring_write(ring, 0);
5097 	radeon_ring_write(ring, 0x1);
5098 
5099 	/* bits 0-15 are the VM contexts 0-15 */
5100 	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5101 	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5102 				 WRITE_DATA_DST_SEL(0)));
5103 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5104 	radeon_ring_write(ring, 0);
5105 	radeon_ring_write(ring, 1 << vm_id);
5106 
5107 	/* wait for the invalidate to complete */
5108 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
5109 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
5110 				 WAIT_REG_MEM_ENGINE(0))); /* me */
5111 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5112 	radeon_ring_write(ring, 0);
5113 	radeon_ring_write(ring, 0); /* ref */
5114 	radeon_ring_write(ring, 0); /* mask */
5115 	radeon_ring_write(ring, 0x20); /* poll interval */
5116 
5117 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
5118 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
5119 	radeon_ring_write(ring, 0x0);
5120 }
5121 
5122 /*
5123  *  Power and clock gating
5124  */
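/* Poll until both RLC serdes master busy registers read idle,
 * giving up after the usual usec timeout.
 */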
5125 static void si_wait_for_rlc_serdes(struct radeon_device *rdev)
5126 {
5127 	int i;
5128 
5129 	for (i = 0; i < rdev->usec_timeout; i++) {
5130 		if (RREG32(RLC_SERDES_MASTER_BUSY_0) == 0)
5131 			break;
5132 		udelay(1);
5133 	}
5134 
5135 	for (i = 0; i < rdev->usec_timeout; i++) {
5136 		if (RREG32(RLC_SERDES_MASTER_BUSY_1) == 0)
5137 			break;
5138 		udelay(1);
5139 	}
5140 }
5141 
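/* Enable/disable the context busy/empty interrupts on ring 0.  When
 * disabling, do a dummy gfx register read and then wait for RLC_STAT
 * to report the gfx block as clocked and powered before returning.
 */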
5142 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
5143 					 bool enable)
5144 {
5145 	u32 tmp = RREG32(CP_INT_CNTL_RING0);
5146 	u32 mask;
5147 	int i;
5148 
5149 	if (enable)
5150 		tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5151 	else
5152 		tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5153 	WREG32(CP_INT_CNTL_RING0, tmp);
5154 
5155 	if (!enable) {
5156 		/* read a gfx register */
5157 		tmp = RREG32(DB_DEPTH_INFO);
5158 
5159 		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
5160 		for (i = 0; i < rdev->usec_timeout; i++) {
5161 			if ((RREG32(RLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
5162 				break;
5163 			udelay(1);
5164 		}
5165 	}
5166 }
5167 
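/* Configure UVD dynamic clock gating (DCM).  @sw_mode appears to
 * select software control of the UVD clock dividers (DYN_OR_EN and
 * DYN_RR_EN via UVD_CGC_CTRL2) instead of hardware control.
 */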
5168 static void si_set_uvd_dcm(struct radeon_device *rdev,
5169 			   bool sw_mode)
5170 {
5171 	u32 tmp, tmp2;
5172 
5173 	tmp = RREG32(UVD_CGC_CTRL);
5174 	tmp &= ~(CLK_OD_MASK | CG_DT_MASK);
5175 	tmp |= DCM | CG_DT(1) | CLK_OD(4);
5176 
5177 	if (sw_mode) {
5178 		tmp &= ~0x7ffff800;
5179 		tmp2 = DYN_OR_EN | DYN_RR_EN | G_DIV_ID(7);
5180 	} else {
5181 		tmp |= 0x7ffff800;
5182 		tmp2 = 0;
5183 	}
5184 
5185 	WREG32(UVD_CGC_CTRL, tmp);
5186 	WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
5187 }
5188 
5189 void si_init_uvd_internal_cg(struct radeon_device *rdev)
5190 {
5191 	bool hw_mode = true;
5192 
5193 	if (hw_mode) {
5194 		si_set_uvd_dcm(rdev, false);
5195 	} else {
5196 		u32 tmp = RREG32(UVD_CGC_CTRL);
5197 		tmp &= ~DCM;
5198 		WREG32(UVD_CGC_CTRL, tmp);
5199 	}
5200 }
5201 
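/* Disable the RLC if it is running and wait for its serdes to go
 * idle.  Returns the previous RLC_CNTL value so the caller can
 * restore it later via si_update_rlc().
 */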
5202 static u32 si_halt_rlc(struct radeon_device *rdev)
5203 {
5204 	u32 data, orig;
5205 
5206 	orig = data = RREG32(RLC_CNTL);
5207 
5208 	if (data & RLC_ENABLE) {
5209 		data &= ~RLC_ENABLE;
5210 		WREG32(RLC_CNTL, data);
5211 
5212 		si_wait_for_rlc_serdes(rdev);
5213 	}
5214 
5215 	return orig;
5216 }
5217 
5218 static void si_update_rlc(struct radeon_device *rdev, u32 rlc)
5219 {
5220 	u32 tmp;
5221 
5222 	tmp = RREG32(RLC_CNTL);
5223 	if (tmp != rlc)
5224 		WREG32(RLC_CNTL, rlc);
5225 }
5226 
5227 static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
5228 {
5229 	u32 data, orig;
5230 
5231 	orig = data = RREG32(DMA_PG);
5232 	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
5233 		data |= PG_CNTL_ENABLE;
5234 	else
5235 		data &= ~PG_CNTL_ENABLE;
5236 	if (orig != data)
5237 		WREG32(DMA_PG, data);
5238 }
5239 
5240 static void si_init_dma_pg(struct radeon_device *rdev)
5241 {
5242 	u32 tmp;
5243 
5244 	WREG32(DMA_PGFSM_WRITE,  0x00002000);
5245 	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);
5246 
5247 	for (tmp = 0; tmp < 5; tmp++)
5248 		WREG32(DMA_PGFSM_WRITE, 0);
5249 }
5250 
5251 static void si_enable_gfx_cgpg(struct radeon_device *rdev,
5252 			       bool enable)
5253 {
5254 	u32 tmp;
5255 
5256 	if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG)) {
5257 		tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
5258 		WREG32(RLC_TTOP_D, tmp);
5259 
5260 		tmp = RREG32(RLC_PG_CNTL);
5261 		tmp |= GFX_PG_ENABLE;
5262 		WREG32(RLC_PG_CNTL, tmp);
5263 
5264 		tmp = RREG32(RLC_AUTO_PG_CTRL);
5265 		tmp |= AUTO_PG_EN;
5266 		WREG32(RLC_AUTO_PG_CTRL, tmp);
5267 	} else {
5268 		tmp = RREG32(RLC_AUTO_PG_CTRL);
5269 		tmp &= ~AUTO_PG_EN;
5270 		WREG32(RLC_AUTO_PG_CTRL, tmp);
5271 
5272 		tmp = RREG32(DB_RENDER_CONTROL);
5273 	}
5274 }
5275 
5276 static void si_init_gfx_cgpg(struct radeon_device *rdev)
5277 {
5278 	u32 tmp;
5279 
5280 	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5281 
5282 	tmp = RREG32(RLC_PG_CNTL);
5283 	tmp |= GFX_PG_SRC;
5284 	WREG32(RLC_PG_CNTL, tmp);
5285 
5286 	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5287 
5288 	tmp = RREG32(RLC_AUTO_PG_CTRL);
5289 
5290 	tmp &= ~GRBM_REG_SGIT_MASK;
5291 	tmp |= GRBM_REG_SGIT(0x700);
5292 	tmp &= ~PG_AFTER_GRBM_REG_ST_MASK;
5293 	WREG32(RLC_AUTO_PG_CTRL, tmp);
5294 }
5295 
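/* OR together the inactive-CU bits from the upper halves of the hard
 * (CC_) and user (GC_USER_) shader array config registers for the
 * given SE/SH, then invert to return a bitmap of the active CUs,
 * limited to max_cu_per_sh bits.
 */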
5296 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
5297 {
5298 	u32 mask = 0, tmp, tmp1;
5299 	int i;
5300 
5301 	si_select_se_sh(rdev, se, sh);
5302 	tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
5303 	tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
5304 	si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5305 
5306 	tmp &= 0xffff0000;
5307 
5308 	tmp |= tmp1;
5309 	tmp >>= 16;
5310 
5311 	for (i = 0; i < rdev->config.si.max_cu_per_sh; i++) {
5312 		mask <<= 1;
5313 		mask |= 1;
5314 	}
5315 
5316 	return (~tmp) & mask;
5317 }
5318 
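/* Build the "always on" CU mask: keep at most two CUs per shader
 * array always on, program the mask into RLC_PG_AO_CU_MASK and store
 * the total active CU count in RLC_MAX_PG_CU.
 */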
5319 static void si_init_ao_cu_mask(struct radeon_device *rdev)
5320 {
5321 	u32 i, j, k, active_cu_number = 0;
5322 	u32 mask, counter, cu_bitmap;
5323 	u32 tmp = 0;
5324 
5325 	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {
5326 		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) {
5327 			mask = 1;
5328 			cu_bitmap = 0;
5329 			counter  = 0;
5330 			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) {
5331 				if (si_get_cu_active_bitmap(rdev, i, j) & mask) {
5332 					if (counter < 2)
5333 						cu_bitmap |= mask;
5334 					counter++;
5335 				}
5336 				mask <<= 1;
5337 			}
5338 
5339 			active_cu_number += counter;
5340 			tmp |= (cu_bitmap << (i * 16 + j * 8));
5341 		}
5342 	}
5343 
5344 	WREG32(RLC_PG_AO_CU_MASK, tmp);
5345 
5346 	tmp = RREG32(RLC_MAX_PG_CU);
5347 	tmp &= ~MAX_PU_CU_MASK;
5348 	tmp |= MAX_PU_CU(active_cu_number);
5349 	WREG32(RLC_MAX_PG_CU, tmp);
5350 }
5351 
5352 static void si_enable_cgcg(struct radeon_device *rdev,
5353 			   bool enable)
5354 {
5355 	u32 data, orig, tmp;
5356 
5357 	orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
5358 
5359 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
5360 		si_enable_gui_idle_interrupt(rdev, true);
5361 
5362 		WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
5363 
5364 		tmp = si_halt_rlc(rdev);
5365 
5366 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5367 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5368 		WREG32(RLC_SERDES_WR_CTRL, 0x00b000ff);
5369 
5370 		si_wait_for_rlc_serdes(rdev);
5371 
5372 		si_update_rlc(rdev, tmp);
5373 
5374 		WREG32(RLC_SERDES_WR_CTRL, 0x007000ff);
5375 
5376 		data |= CGCG_EN | CGLS_EN;
5377 	} else {
5378 		si_enable_gui_idle_interrupt(rdev, false);
5379 
5380 		RREG32(CB_CGTT_SCLK_CTRL);
5381 		RREG32(CB_CGTT_SCLK_CTRL);
5382 		RREG32(CB_CGTT_SCLK_CTRL);
5383 		RREG32(CB_CGTT_SCLK_CTRL);
5384 
5385 		data &= ~(CGCG_EN | CGLS_EN);
5386 	}
5387 
5388 	if (orig != data)
5389 		WREG32(RLC_CGCG_CGLS_CTRL, data);
5390 }
5391 
5392 static void si_enable_mgcg(struct radeon_device *rdev,
5393 			   bool enable)
5394 {
5395 	u32 data, orig, tmp = 0;
5396 
5397 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
5398 		orig = data = RREG32(CGTS_SM_CTRL_REG);
5399 		data = 0x96940200;
5400 		if (orig != data)
5401 			WREG32(CGTS_SM_CTRL_REG, data);
5402 
5403 		if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
5404 			orig = data = RREG32(CP_MEM_SLP_CNTL);
5405 			data |= CP_MEM_LS_EN;
5406 			if (orig != data)
5407 				WREG32(CP_MEM_SLP_CNTL, data);
5408 		}
5409 
5410 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5411 		data &= 0xffffffc0;
5412 		if (orig != data)
5413 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5414 
5415 		tmp = si_halt_rlc(rdev);
5416 
5417 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5418 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5419 		WREG32(RLC_SERDES_WR_CTRL, 0x00d000ff);
5420 
5421 		si_update_rlc(rdev, tmp);
5422 	} else {
5423 		orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
5424 		data |= 0x00000003;
5425 		if (orig != data)
5426 			WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
5427 
5428 		data = RREG32(CP_MEM_SLP_CNTL);
5429 		if (data & CP_MEM_LS_EN) {
5430 			data &= ~CP_MEM_LS_EN;
5431 			WREG32(CP_MEM_SLP_CNTL, data);
5432 		}
5433 		orig = data = RREG32(CGTS_SM_CTRL_REG);
5434 		data |= LS_OVERRIDE | OVERRIDE;
5435 		if (orig != data)
5436 			WREG32(CGTS_SM_CTRL_REG, data);
5437 
5438 		tmp = si_halt_rlc(rdev);
5439 
5440 		WREG32(RLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
5441 		WREG32(RLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
5442 		WREG32(RLC_SERDES_WR_CTRL, 0x00e000ff);
5443 
5444 		si_update_rlc(rdev, tmp);
5445 	}
5446 }
5447 
5448 static void si_enable_uvd_mgcg(struct radeon_device *rdev,
5449 			       bool enable)
5450 {
5451 	u32 orig, data, tmp;
5452 
5453 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
5454 		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5455 		tmp |= 0x3fff;
5456 		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
5457 
5458 		orig = data = RREG32(UVD_CGC_CTRL);
5459 		data |= DCM;
5460 		if (orig != data)
5461 			WREG32(UVD_CGC_CTRL, data);
5462 
5463 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0);
5464 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0);
5465 	} else {
5466 		tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
5467 		tmp &= ~0x3fff;
5468 		WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
5469 
5470 		orig = data = RREG32(UVD_CGC_CTRL);
5471 		data &= ~DCM;
5472 		if (orig != data)
5473 			WREG32(UVD_CGC_CTRL, data);
5474 
5475 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_0, 0xffffffff);
5476 		WREG32_SMC(SMC_CG_IND_START + CG_CGTT_LOCAL_1, 0xffffffff);
5477 	}
5478 }
5479 
5480 static const u32 mc_cg_registers[] =
5481 {
5482 	MC_HUB_MISC_HUB_CG,
5483 	MC_HUB_MISC_SIP_CG,
5484 	MC_HUB_MISC_VM_CG,
5485 	MC_XPB_CLK_GAT,
5486 	ATC_MISC_CG,
5487 	MC_CITF_MISC_WR_CG,
5488 	MC_CITF_MISC_RD_CG,
5489 	MC_CITF_MISC_VM_CG,
5490 	VM_L2_CG,
5491 };
5492 
5493 static void si_enable_mc_ls(struct radeon_device *rdev,
5494 			    bool enable)
5495 {
5496 	int i;
5497 	u32 orig, data;
5498 
5499 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5500 		orig = data = RREG32(mc_cg_registers[i]);
5501 		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
5502 			data |= MC_LS_ENABLE;
5503 		else
5504 			data &= ~MC_LS_ENABLE;
5505 		if (data != orig)
5506 			WREG32(mc_cg_registers[i], data);
5507 	}
5508 }
5509 
5510 static void si_enable_mc_mgcg(struct radeon_device *rdev,
5511 			       bool enable)
5512 {
5513 	int i;
5514 	u32 orig, data;
5515 
5516 	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
5517 		orig = data = RREG32(mc_cg_registers[i]);
5518 		if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
5519 			data |= MC_CG_ENABLE;
5520 		else
5521 			data &= ~MC_CG_ENABLE;
5522 		if (data != orig)
5523 			WREG32(mc_cg_registers[i], data);
5524 	}
5525 }
5526 
5527 static void si_enable_dma_mgcg(struct radeon_device *rdev,
5528 			       bool enable)
5529 {
5530 	u32 orig, data, offset;
5531 	int i;
5532 
5533 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
5534 		for (i = 0; i < 2; i++) {
5535 			if (i == 0)
5536 				offset = DMA0_REGISTER_OFFSET;
5537 			else
5538 				offset = DMA1_REGISTER_OFFSET;
5539 			orig = data = RREG32(DMA_POWER_CNTL + offset);
5540 			data &= ~MEM_POWER_OVERRIDE;
5541 			if (data != orig)
5542 				WREG32(DMA_POWER_CNTL + offset, data);
5543 			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
5544 		}
5545 	} else {
5546 		for (i = 0; i < 2; i++) {
5547 			if (i == 0)
5548 				offset = DMA0_REGISTER_OFFSET;
5549 			else
5550 				offset = DMA1_REGISTER_OFFSET;
5551 			orig = data = RREG32(DMA_POWER_CNTL + offset);
5552 			data |= MEM_POWER_OVERRIDE;
5553 			if (data != orig)
5554 				WREG32(DMA_POWER_CNTL + offset, data);
5555 
5556 			orig = data = RREG32(DMA_CLK_CTRL + offset);
5557 			data = 0xff000000;
5558 			if (data != orig)
5559 				WREG32(DMA_CLK_CTRL + offset, data);
5560 		}
5561 	}
5562 }
5563 
5564 static void si_enable_bif_mgls(struct radeon_device *rdev,
5565 			       bool enable)
5566 {
5567 	u32 orig, data;
5568 
5569 	orig = data = RREG32_PCIE(PCIE_CNTL2);
5570 
5571 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
5572 		data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
5573 			REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
5574 	else
5575 		data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
5576 			  REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
5577 
5578 	if (orig != data)
5579 		WREG32_PCIE(PCIE_CNTL2, data);
5580 }
5581 
5582 static void si_enable_hdp_mgcg(struct radeon_device *rdev,
5583 			       bool enable)
5584 {
5585 	u32 orig, data;
5586 
5587 	orig = data = RREG32(HDP_HOST_PATH_CNTL);
5588 
5589 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
5590 		data &= ~CLOCK_GATING_DIS;
5591 	else
5592 		data |= CLOCK_GATING_DIS;
5593 
5594 	if (orig != data)
5595 		WREG32(HDP_HOST_PATH_CNTL, data);
5596 }
5597 
5598 static void si_enable_hdp_ls(struct radeon_device *rdev,
5599 			     bool enable)
5600 {
5601 	u32 orig, data;
5602 
5603 	orig = data = RREG32(HDP_MEM_POWER_LS);
5604 
5605 	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
5606 		data |= HDP_LS_ENABLE;
5607 	else
5608 		data &= ~HDP_LS_ENABLE;
5609 
5610 	if (orig != data)
5611 		WREG32(HDP_MEM_POWER_LS, data);
5612 }
5613 
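/* Enable or disable clockgating for the selected blocks.  For GFX the
 * GUI idle interrupt is parked while MGCG/CGCG are toggled, and the
 * order of the two is deliberately reversed between enable and
 * disable.
 */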
5614 static void si_update_cg(struct radeon_device *rdev,
5615 			 u32 block, bool enable)
5616 {
5617 	if (block & RADEON_CG_BLOCK_GFX) {
5618 		si_enable_gui_idle_interrupt(rdev, false);
5619 		/* order matters! */
5620 		if (enable) {
5621 			si_enable_mgcg(rdev, true);
5622 			si_enable_cgcg(rdev, true);
5623 		} else {
5624 			si_enable_cgcg(rdev, false);
5625 			si_enable_mgcg(rdev, false);
5626 		}
5627 		si_enable_gui_idle_interrupt(rdev, true);
5628 	}
5629 
5630 	if (block & RADEON_CG_BLOCK_MC) {
5631 		si_enable_mc_mgcg(rdev, enable);
5632 		si_enable_mc_ls(rdev, enable);
5633 	}
5634 
5635 	if (block & RADEON_CG_BLOCK_SDMA) {
5636 		si_enable_dma_mgcg(rdev, enable);
5637 	}
5638 
5639 	if (block & RADEON_CG_BLOCK_BIF) {
5640 		si_enable_bif_mgls(rdev, enable);
5641 	}
5642 
5643 	if (block & RADEON_CG_BLOCK_UVD) {
5644 		if (rdev->has_uvd) {
5645 			si_enable_uvd_mgcg(rdev, enable);
5646 		}
5647 	}
5648 
5649 	if (block & RADEON_CG_BLOCK_HDP) {
5650 		si_enable_hdp_mgcg(rdev, enable);
5651 		si_enable_hdp_ls(rdev, enable);
5652 	}
5653 }
5654 
5655 static void si_init_cg(struct radeon_device *rdev)
5656 {
5657 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5658 			    RADEON_CG_BLOCK_MC |
5659 			    RADEON_CG_BLOCK_SDMA |
5660 			    RADEON_CG_BLOCK_BIF |
5661 			    RADEON_CG_BLOCK_HDP), true);
5662 	if (rdev->has_uvd) {
5663 		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
5664 		si_init_uvd_internal_cg(rdev);
5665 	}
5666 }
5667 
5668 static void si_fini_cg(struct radeon_device *rdev)
5669 {
5670 	if (rdev->has_uvd) {
5671 		si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
5672 	}
5673 	si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
5674 			    RADEON_CG_BLOCK_MC |
5675 			    RADEON_CG_BLOCK_SDMA |
5676 			    RADEON_CG_BLOCK_BIF |
5677 			    RADEON_CG_BLOCK_HDP), false);
5678 }
5679 
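/* Compute the size in dwords of the clear state buffer that
 * si_get_csb_buffer() will emit for this ASIC.
 */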
5680 u32 si_get_csb_size(struct radeon_device *rdev)
5681 {
5682 	u32 count = 0;
5683 	const struct cs_section_def *sect = NULL;
5684 	const struct cs_extent_def *ext = NULL;
5685 
5686 	if (rdev->rlc.cs_data == NULL)
5687 		return 0;
5688 
5689 	/* begin clear state */
5690 	count += 2;
5691 	/* context control state */
5692 	count += 3;
5693 
5694 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5695 		for (ext = sect->section; ext->extent != NULL; ++ext) {
5696 			if (sect->id == SECT_CONTEXT)
5697 				count += 2 + ext->reg_count;
5698 			else
5699 				return 0;
5700 		}
5701 	}
5702 	/* pa_sc_raster_config */
5703 	count += 3;
5704 	/* end clear state */
5705 	count += 2;
5706 	/* clear state */
5707 	count += 2;
5708 
5709 	return count;
5710 }
5711 
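/* Fill @buffer with the PM4 clear state sequence: preamble begin,
 * context control, the SECT_CONTEXT register extents from the RLC cs
 * data, a per-ASIC PA_SC_RASTER_CONFIG value, preamble end and a
 * final CLEAR_STATE packet.
 */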
5712 void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
5713 {
5714 	u32 count = 0, i;
5715 	const struct cs_section_def *sect = NULL;
5716 	const struct cs_extent_def *ext = NULL;
5717 
5718 	if (rdev->rlc.cs_data == NULL)
5719 		return;
5720 	if (buffer == NULL)
5721 		return;
5722 
5723 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5724 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
5725 
5726 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
5727 	buffer[count++] = cpu_to_le32(0x80000000);
5728 	buffer[count++] = cpu_to_le32(0x80000000);
5729 
5730 	for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
5731 		for (ext = sect->section; ext->extent != NULL; ++ext) {
5732 			if (sect->id == SECT_CONTEXT) {
5733 				buffer[count++] =
5734 					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
5735 				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
5736 				for (i = 0; i < ext->reg_count; i++)
5737 					buffer[count++] = cpu_to_le32(ext->extent[i]);
5738 			} else {
5739 				return;
5740 			}
5741 		}
5742 	}
5743 
5744 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
5745 	buffer[count++] = cpu_to_le32(PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
5746 	switch (rdev->family) {
5747 	case CHIP_TAHITI:
5748 	case CHIP_PITCAIRN:
5749 		buffer[count++] = cpu_to_le32(0x2a00126a);
5750 		break;
5751 	case CHIP_VERDE:
5752 		buffer[count++] = cpu_to_le32(0x0000124a);
5753 		break;
5754 	case CHIP_OLAND:
5755 		buffer[count++] = cpu_to_le32(0x00000082);
5756 		break;
5757 	case CHIP_HAINAN:
5758 		buffer[count++] = cpu_to_le32(0x00000000);
5759 		break;
5760 	default:
5761 		buffer[count++] = cpu_to_le32(0x00000000);
5762 		break;
5763 	}
5764 
5765 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
5766 	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
5767 
5768 	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
5769 	buffer[count++] = cpu_to_le32(0);
5770 }
5771 
5772 static void si_init_pg(struct radeon_device *rdev)
5773 {
5774 	if (rdev->pg_flags) {
5775 		if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
5776 			si_init_dma_pg(rdev);
5777 		}
5778 		si_init_ao_cu_mask(rdev);
5779 		if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_PG) {
5780 			si_init_gfx_cgpg(rdev);
5781 		} else {
5782 			WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5783 			WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5784 		}
5785 		si_enable_dma_pg(rdev, true);
5786 		si_enable_gfx_cgpg(rdev, true);
5787 	} else {
5788 		WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
5789 		WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
5790 	}
5791 }
5792 
5793 static void si_fini_pg(struct radeon_device *rdev)
5794 {
5795 	if (rdev->pg_flags) {
5796 		si_enable_dma_pg(rdev, false);
5797 		si_enable_gfx_cgpg(rdev, false);
5798 	}
5799 }
5800 
5801 /*
5802  * RLC
5803  */
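/* Pulse the RLC soft reset bit, allowing ~50us for the reset to
 * assert and deassert.
 */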
5804 void si_rlc_reset(struct radeon_device *rdev)
5805 {
5806 	u32 tmp = RREG32(GRBM_SOFT_RESET);
5807 
5808 	tmp |= SOFT_RESET_RLC;
5809 	WREG32(GRBM_SOFT_RESET, tmp);
5810 	udelay(50);
5811 	tmp &= ~SOFT_RESET_RLC;
5812 	WREG32(GRBM_SOFT_RESET, tmp);
5813 	udelay(50);
5814 }
5815 
5816 static void si_rlc_stop(struct radeon_device *rdev)
5817 {
5818 	WREG32(RLC_CNTL, 0);
5819 
5820 	si_enable_gui_idle_interrupt(rdev, false);
5821 
5822 	si_wait_for_rlc_serdes(rdev);
5823 }
5824 
5825 static void si_rlc_start(struct radeon_device *rdev)
5826 {
5827 	WREG32(RLC_CNTL, RLC_ENABLE);
5828 
5829 	si_enable_gui_idle_interrupt(rdev, true);
5830 
5831 	udelay(50);
5832 }
5833 
5834 static bool si_lbpw_supported(struct radeon_device *rdev)
5835 {
5836 	u32 tmp;
5837 
5838 	/* Enable LBPW only for DDR3 */
5839 	tmp = RREG32(MC_SEQ_MISC0);
5840 	if ((tmp & 0xF0000000) == 0xB0000000)
5841 		return true;
5842 	return false;
5843 }
5844 
5845 static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
5846 {
5847 	u32 tmp;
5848 
5849 	tmp = RREG32(RLC_LB_CNTL);
5850 	if (enable)
5851 		tmp |= LOAD_BALANCE_ENABLE;
5852 	else
5853 		tmp &= ~LOAD_BALANCE_ENABLE;
5854 	WREG32(RLC_LB_CNTL, tmp);
5855 
5856 	if (!enable) {
5857 		si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
5858 		WREG32(SPI_LB_CU_MASK, 0x00ff);
5859 	}
5860 }
5861 
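/* Reset the RLC, reinitialize powergating/clockgating state, load the
 * RLC microcode (new-style header or legacy layout) and start the RLC
 * running again.
 */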
5862 static int si_rlc_resume(struct radeon_device *rdev)
5863 {
5864 	u32 i;
5865 
5866 	if (!rdev->rlc_fw)
5867 		return -EINVAL;
5868 
5869 	si_rlc_stop(rdev);
5870 
5871 	si_rlc_reset(rdev);
5872 
5873 	si_init_pg(rdev);
5874 
5875 	si_init_cg(rdev);
5876 
5877 	WREG32(RLC_RL_BASE, 0);
5878 	WREG32(RLC_RL_SIZE, 0);
5879 	WREG32(RLC_LB_CNTL, 0);
5880 	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
5881 	WREG32(RLC_LB_CNTR_INIT, 0);
5882 	WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
5883 
5884 	WREG32(RLC_MC_CNTL, 0);
5885 	WREG32(RLC_UCODE_CNTL, 0);
5886 
5887 	if (rdev->new_fw) {
5888 		const struct rlc_firmware_header_v1_0 *hdr =
5889 			(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
5890 		u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
5891 		const __le32 *fw_data = (const __le32 *)
5892 			(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
5893 
5894 		radeon_ucode_print_rlc_hdr(&hdr->header);
5895 
5896 		for (i = 0; i < fw_size; i++) {
5897 			WREG32(RLC_UCODE_ADDR, i);
5898 			WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
5899 		}
5900 	} else {
5901 		const __be32 *fw_data =
5902 			(const __be32 *)rdev->rlc_fw->data;
5903 		for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
5904 			WREG32(RLC_UCODE_ADDR, i);
5905 			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
5906 		}
5907 	}
5908 	WREG32(RLC_UCODE_ADDR, 0);
5909 
5910 	si_enable_lbpw(rdev, si_lbpw_supported(rdev));
5911 
5912 	si_rlc_start(rdev);
5913 
5914 	return 0;
5915 }
5916 
5917 static void si_enable_interrupts(struct radeon_device *rdev)
5918 {
5919 	u32 ih_cntl = RREG32(IH_CNTL);
5920 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5921 
5922 	ih_cntl |= ENABLE_INTR;
5923 	ih_rb_cntl |= IH_RB_ENABLE;
5924 	WREG32(IH_CNTL, ih_cntl);
5925 	WREG32(IH_RB_CNTL, ih_rb_cntl);
5926 	rdev->ih.enabled = true;
5927 }
5928 
5929 static void si_disable_interrupts(struct radeon_device *rdev)
5930 {
5931 	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
5932 	u32 ih_cntl = RREG32(IH_CNTL);
5933 
5934 	ih_rb_cntl &= ~IH_RB_ENABLE;
5935 	ih_cntl &= ~ENABLE_INTR;
5936 	WREG32(IH_RB_CNTL, ih_rb_cntl);
5937 	WREG32(IH_CNTL, ih_cntl);
5938 	/* set rptr, wptr to 0 */
5939 	WREG32(IH_RB_RPTR, 0);
5940 	WREG32(IH_RB_WPTR, 0);
5941 	rdev->ih.enabled = false;
5942 	rdev->ih.rptr = 0;
5943 }
5944 
5945 static void si_disable_interrupt_state(struct radeon_device *rdev)
5946 {
5947 	u32 tmp;
5948 
5949 	tmp = RREG32(CP_INT_CNTL_RING0) &
5950 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
5951 	WREG32(CP_INT_CNTL_RING0, tmp);
5952 	WREG32(CP_INT_CNTL_RING1, 0);
5953 	WREG32(CP_INT_CNTL_RING2, 0);
5954 	tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
5955 	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
5956 	tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
5957 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
5958 	WREG32(GRBM_INT_CNTL, 0);
5959 	WREG32(SRBM_INT_CNTL, 0);
5960 	if (rdev->num_crtc >= 2) {
5961 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
5962 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
5963 	}
5964 	if (rdev->num_crtc >= 4) {
5965 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
5966 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
5967 	}
5968 	if (rdev->num_crtc >= 6) {
5969 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
5970 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
5971 	}
5972 
5973 	if (rdev->num_crtc >= 2) {
5974 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
5975 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
5976 	}
5977 	if (rdev->num_crtc >= 4) {
5978 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
5979 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
5980 	}
5981 	if (rdev->num_crtc >= 6) {
5982 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
5983 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
5984 	}
5985 
5986 	if (!ASIC_IS_NODCE(rdev)) {
5987 		WREG32(DAC_AUTODETECT_INT_CONTROL, 0);
5988 
5989 		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5990 		WREG32(DC_HPD1_INT_CONTROL, tmp);
5991 		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5992 		WREG32(DC_HPD2_INT_CONTROL, tmp);
5993 		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5994 		WREG32(DC_HPD3_INT_CONTROL, tmp);
5995 		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5996 		WREG32(DC_HPD4_INT_CONTROL, tmp);
5997 		tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
5998 		WREG32(DC_HPD5_INT_CONTROL, tmp);
5999 		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
6000 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6001 	}
6002 }
6003 
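/* One-time interrupt controller setup: allocate the IH ring, bring up
 * the RLC, program the IH ring buffer and control registers (with
 * writeback and optional MSI rptr rearm) and then enable interrupts
 * with all sources masked.
 */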
6004 static int si_irq_init(struct radeon_device *rdev)
6005 {
6006 	int ret = 0;
6007 	int rb_bufsz;
6008 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
6009 
6010 	/* allocate ring */
6011 	ret = r600_ih_ring_alloc(rdev);
6012 	if (ret)
6013 		return ret;
6014 
6015 	/* disable irqs */
6016 	si_disable_interrupts(rdev);
6017 
6018 	/* init rlc */
6019 	ret = si_rlc_resume(rdev);
6020 	if (ret) {
6021 		r600_ih_ring_fini(rdev);
6022 		return ret;
6023 	}
6024 
6025 	/* setup interrupt control */
6026 	/* set dummy read address to ring address */
6027 	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
6028 	interrupt_cntl = RREG32(INTERRUPT_CNTL);
6029 	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
6030 	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
6031 	 */
6032 	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
6033 	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
6034 	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
6035 	WREG32(INTERRUPT_CNTL, interrupt_cntl);
6036 
6037 	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
6038 	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
6039 
6040 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
6041 		      IH_WPTR_OVERFLOW_CLEAR |
6042 		      (rb_bufsz << 1));
6043 
6044 	if (rdev->wb.enabled)
6045 		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
6046 
6047 	/* set the writeback address whether it's enabled or not */
6048 	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
6049 	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
6050 
6051 	WREG32(IH_RB_CNTL, ih_rb_cntl);
6052 
6053 	/* set rptr, wptr to 0 */
6054 	WREG32(IH_RB_RPTR, 0);
6055 	WREG32(IH_RB_WPTR, 0);
6056 
6057 	/* Default settings for IH_CNTL (disabled at first) */
6058 	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
6059 	/* RPTR_REARM only works if MSIs are enabled */
6060 	if (rdev->msi_enabled)
6061 		ih_cntl |= RPTR_REARM;
6062 	WREG32(IH_CNTL, ih_cntl);
6063 
6064 	/* force the active interrupt state to all disabled */
6065 	si_disable_interrupt_state(rdev);
6066 
6067 	pci_set_master(rdev->pdev);
6068 
6069 	/* enable irqs */
6070 	si_enable_interrupts(rdev);
6071 
6072 	return ret;
6073 }
6074 
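/**
 * si_irq_set - program the interrupt mask registers (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Build and write the enable bits for the CP rings, DMA engines,
 * vblank/pflip, hotplug and thermal interrupt sources based on the
 * current rdev->irq state.
 */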
6075 int si_irq_set(struct radeon_device *rdev)
6076 {
6077 	u32 cp_int_cntl;
6078 	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
6079 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
6080 	u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
6081 	u32 grbm_int_cntl = 0;
6082 	u32 dma_cntl, dma_cntl1;
6083 	u32 thermal_int = 0;
6084 
6085 	if (!rdev->irq.installed) {
6086 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
6087 		return -EINVAL;
6088 	}
6089 	/* don't enable anything if the ih is disabled */
6090 	if (!rdev->ih.enabled) {
6091 		si_disable_interrupts(rdev);
6092 		/* force the active interrupt state to all disabled */
6093 		si_disable_interrupt_state(rdev);
6094 		return 0;
6095 	}
6096 
6097 	cp_int_cntl = RREG32(CP_INT_CNTL_RING0) &
6098 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
6099 
6100 	if (!ASIC_IS_NODCE(rdev)) {
6101 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6102 		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6103 		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6104 		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6105 		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6106 		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
6107 	}
6108 
6109 	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
6110 	dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
6111 
6112 	thermal_int = RREG32(CG_THERMAL_INT) &
6113 		~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6114 
6115 	/* enable CP interrupts on all rings */
6116 	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
6117 		DRM_DEBUG("si_irq_set: sw int gfx\n");
6118 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
6119 	}
6120 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
6121 		DRM_DEBUG("si_irq_set: sw int cp1\n");
6122 		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
6123 	}
6124 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
6125 		DRM_DEBUG("si_irq_set: sw int cp2\n");
6126 		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
6127 	}
6128 	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
6129 		DRM_DEBUG("si_irq_set: sw int dma\n");
6130 		dma_cntl |= TRAP_ENABLE;
6131 	}
6132 
6133 	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
6134 		DRM_DEBUG("si_irq_set: sw int dma1\n");
6135 		dma_cntl1 |= TRAP_ENABLE;
6136 	}
6137 	if (rdev->irq.crtc_vblank_int[0] ||
6138 	    atomic_read(&rdev->irq.pflip[0])) {
6139 		DRM_DEBUG("si_irq_set: vblank 0\n");
6140 		crtc1 |= VBLANK_INT_MASK;
6141 	}
6142 	if (rdev->irq.crtc_vblank_int[1] ||
6143 	    atomic_read(&rdev->irq.pflip[1])) {
6144 		DRM_DEBUG("si_irq_set: vblank 1\n");
6145 		crtc2 |= VBLANK_INT_MASK;
6146 	}
6147 	if (rdev->irq.crtc_vblank_int[2] ||
6148 	    atomic_read(&rdev->irq.pflip[2])) {
6149 		DRM_DEBUG("si_irq_set: vblank 2\n");
6150 		crtc3 |= VBLANK_INT_MASK;
6151 	}
6152 	if (rdev->irq.crtc_vblank_int[3] ||
6153 	    atomic_read(&rdev->irq.pflip[3])) {
6154 		DRM_DEBUG("si_irq_set: vblank 3\n");
6155 		crtc4 |= VBLANK_INT_MASK;
6156 	}
6157 	if (rdev->irq.crtc_vblank_int[4] ||
6158 	    atomic_read(&rdev->irq.pflip[4])) {
6159 		DRM_DEBUG("si_irq_set: vblank 4\n");
6160 		crtc5 |= VBLANK_INT_MASK;
6161 	}
6162 	if (rdev->irq.crtc_vblank_int[5] ||
6163 	    atomic_read(&rdev->irq.pflip[5])) {
6164 		DRM_DEBUG("si_irq_set: vblank 5\n");
6165 		crtc6 |= VBLANK_INT_MASK;
6166 	}
6167 	if (rdev->irq.hpd[0]) {
6168 		DRM_DEBUG("si_irq_set: hpd 1\n");
6169 		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6170 	}
6171 	if (rdev->irq.hpd[1]) {
6172 		DRM_DEBUG("si_irq_set: hpd 2\n");
6173 		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6174 	}
6175 	if (rdev->irq.hpd[2]) {
6176 		DRM_DEBUG("si_irq_set: hpd 3\n");
6177 		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6178 	}
6179 	if (rdev->irq.hpd[3]) {
6180 		DRM_DEBUG("si_irq_set: hpd 4\n");
6181 		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6182 	}
6183 	if (rdev->irq.hpd[4]) {
6184 		DRM_DEBUG("si_irq_set: hpd 5\n");
6185 		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6186 	}
6187 	if (rdev->irq.hpd[5]) {
6188 		DRM_DEBUG("si_irq_set: hpd 6\n");
6189 		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
6190 	}
6191 
6192 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
6193 	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
6194 	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
6195 
6196 	WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
6197 	WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
6198 
6199 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
6200 
6201 	if (rdev->irq.dpm_thermal) {
6202 		DRM_DEBUG("dpm thermal\n");
6203 		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6204 	}
6205 
6206 	if (rdev->num_crtc >= 2) {
6207 		WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
6208 		WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
6209 	}
6210 	if (rdev->num_crtc >= 4) {
6211 		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
6212 		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
6213 	}
6214 	if (rdev->num_crtc >= 6) {
6215 		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
6216 		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
6217 	}
6218 
6219 	if (rdev->num_crtc >= 2) {
6220 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET,
6221 		       GRPH_PFLIP_INT_MASK);
6222 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET,
6223 		       GRPH_PFLIP_INT_MASK);
6224 	}
6225 	if (rdev->num_crtc >= 4) {
6226 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET,
6227 		       GRPH_PFLIP_INT_MASK);
6228 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET,
6229 		       GRPH_PFLIP_INT_MASK);
6230 	}
6231 	if (rdev->num_crtc >= 6) {
6232 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET,
6233 		       GRPH_PFLIP_INT_MASK);
6234 		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET,
6235 		       GRPH_PFLIP_INT_MASK);
6236 	}
6237 
6238 	if (!ASIC_IS_NODCE(rdev)) {
6239 		WREG32(DC_HPD1_INT_CONTROL, hpd1);
6240 		WREG32(DC_HPD2_INT_CONTROL, hpd2);
6241 		WREG32(DC_HPD3_INT_CONTROL, hpd3);
6242 		WREG32(DC_HPD4_INT_CONTROL, hpd4);
6243 		WREG32(DC_HPD5_INT_CONTROL, hpd5);
6244 		WREG32(DC_HPD6_INT_CONTROL, hpd6);
6245 	}
6246 
6247 	WREG32(CG_THERMAL_INT, thermal_int);
6248 
6249 	/* posting read */
6250 	RREG32(SRBM_STATUS);
6251 
6252 	return 0;
6253 }
6254 
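/* Snapshot the display interrupt status registers into
 * rdev->irq.stat_regs and acknowledge any pending page flip, vblank,
 * vline and HPD interrupts.
 */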
6255 static inline void si_irq_ack(struct radeon_device *rdev)
6256 {
6257 	u32 tmp;
6258 
6259 	if (ASIC_IS_NODCE(rdev))
6260 		return;
6261 
6262 	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
6263 	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
6264 	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
6265 	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
6266 	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
6267 	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
6268 	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
6269 	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
6270 	if (rdev->num_crtc >= 4) {
6271 		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
6272 		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
6273 	}
6274 	if (rdev->num_crtc >= 6) {
6275 		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
6276 		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
6277 	}
6278 
6279 	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
6280 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6281 	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
6282 		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6283 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
6284 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
6285 	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
6286 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
6287 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
6288 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
6289 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
6290 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
6291 
6292 	if (rdev->num_crtc >= 4) {
6293 		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
6294 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6295 		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
6296 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6297 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
6298 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
6299 		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
6300 			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
6301 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
6302 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
6303 		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
6304 			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
6305 	}
6306 
6307 	if (rdev->num_crtc >= 6) {
6308 		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
6309 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6310 		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
6311 			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
6312 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
6313 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
6314 		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
6315 			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
6316 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
6317 			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
6318 		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
6319 			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
6320 	}
6321 
6322 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
6323 		tmp = RREG32(DC_HPD1_INT_CONTROL);
6324 		tmp |= DC_HPDx_INT_ACK;
6325 		WREG32(DC_HPD1_INT_CONTROL, tmp);
6326 	}
6327 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
6328 		tmp = RREG32(DC_HPD2_INT_CONTROL);
6329 		tmp |= DC_HPDx_INT_ACK;
6330 		WREG32(DC_HPD2_INT_CONTROL, tmp);
6331 	}
6332 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
6333 		tmp = RREG32(DC_HPD3_INT_CONTROL);
6334 		tmp |= DC_HPDx_INT_ACK;
6335 		WREG32(DC_HPD3_INT_CONTROL, tmp);
6336 	}
6337 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
6338 		tmp = RREG32(DC_HPD4_INT_CONTROL);
6339 		tmp |= DC_HPDx_INT_ACK;
6340 		WREG32(DC_HPD4_INT_CONTROL, tmp);
6341 	}
6342 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
6343 		tmp = RREG32(DC_HPD5_INT_CONTROL);
6344 		tmp |= DC_HPDx_INT_ACK;
6345 		WREG32(DC_HPD5_INT_CONTROL, tmp);
6346 	}
6347 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
6348 		tmp = RREG32(DC_HPD6_INT_CONTROL);
6349 		tmp |= DC_HPDx_INT_ACK;
6350 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6351 	}
6352 
6353 	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
6354 		tmp = RREG32(DC_HPD1_INT_CONTROL);
6355 		tmp |= DC_HPDx_RX_INT_ACK;
6356 		WREG32(DC_HPD1_INT_CONTROL, tmp);
6357 	}
6358 	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
6359 		tmp = RREG32(DC_HPD2_INT_CONTROL);
6360 		tmp |= DC_HPDx_RX_INT_ACK;
6361 		WREG32(DC_HPD2_INT_CONTROL, tmp);
6362 	}
6363 	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
6364 		tmp = RREG32(DC_HPD3_INT_CONTROL);
6365 		tmp |= DC_HPDx_RX_INT_ACK;
6366 		WREG32(DC_HPD3_INT_CONTROL, tmp);
6367 	}
6368 	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
6369 		tmp = RREG32(DC_HPD4_INT_CONTROL);
6370 		tmp |= DC_HPDx_RX_INT_ACK;
6371 		WREG32(DC_HPD4_INT_CONTROL, tmp);
6372 	}
6373 	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
6374 		tmp = RREG32(DC_HPD5_INT_CONTROL);
6375 		tmp |= DC_HPDx_RX_INT_ACK;
6376 		WREG32(DC_HPD5_INT_CONTROL, tmp);
6377 	}
6378 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
6379 		tmp = RREG32(DC_HPD6_INT_CONTROL);
6380 		tmp |= DC_HPDx_RX_INT_ACK;
6381 		WREG32(DC_HPD6_INT_CONTROL, tmp);
6382 	}
6383 }
6384 
6385 static void si_irq_disable(struct radeon_device *rdev)
6386 {
6387 	si_disable_interrupts(rdev);
6388 	/* Wait and acknowledge irq */
6389 	mdelay(1);
6390 	si_irq_ack(rdev);
6391 	si_disable_interrupt_state(rdev);
6392 }
6393 
6394 static void si_irq_suspend(struct radeon_device *rdev)
6395 {
6396 	si_irq_disable(rdev);
6397 	si_rlc_stop(rdev);
6398 }
6399 
6400 static void si_irq_fini(struct radeon_device *rdev)
6401 {
6402 	si_irq_suspend(rdev);
6403 	r600_ih_ring_fini(rdev);
6404 }
6405 
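/* Fetch the IH ring write pointer, from the writeback page when
 * writeback is enabled or from the register otherwise, and recover
 * from a ring buffer overflow if one is flagged.
 */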
6406 static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
6407 {
6408 	u32 wptr, tmp;
6409 
6410 	if (rdev->wb.enabled)
6411 		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
6412 	else
6413 		wptr = RREG32(IH_RB_WPTR);
6414 
6415 	if (wptr & RB_OVERFLOW) {
6416 		wptr &= ~RB_OVERFLOW;
6417 		/* When a ring buffer overflow happens, start parsing interrupts
6418 		 * from the last vector that was not overwritten (wptr + 16).
6419 		 * Hopefully this allows us to catch up.
6420 		 */
6421 		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
6422 			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
6423 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
6424 		tmp = RREG32(IH_RB_CNTL);
6425 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
6426 		WREG32(IH_RB_CNTL, tmp);
6427 	}
6428 	return (wptr & rdev->ih.ptr_mask);
6429 }
6430 
6431 /*        SI IV Ring
6432  * Each IV ring entry is 128 bits:
6433  * [7:0]    - interrupt source id
6434  * [31:8]   - reserved
6435  * [59:32]  - interrupt source data
6436  * [63:60]  - reserved
6437  * [71:64]  - RINGID
6438  * [79:72]  - VMID
6439  * [127:80] - reserved
6440  */
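/* Drain the IH ring: ack the display interrupts, then dispatch each
 * ring entry by source id (vblank/vline, page flips, hotplug, VM
 * faults, CP/DMA fences, thermal) until rptr catches up with wptr.
 */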
6441 irqreturn_t si_irq_process(struct radeon_device *rdev)
6442 {
6443 	u32 wptr;
6444 	u32 rptr;
6445 	u32 src_id, src_data, ring_id;
6446 	u32 ring_index;
6447 	bool queue_hotplug = false;
6448 	bool queue_dp = false;
6449 	bool queue_thermal = false;
6450 	u32 status, addr;
6451 
6452 	if (!rdev->ih.enabled || rdev->shutdown)
6453 		return IRQ_NONE;
6454 
6455 	wptr = si_get_ih_wptr(rdev);
6456 
6457 restart_ih:
6458 	/* is somebody else already processing irqs? */
6459 	if (atomic_xchg(&rdev->ih.lock, 1))
6460 		return IRQ_NONE;
6461 
6462 	rptr = rdev->ih.rptr;
6463 	DRM_DEBUG_VBLANK("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
6464 
6465 	/* Order reading of wptr vs. reading of IH ring data */
6466 	rmb();
6467 
6468 	/* display interrupts */
6469 	si_irq_ack(rdev);
6470 
6471 	while (rptr != wptr) {
6472 		/* wptr/rptr are in bytes! */
6473 		ring_index = rptr / 4;
6474 		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
6475 		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
6476 		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
6477 
6478 		switch (src_id) {
6479 		case 1: /* D1 vblank/vline */
6480 			switch (src_data) {
6481 			case 0: /* D1 vblank */
6482 				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
6483 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6484 
6485 				if (rdev->irq.crtc_vblank_int[0]) {
6486 					drm_handle_vblank(rdev->ddev, 0);
6487 					rdev->pm.vblank_sync = true;
6488 					wake_up(&rdev->irq.vblank_queue);
6489 				}
6490 				if (atomic_read(&rdev->irq.pflip[0]))
6491 					radeon_crtc_handle_vblank(rdev, 0);
6492 				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6493 				DRM_DEBUG_VBLANK("IH: D1 vblank\n");
6494 
6495 				break;
6496 			case 1: /* D1 vline */
6497 				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
6498 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6499 
6500 				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6501 				DRM_DEBUG_VBLANK("IH: D1 vline\n");
6502 
6503 				break;
6504 			default:
6505 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6506 				break;
6507 			}
6508 			break;
6509 		case 2: /* D2 vblank/vline */
6510 			switch (src_data) {
6511 			case 0: /* D2 vblank */
6512 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
6513 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6514 
6515 				if (rdev->irq.crtc_vblank_int[1]) {
6516 					drm_handle_vblank(rdev->ddev, 1);
6517 					rdev->pm.vblank_sync = true;
6518 					wake_up(&rdev->irq.vblank_queue);
6519 				}
6520 				if (atomic_read(&rdev->irq.pflip[1]))
6521 					radeon_crtc_handle_vblank(rdev, 1);
6522 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6523 				DRM_DEBUG_VBLANK("IH: D2 vblank\n");
6524 
6525 				break;
6526 			case 1: /* D2 vline */
6527 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
6528 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6529 
6530 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6531 				DRM_DEBUG_VBLANK("IH: D2 vline\n");
6532 
6533 				break;
6534 			default:
6535 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6536 				break;
6537 			}
6538 			break;
6539 		case 3: /* D3 vblank/vline */
6540 			switch (src_data) {
6541 			case 0: /* D3 vblank */
6542 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
6543 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6544 
6545 				if (rdev->irq.crtc_vblank_int[2]) {
6546 					drm_handle_vblank(rdev->ddev, 2);
6547 					rdev->pm.vblank_sync = true;
6548 					wake_up(&rdev->irq.vblank_queue);
6549 				}
6550 				if (atomic_read(&rdev->irq.pflip[2]))
6551 					radeon_crtc_handle_vblank(rdev, 2);
6552 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6553 				DRM_DEBUG_VBLANK("IH: D3 vblank\n");
6554 
6555 				break;
6556 			case 1: /* D3 vline */
6557 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
6558 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6559 
6560 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6561 				DRM_DEBUG_VBLANK("IH: D3 vline\n");
6562 
6563 				break;
6564 			default:
6565 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6566 				break;
6567 			}
6568 			break;
6569 		case 4: /* D4 vblank/vline */
6570 			switch (src_data) {
6571 			case 0: /* D4 vblank */
6572 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
6573 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6574 
6575 				if (rdev->irq.crtc_vblank_int[3]) {
6576 					drm_handle_vblank(rdev->ddev, 3);
6577 					rdev->pm.vblank_sync = true;
6578 					wake_up(&rdev->irq.vblank_queue);
6579 				}
6580 				if (atomic_read(&rdev->irq.pflip[3]))
6581 					radeon_crtc_handle_vblank(rdev, 3);
6582 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6583 				DRM_DEBUG_VBLANK("IH: D4 vblank\n");
6584 
6585 				break;
6586 			case 1: /* D4 vline */
6587 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
6588 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6589 
6590 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6591 				DRM_DEBUG_VBLANK("IH: D4 vline\n");
6592 
6593 				break;
6594 			default:
6595 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6596 				break;
6597 			}
6598 			break;
6599 		case 5: /* D5 vblank/vline */
6600 			switch (src_data) {
6601 			case 0: /* D5 vblank */
6602 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
6603 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6604 
6605 				if (rdev->irq.crtc_vblank_int[4]) {
6606 					drm_handle_vblank(rdev->ddev, 4);
6607 					rdev->pm.vblank_sync = true;
6608 					wake_up(&rdev->irq.vblank_queue);
6609 				}
6610 				if (atomic_read(&rdev->irq.pflip[4]))
6611 					radeon_crtc_handle_vblank(rdev, 4);
6612 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6613 				DRM_DEBUG_VBLANK("IH: D5 vblank\n");
6614 
6615 				break;
6616 			case 1: /* D5 vline */
6617 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
6618 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6619 
6620 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6621 				DRM_DEBUG_VBLANK("IH: D5 vline\n");
6622 
6623 				break;
6624 			default:
6625 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6626 				break;
6627 			}
6628 			break;
6629 		case 6: /* D6 vblank/vline */
6630 			switch (src_data) {
6631 			case 0: /* D6 vblank */
6632 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
6633 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6634 
6635 				if (rdev->irq.crtc_vblank_int[5]) {
6636 					drm_handle_vblank(rdev->ddev, 5);
6637 					rdev->pm.vblank_sync = true;
6638 					wake_up(&rdev->irq.vblank_queue);
6639 				}
6640 				if (atomic_read(&rdev->irq.pflip[5]))
6641 					radeon_crtc_handle_vblank(rdev, 5);
6642 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6643 				DRM_DEBUG_VBLANK("IH: D6 vblank\n");
6644 
6645 				break;
6646 			case 1: /* D6 vline */
6647 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
6648 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6649 
6650 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6651 				DRM_DEBUG_VBLANK("IH: D6 vline\n");
6652 
6653 				break;
6654 			default:
6655 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6656 				break;
6657 			}
6658 			break;
6659 		case 8: /* D1 page flip */
6660 		case 10: /* D2 page flip */
6661 		case 12: /* D3 page flip */
6662 		case 14: /* D4 page flip */
6663 		case 16: /* D5 page flip */
6664 		case 18: /* D6 page flip */
6665 			DRM_DEBUG_VBLANK("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
6666 			if (radeon_use_pflipirq > 0)
6667 				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
6668 			break;
6669 		case 42: /* HPD hotplug */
6670 			switch (src_data) {
6671 			case 0:
6672 				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
6673 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6674 
6675 				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6676 				queue_hotplug = true;
6677 				DRM_DEBUG("IH: HPD1\n");
6678 
6679 				break;
6680 			case 1:
6681 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
6682 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6683 
6684 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6685 				queue_hotplug = true;
6686 				DRM_DEBUG("IH: HPD2\n");
6687 
6688 				break;
6689 			case 2:
6690 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
6691 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6692 
6693 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6694 				queue_hotplug = true;
6695 				DRM_DEBUG("IH: HPD3\n");
6696 
6697 				break;
6698 			case 3:
6699 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
6700 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6701 
6702 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6703 				queue_hotplug = true;
6704 				DRM_DEBUG("IH: HPD4\n");
6705 
6706 				break;
6707 			case 4:
6708 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
6709 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6710 
6711 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6712 				queue_hotplug = true;
6713 				DRM_DEBUG("IH: HPD5\n");
6714 
6715 				break;
6716 			case 5:
6717 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
6718 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6719 
6720 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6721 				queue_hotplug = true;
6722 				DRM_DEBUG("IH: HPD6\n");
6723 
6724 				break;
6725 			case 6:
6726 				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
6727 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6728 
6729 				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
6730 				queue_dp = true;
6731 				DRM_DEBUG("IH: HPD_RX 1\n");
6732 
6733 				break;
6734 			case 7:
6735 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
6736 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6737 
6738 				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
6739 				queue_dp = true;
6740 				DRM_DEBUG("IH: HPD_RX 2\n");
6741 
6742 				break;
6743 			case 8:
6744 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
6745 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6746 
6747 				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
6748 				queue_dp = true;
6749 				DRM_DEBUG("IH: HPD_RX 3\n");
6750 
6751 				break;
6752 			case 9:
6753 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
6754 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6755 
6756 				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
6757 				queue_dp = true;
6758 				DRM_DEBUG("IH: HPD_RX 4\n");
6759 
6760 				break;
6761 			case 10:
6762 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
6763 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6764 
6765 				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
6766 				queue_dp = true;
6767 				DRM_DEBUG("IH: HPD_RX 5\n");
6768 
6769 				break;
6770 			case 11:
6771 				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
6772 					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6773 
6774 				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
6775 				queue_dp = true;
6776 				DRM_DEBUG("IH: HPD_RX 6\n");
6777 
6778 				break;
6779 			default:
6780 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6781 				break;
6782 			}
6783 			break;
6784 		case 96: /* SRBM read error */
6785 			DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR));
6786 			WREG32(SRBM_INT_ACK, 0x1);
6787 			break;
6788 		case 124: /* UVD */
6789 			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
6790 			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
6791 			break;
6792 		case 146: /* VM protection fault */
6793 		case 147:
6794 			addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6795 			status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6796 			/* reset addr and status */
6797 			WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6798 			if (addr == 0x0 && status == 0x0)
6799 				break;
6800 			dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6801 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
6802 				addr);
6803 			dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6804 				status);
6805 			si_vm_decode_fault(rdev, status, addr);
6806 			break;
6807 		case 176: /* RINGID0 CP_INT */
6808 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6809 			break;
6810 		case 177: /* RINGID1 CP_INT */
6811 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6812 			break;
6813 		case 178: /* RINGID2 CP_INT */
6814 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6815 			break;
6816 		case 181: /* CP EOP event */
6817 			DRM_DEBUG("IH: CP EOP\n");
6818 			switch (ring_id) {
6819 			case 0:
6820 				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
6821 				break;
6822 			case 1:
6823 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
6824 				break;
6825 			case 2:
6826 				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
6827 				break;
6828 			}
6829 			break;
6830 		case 224: /* DMA trap event */
6831 			DRM_DEBUG("IH: DMA trap\n");
6832 			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
6833 			break;
6834 		case 230: /* thermal low to high */
6835 			DRM_DEBUG("IH: thermal low to high\n");
6836 			rdev->pm.dpm.thermal.high_to_low = false;
6837 			queue_thermal = true;
6838 			break;
6839 		case 231: /* thermal high to low */
6840 			DRM_DEBUG("IH: thermal high to low\n");
6841 			rdev->pm.dpm.thermal.high_to_low = true;
6842 			queue_thermal = true;
6843 			break;
6844 		case 233: /* GUI IDLE */
6845 			DRM_DEBUG("IH: GUI idle\n");
6846 			break;
6847 		case 244: /* DMA1 trap event */
6848 			DRM_DEBUG("IH: DMA1 trap\n");
6849 			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
6850 			break;
6851 		default:
6852 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
6853 			break;
6854 		}
6855 
6856 		/* wptr/rptr are in bytes! */
6857 		rptr += 16;
6858 		rptr &= rdev->ih.ptr_mask;
6859 		WREG32(IH_RB_RPTR, rptr);
6860 	}
6861 	if (queue_dp)
6862 		schedule_work(&rdev->dp_work);
6863 	if (queue_hotplug)
6864 		schedule_delayed_work(&rdev->hotplug_work, 0);
6865 	if (queue_thermal && rdev->pm.dpm_enabled)
6866 		schedule_work(&rdev->pm.dpm.thermal.work);
6867 	rdev->ih.rptr = rptr;
6868 	atomic_set(&rdev->ih.lock, 0);
6869 
6870 	/* make sure wptr hasn't changed while processing */
6871 	wptr = si_get_ih_wptr(rdev);
6872 	if (wptr != rptr)
6873 		goto restart_ih;
6874 
6875 	return IRQ_HANDLED;
6876 }
6877 
6878 /*
6879  * startup/shutdown callbacks
6880  */
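/**
 * si_uvd_init - UVD software init (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Sets up UVD via radeon_uvd_init() and initializes the UVD ring
 * object.  If radeon_uvd_init() fails, UVD is disabled entirely
 * (rdev->has_uvd = 0) so the start/resume helpers below turn into
 * no-ops instead of failing again later.
 */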
6881 static void si_uvd_init(struct radeon_device *rdev)
6882 {
6883 	int r;
6884 
6885 	if (!rdev->has_uvd)
6886 		return;
6887 
6888 	r = radeon_uvd_init(rdev);
6889 	if (r) {
6890 		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
6891 		/*
6892 		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
6893 		 * uvd_v2_2_resume() fail early, so nothing happens there.
6894 		 * Going through that code is therefore pointless, hence
6895 		 * we disable UVD here.
6896 		 */
6897 		rdev->has_uvd = 0;
6898 		return;
6899 	}
6900 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
6901 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
6902 }
6903 
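/**
 * si_uvd_start - UVD hardware start (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Resumes the UVD block and brings up the fence driver for the UVD
 * ring.  On any error the UVD ring size is cleared so that
 * si_uvd_resume() skips the ring.
 */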
6904 static void si_uvd_start(struct radeon_device *rdev)
6905 {
6906 	int r;
6907 
6908 	if (!rdev->has_uvd)
6909 		return;
6910 
6911 	r = uvd_v2_2_resume(rdev);
6912 	if (r) {
6913 		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
6914 		goto error;
6915 	}
6916 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
6917 	if (r) {
6918 		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
6919 		goto error;
6920 	}
6921 	return;
6922 
6923 error:
6924 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
6925 }
6926 
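/**
 * si_uvd_resume - bring up the UVD ring (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the UVD ring and the UVD block itself.  A no-op when
 * UVD is absent or when si_uvd_start() failed (ring_size == 0).
 */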
6927 static void si_uvd_resume(struct radeon_device *rdev)
6928 {
6929 	struct radeon_ring *ring;
6930 	int r;
6931 
6932 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
6933 		return;
6934 
6935 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
6936 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
6937 	if (r) {
6938 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
6939 		return;
6940 	}
6941 	r = uvd_v1_0_init(rdev);
6942 	if (r) {
6943 		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
6944 		return;
6945 	}
6946 }
6947 
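/**
 * si_vce_init - VCE software init (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Sets up VCE via radeon_vce_init() and initializes both VCE ring
 * objects.  If radeon_vce_init() fails, VCE is disabled entirely
 * (rdev->has_vce = 0) so the start/resume helpers below turn into
 * no-ops instead of failing again later.
 */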
6948 static void si_vce_init(struct radeon_device *rdev)
6949 {
6950 	int r;
6951 
6952 	if (!rdev->has_vce)
6953 		return;
6954 
6955 	r = radeon_vce_init(rdev);
6956 	if (r) {
6957 		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
6958 		/*
6959 		 * At this point rdev->vce.vcpu_bo is NULL, which makes
6960 		 * si_vce_start() fail early, so nothing happens there.
6961 		 * Going through that code is therefore pointless, hence
6962 		 * we disable VCE here.
6963 		 */
6964 		rdev->has_vce = 0;
6965 		return;
6966 	}
6967 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
6968 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
6969 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
6970 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
6971 }
6972 
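/**
 * si_vce_start - VCE hardware start (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Resumes the VCE block and brings up the fence drivers for both
 * VCE rings.  On any error both VCE ring sizes are cleared so that
 * si_vce_resume() skips the rings.
 */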
6973 static void si_vce_start(struct radeon_device *rdev)
6974 {
6975 	int r;
6976 
6977 	if (!rdev->has_vce)
6978 		return;
6979 
6980 	r = radeon_vce_resume(rdev);
6981 	if (r) {
6982 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
6983 		goto error;
6984 	}
6985 	r = vce_v1_0_resume(rdev);
6986 	if (r) {
6987 		dev_err(rdev->dev, "failed vce_v1_0 resume (%d).\n", r);
6988 		goto error;
6989 	}
6990 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
6991 	if (r) {
6992 		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
6993 		goto error;
6994 	}
6995 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
6996 	if (r) {
6997 		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
6998 		goto error;
6999 	}
7000 	return;
7001 
7002 error:
7003 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
7004 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
7005 }
7006 
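/**
 * si_vce_resume - bring up the VCE rings (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Initializes both VCE rings and the VCE block itself.  A no-op when
 * VCE is absent or when si_vce_start() failed (ring_size == 0).
 */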
7007 static void si_vce_resume(struct radeon_device *rdev)
7008 {
7009 	struct radeon_ring *ring;
7010 	int r;
7011 
7012 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
7013 		return;
7014 
7015 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
7016 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7017 	if (r) {
7018 		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
7019 		return;
7020 	}
7021 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
7022 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, VCE_CMD_NO_OP);
7023 	if (r) {
7024 		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
7025 		return;
7026 	}
7027 	r = vce_v1_0_init(rdev);
7028 	if (r) {
7029 		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
7030 		return;
7031 	}
7032 }
7033 
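/**
 * si_startup - program the asic to a functional state (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Brings the hardware up in dependency order: PCIe link and ASPM
 * setup, VRAM scratch, MC programming and microcode, GART, GPU
 * config, RLC, writeback, the fence drivers, UVD/VCE, interrupts,
 * all five rings (GFX, CP1, CP2, DMA0, DMA1), CP/DMA resume, the
 * IB pool, the VM manager and audio.
 * Called at driver init and resume time.
 * Returns 0 on success, negative error code on failure.
 */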
7034 static int si_startup(struct radeon_device *rdev)
7035 {
7036 	struct radeon_ring *ring;
7037 	int r;
7038 
7039 	/* enable pcie gen2/3 link */
7040 	si_pcie_gen3_enable(rdev);
7041 	/* enable aspm */
7042 	si_program_aspm(rdev);
7043 
7044 	/* scratch needs to be initialized before MC */
7045 	r = r600_vram_scratch_init(rdev);
7046 	if (r)
7047 		return r;
7048 
7049 	si_mc_program(rdev);
7050 
7051 	if (!rdev->pm.dpm_enabled) {
7052 		r = si_mc_load_microcode(rdev);
7053 		if (r) {
7054 			DRM_ERROR("Failed to load MC firmware!\n");
7055 			return r;
7056 		}
7057 	}
7058 
7059 	r = si_pcie_gart_enable(rdev);
7060 	if (r)
7061 		return r;
7062 	si_gpu_init(rdev);
7063 
7064 	/* allocate rlc buffers */
7065 	if (rdev->family == CHIP_VERDE) {
7066 		rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
7067 		rdev->rlc.reg_list_size =
7068 			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
7069 	}
7070 	rdev->rlc.cs_data = si_cs_data;
7071 	r = sumo_rlc_init(rdev);
7072 	if (r) {
7073 		DRM_ERROR("Failed to init rlc BOs!\n");
7074 		return r;
7075 	}
7076 
7077 	/* allocate wb buffer */
7078 	r = radeon_wb_init(rdev);
7079 	if (r)
7080 		return r;
7081 
7082 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
7083 	if (r) {
7084 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7085 		return r;
7086 	}
7087 
7088 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
7089 	if (r) {
7090 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7091 		return r;
7092 	}
7093 
7094 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
7095 	if (r) {
7096 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
7097 		return r;
7098 	}
7099 
7100 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
7101 	if (r) {
7102 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7103 		return r;
7104 	}
7105 
7106 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
7107 	if (r) {
7108 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
7109 		return r;
7110 	}
7111 
7112 	si_uvd_start(rdev);
7113 	si_vce_start(rdev);
7114 
7115 	/* Enable IRQ */
7116 	if (!rdev->irq.installed) {
7117 		r = radeon_irq_kms_init(rdev);
7118 		if (r)
7119 			return r;
7120 	}
7121 
7122 	r = si_irq_init(rdev);
7123 	if (r) {
7124 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
7125 		radeon_irq_kms_fini(rdev);
7126 		return r;
7127 	}
7128 	si_irq_set(rdev);
7129 
7130 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7131 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
7132 			     RADEON_CP_PACKET2);
7133 	if (r)
7134 		return r;
7135 
7136 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7137 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
7138 			     RADEON_CP_PACKET2);
7139 	if (r)
7140 		return r;
7141 
7142 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7143 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
7144 			     RADEON_CP_PACKET2);
7145 	if (r)
7146 		return r;
7147 
7148 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7149 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
7150 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
7151 	if (r)
7152 		return r;
7153 
7154 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7155 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
7156 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
7157 	if (r)
7158 		return r;
7159 
7160 	r = si_cp_load_microcode(rdev);
7161 	if (r)
7162 		return r;
7163 	r = si_cp_resume(rdev);
7164 	if (r)
7165 		return r;
7166 
7167 	r = cayman_dma_resume(rdev);
7168 	if (r)
7169 		return r;
7170 
7171 	si_uvd_resume(rdev);
7172 	si_vce_resume(rdev);
7173 
7174 	r = radeon_ib_pool_init(rdev);
7175 	if (r) {
7176 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
7177 		return r;
7178 	}
7179 
7180 	r = radeon_vm_manager_init(rdev);
7181 	if (r) {
7182 		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
7183 		return r;
7184 	}
7185 
7186 	r = radeon_audio_init(rdev);
7187 	if (r)
7188 		return r;
7189 
7190 	return 0;
7191 }
7192 
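/**
 * si_resume - resume the asic (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Re-posts the card through the ATOM BIOS, restores the golden
 * registers, resumes power management and re-runs si_startup().
 * Returns 0 on success, negative error code on failure.
 */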
7193 int si_resume(struct radeon_device *rdev)
7194 {
7195 	int r;
7196 
7197 	/* Do not reset the GPU before posting; on rv770 and later hw,
7198 	 * unlike on r500 hw, posting performs the tasks necessary to
7199 	 * bring the GPU back into good shape.
7200 	 */
7201 	/* post card */
7202 	atom_asic_init(rdev->mode_info.atom_context);
7203 
7204 	/* init golden registers */
7205 	si_init_golden_registers(rdev);
7206 
7207 	if (rdev->pm.pm_method == PM_METHOD_DPM)
7208 		radeon_pm_resume(rdev);
7209 
7210 	rdev->accel_working = true;
7211 	r = si_startup(rdev);
7212 	if (r) {
7213 		DRM_ERROR("si startup failed on resume\n");
7214 		rdev->accel_working = false;
7215 		return r;
7216 	}
7217 
7218 	return r;
7220 }
7221 
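/**
 * si_suspend - suspend the asic (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Tears the active state down in roughly the reverse order of
 * si_startup(): PM, audio, VM manager, CP and DMA engines, UVD/VCE,
 * powergating/clockgating, interrupts, writeback and finally GART.
 * Always returns 0.
 */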
7222 int si_suspend(struct radeon_device *rdev)
7223 {
7224 	radeon_pm_suspend(rdev);
7225 	radeon_audio_fini(rdev);
7226 	radeon_vm_manager_fini(rdev);
7227 	si_cp_enable(rdev, false);
7228 	cayman_dma_stop(rdev);
7229 	if (rdev->has_uvd) {
7230 		uvd_v1_0_fini(rdev);
7231 		radeon_uvd_suspend(rdev);
7232 	}
7233 	if (rdev->has_vce)
7234 		radeon_vce_suspend(rdev);
7235 	si_fini_pg(rdev);
7236 	si_fini_cg(rdev);
7237 	si_irq_suspend(rdev);
7238 	radeon_wb_disable(rdev);
7239 	si_pcie_gart_disable(rdev);
7240 	return 0;
7241 }
7242 
7243 /* The plan is to move initialization into this function and use
7244  * helper functions so that radeon_device_init does pretty much
7245  * nothing more than call asic specific functions. This should
7246  * also allow us to remove a bunch of callback functions like
7247  * vram_info.
7248  */
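/**
 * si_init - asic specific driver and hw init (SI)
 *
 * @rdev: radeon_device pointer
 *
 * One-time setup: BIOS fetch and ATOM init, posting if needed,
 * golden/scratch/surface registers, clocks, the fence driver, MC
 * and memory manager, microcode, power management, the ring and IH
 * ring objects, UVD/VCE and GART, followed by a first si_startup().
 * Fails if the MC ucode is missing, since the default clocks and
 * voltages are not sufficient without it.
 * Returns 0 on success, negative error code on failure.
 */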
7249 int si_init(struct radeon_device *rdev)
7250 {
7251 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7252 	int r;
7253 
7254 	/* Read BIOS */
7255 	if (!radeon_get_bios(rdev)) {
7256 		if (ASIC_IS_AVIVO(rdev))
7257 			return -EINVAL;
7258 	}
7259 	/* Must be an ATOMBIOS */
7260 	if (!rdev->is_atom_bios) {
7261 		dev_err(rdev->dev, "Expecting atombios for SI GPU\n");
7262 		return -EINVAL;
7263 	}
7264 	r = radeon_atombios_init(rdev);
7265 	if (r)
7266 		return r;
7267 
7268 	/* Post card if necessary */
7269 	if (!radeon_card_posted(rdev)) {
7270 		if (!rdev->bios) {
7271 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
7272 			return -EINVAL;
7273 		}
7274 		DRM_INFO("GPU not posted. posting now...\n");
7275 		atom_asic_init(rdev->mode_info.atom_context);
7276 	}
7277 	/* init golden registers */
7278 	si_init_golden_registers(rdev);
7279 	/* Initialize scratch registers */
7280 	si_scratch_init(rdev);
7281 	/* Initialize surface registers */
7282 	radeon_surface_init(rdev);
7283 	/* Initialize clocks */
7284 	radeon_get_clock_info(rdev->ddev);
7285 
7286 	/* Fence driver */
7287 	r = radeon_fence_driver_init(rdev);
7288 	if (r)
7289 		return r;
7290 
7291 	/* initialize memory controller */
7292 	r = si_mc_init(rdev);
7293 	if (r)
7294 		return r;
7295 	/* Memory manager */
7296 	r = radeon_bo_init(rdev);
7297 	if (r)
7298 		return r;
7299 
7300 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
7301 	    !rdev->rlc_fw || !rdev->mc_fw) {
7302 		r = si_init_microcode(rdev);
7303 		if (r) {
7304 			DRM_ERROR("Failed to load firmware!\n");
7305 			return r;
7306 		}
7307 	}
7308 
7309 	/* Initialize power management */
7310 	radeon_pm_init(rdev);
7311 
7312 	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
7313 	ring->ring_obj = NULL;
7314 	r600_ring_init(rdev, ring, 1024 * 1024);
7315 
7316 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
7317 	ring->ring_obj = NULL;
7318 	r600_ring_init(rdev, ring, 1024 * 1024);
7319 
7320 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
7321 	ring->ring_obj = NULL;
7322 	r600_ring_init(rdev, ring, 1024 * 1024);
7323 
7324 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
7325 	ring->ring_obj = NULL;
7326 	r600_ring_init(rdev, ring, 64 * 1024);
7327 
7328 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
7329 	ring->ring_obj = NULL;
7330 	r600_ring_init(rdev, ring, 64 * 1024);
7331 
7332 	si_uvd_init(rdev);
7333 	si_vce_init(rdev);
7334 
7335 	rdev->ih.ring_obj = NULL;
7336 	r600_ih_ring_init(rdev, 64 * 1024);
7337 
7338 	r = r600_pcie_gart_init(rdev);
7339 	if (r)
7340 		return r;
7341 
7342 #ifdef __DragonFly__
7343 	/*
7344 	 * Some GLX operations (xfce 4.14) hang on SI hardware;
7345 	 * tell userland that acceleration is not working properly.
7346 	 */
7347 	rdev->accel_working = false;
7348 	DRM_ERROR("GPU acceleration disabled for now on DragonFly\n");
7349 #else
7350 	rdev->accel_working = true;
7351 #endif
7352 	r = si_startup(rdev);
7353 	if (r) {
7354 		dev_err(rdev->dev, "disabling GPU acceleration\n");
7355 		si_cp_fini(rdev);
7356 		cayman_dma_fini(rdev);
7357 		si_irq_fini(rdev);
7358 		sumo_rlc_fini(rdev);
7359 		radeon_wb_fini(rdev);
7360 		radeon_ib_pool_fini(rdev);
7361 		radeon_vm_manager_fini(rdev);
7362 		radeon_irq_kms_fini(rdev);
7363 		si_pcie_gart_fini(rdev);
7364 		rdev->accel_working = false;
7365 	}
7366 
7367 	/* Don't start up if the MC ucode is missing.
7368 	 * The default clocks and voltages before the MC ucode
7369 	 * is loaded are not sufficient for advanced operations.
7370 	 */
7371 	if (!rdev->mc_fw) {
7372 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
7373 		return -EINVAL;
7374 	}
7375 
7376 	return 0;
7377 }
7378 
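/**
 * si_fini - asic specific driver and hw teardown (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Undoes si_init(): stops the engines and interrupt handling, then
 * releases the RLC, writeback, VM, IB, UVD/VCE, GART, scratch, GEM,
 * fence, buffer, ATOM and microcode state.
 */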
7379 void si_fini(struct radeon_device *rdev)
7380 {
7381 	radeon_pm_fini(rdev);
7382 	si_cp_fini(rdev);
7383 	cayman_dma_fini(rdev);
7384 	si_fini_pg(rdev);
7385 	si_fini_cg(rdev);
7386 	si_irq_fini(rdev);
7387 	sumo_rlc_fini(rdev);
7388 	radeon_wb_fini(rdev);
7389 	radeon_vm_manager_fini(rdev);
7390 	radeon_ib_pool_fini(rdev);
7391 	radeon_irq_kms_fini(rdev);
7392 	if (rdev->has_uvd) {
7393 		uvd_v1_0_fini(rdev);
7394 		radeon_uvd_fini(rdev);
7395 	}
7396 	if (rdev->has_vce)
7397 		radeon_vce_fini(rdev);
7398 	si_pcie_gart_fini(rdev);
7399 	r600_vram_scratch_fini(rdev);
7400 	radeon_gem_fini(rdev);
7401 	radeon_fence_driver_fini(rdev);
7402 	radeon_bo_fini(rdev);
7403 	radeon_atombios_fini(rdev);
7404 	si_fini_microcode(rdev);
7405 	kfree(rdev->bios);
7406 	rdev->bios = NULL;
7407 }
7408 
7409 /**
7410  * si_get_gpu_clock_counter - return GPU clock counter snapshot
7411  *
7412  * @rdev: radeon_device pointer
7413  *
7414  * Fetches a GPU clock counter snapshot (SI).
7415  * Returns the 64 bit clock counter snapshot.
7416  */
7417 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
7418 {
7419 	uint64_t clock;
7420 
7421 	mutex_lock(&rdev->gpu_clock_mutex);
7422 	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
7423 	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
7424 		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
7425 	mutex_unlock(&rdev->gpu_clock_mutex);
7426 	return clock;
7427 }
7428 
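/**
 * si_set_uvd_clocks - program the UVD PLL (SI)
 *
 * @rdev: radeon_device pointer
 * @vclk: requested VCLK, 0 to leave the PLL bypassed
 * @dclk: requested DCLK, 0 to leave the PLL bypassed
 *
 * Temporarily bypasses VCLK/DCLK with BCLK, computes the feedback
 * and post dividers via radeon_uvd_calc_upll_dividers(), then walks
 * the UPLL through its reset/settle sequence before switching the
 * clocks back to the PLL outputs.
 * Returns 0 on success, negative error code on failure.
 */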
7429 int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
7430 {
7431 	unsigned fb_div = 0, vclk_div = 0, dclk_div = 0;
7432 	int r;
7433 
7434 	/* bypass vclk and dclk with bclk */
7435 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7436 		VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
7437 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
7438 
7439 	/* put PLL in bypass mode */
7440 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
7441 
7442 	if (!vclk || !dclk) {
7443 		/* keep the Bypass mode */
7444 		return 0;
7445 	}
7446 
7447 	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 125000, 250000,
7448 					  16384, 0x03FFFFFF, 0, 128, 5,
7449 					  &fb_div, &vclk_div, &dclk_div);
7450 	if (r)
7451 		return r;
7452 
7453 	/* set RESET_ANTI_MUX to 0 */
7454 	WREG32_P(CG_UPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
7455 
7456 	/* set VCO_MODE to 1 */
7457 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
7458 
7459 	/* disable sleep mode */
7460 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
7461 
7462 	/* deassert UPLL_RESET */
7463 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
7464 
7465 	mdelay(1);
7466 
7467 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7468 	if (r)
7469 		return r;
7470 
7471 	/* assert UPLL_RESET again */
7472 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
7473 
7474 	/* disable spread spectrum. */
7475 	WREG32_P(CG_UPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
7476 
7477 	/* set feedback divider */
7478 	WREG32_P(CG_UPLL_FUNC_CNTL_3, UPLL_FB_DIV(fb_div), ~UPLL_FB_DIV_MASK);
7479 
7480 	/* set ref divider to 0 */
7481 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_REF_DIV_MASK);
7482 
7483 	if (fb_div < 307200)
7484 		WREG32_P(CG_UPLL_FUNC_CNTL_4, 0, ~UPLL_SPARE_ISPARE9);
7485 	else
7486 		WREG32_P(CG_UPLL_FUNC_CNTL_4, UPLL_SPARE_ISPARE9, ~UPLL_SPARE_ISPARE9);
7487 
7488 	/* set PDIV_A and PDIV_B */
7489 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7490 		UPLL_PDIV_A(vclk_div) | UPLL_PDIV_B(dclk_div),
7491 		~(UPLL_PDIV_A_MASK | UPLL_PDIV_B_MASK));
7492 
7493 	/* give the PLL some time to settle */
7494 	mdelay(15);
7495 
7496 	/* deassert PLL_RESET */
7497 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
7498 
7499 	mdelay(15);
7500 
7501 	/* switch from bypass mode to normal mode */
7502 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
7503 
7504 	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
7505 	if (r)
7506 		return r;
7507 
7508 	/* switch VCLK and DCLK selection */
7509 	WREG32_P(CG_UPLL_FUNC_CNTL_2,
7510 		VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
7511 		~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
7512 
7513 	mdelay(100);
7514 
7515 	return 0;
7516 }
7517 
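/**
 * si_pcie_gen3_enable - enable PCIe gen 2/3 link speeds (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Queries the attainable link speeds with
 * drm_pcie_get_speed_cap_mask() and retrains the link to gen 2
 * (5.0 GT/s) or gen 3 (8.0 GT/s) where supported; gen 3 requires an
 * additional equalization retry loop on the bridge and GPU link
 * control registers.  Skipped on IGPs and non-PCIe parts, and
 * disabled with radeon.pcie_gen2=0.
 */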
7518 static void si_pcie_gen3_enable(struct radeon_device *rdev)
7519 {
7520 	struct pci_dev *root = rdev->pdev->bus->self;
7521 	int bridge_pos, gpu_pos;
7522 	u32 speed_cntl, mask, current_data_rate;
7523 	int ret, i;
7524 	u16 tmp16;
7525 
7526 #if 0
7527 	if (pci_is_root_bus(rdev->pdev->bus))
7528 		return;
7529 #endif
7530 
7531 	if (radeon_pcie_gen2 == 0)
7532 		return;
7533 
7534 	if (rdev->flags & RADEON_IS_IGP)
7535 		return;
7536 
7537 	if (!(rdev->flags & RADEON_IS_PCIE))
7538 		return;
7539 
7540 	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
7541 	if (ret != 0)
7542 		return;
7543 
7544 	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
7545 		return;
7546 
7547 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7548 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
7549 		LC_CURRENT_DATA_RATE_SHIFT;
7550 	if (mask & DRM_PCIE_SPEED_80) {
7551 		if (current_data_rate == 2) {
7552 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
7553 			return;
7554 		}
7555 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
7556 	} else if (mask & DRM_PCIE_SPEED_50) {
7557 		if (current_data_rate == 1) {
7558 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
7559 			return;
7560 		}
7561 		DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
7562 	}
7563 
7564 	bridge_pos = pci_pcie_cap(root);
7565 	if (!bridge_pos)
7566 		return;
7567 
7568 	gpu_pos = pci_pcie_cap(rdev->pdev);
7569 	if (!gpu_pos)
7570 		return;
7571 
7572 	if (mask & DRM_PCIE_SPEED_80) {
7573 		/* re-try equalization if gen3 is not already enabled */
7574 		if (current_data_rate != 2) {
7575 			u16 bridge_cfg, gpu_cfg;
7576 			u16 bridge_cfg2, gpu_cfg2;
7577 			u32 max_lw, current_lw, tmp;
7578 
7579 			pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
7580 			pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
7581 
7582 			tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
7583 			pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
7584 
7585 			tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
7586 			pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
7587 
7588 			tmp = RREG32_PCIE(PCIE_LC_STATUS1);
7589 			max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
7590 			current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
7591 
7592 			if (current_lw < max_lw) {
7593 				tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7594 				if (tmp & LC_RENEGOTIATION_SUPPORT) {
7595 					tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
7596 					tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
7597 					tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
7598 					WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
7599 				}
7600 			}
7601 
7602 			for (i = 0; i < 10; i++) {
7603 				/* check status */
7604 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
7605 				if (tmp16 & PCI_EXP_DEVSTA_TRPND)
7606 					break;
7607 
7608 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
7609 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
7610 
7611 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
7612 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
7613 
7614 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7615 				tmp |= LC_SET_QUIESCE;
7616 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7617 
7618 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7619 				tmp |= LC_REDO_EQ;
7620 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7621 
7622 				mdelay(100);
7623 
7624 				/* linkctl */
7625 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
7626 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
7627 				tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
7628 				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
7629 
7630 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
7631 				tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
7632 				tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
7633 				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
7634 
7635 				/* linkctl2 */
7636 				pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
7637 				tmp16 &= ~((1 << 4) | (7 << 9));
7638 				tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
7639 				pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
7640 
7641 				pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
7642 				tmp16 &= ~((1 << 4) | (7 << 9));
7643 				tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
7644 				pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
7645 
7646 				tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
7647 				tmp &= ~LC_SET_QUIESCE;
7648 				WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
7649 			}
7650 		}
7651 	}
7652 
7653 	/* set the link speed */
7654 	speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
7655 	speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
7656 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
7657 
7658 	pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
7659 	tmp16 &= ~0xf; /* clear the target link speed field */
7660 	if (mask & DRM_PCIE_SPEED_80)
7661 		tmp16 |= 3; /* gen3 */
7662 	else if (mask & DRM_PCIE_SPEED_50)
7663 		tmp16 |= 2; /* gen2 */
7664 	else
7665 		tmp16 |= 1; /* gen1 */
7666 	pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
7667 
7668 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7669 	speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
7670 	WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
7671 
7672 	for (i = 0; i < rdev->usec_timeout; i++) {
7673 		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
7674 		if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
7675 			break;
7676 		udelay(1);
7677 	}
7678 }
7679 
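/**
 * si_program_aspm - configure PCIe ASPM (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Programs the L0s/L1 inactivity timers and the PHY PLL power-down
 * behaviour in L1.  CLKREQ-based power down is currently forced off
 * here (clk_req_support is always false).  Disabled with
 * radeon.aspm=0.
 */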
7680 static void si_program_aspm(struct radeon_device *rdev)
7681 {
7682 	u32 data, orig;
7683 	bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
7684 #if 0
7685 	bool disable_clkreq = false;
7686 #endif
7687 
7688 	if (radeon_aspm == 0)
7689 		return;
7690 
7691 	if (!(rdev->flags & RADEON_IS_PCIE))
7692 		return;
7693 
7694 	orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7695 	data &= ~LC_XMIT_N_FTS_MASK;
7696 	data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
7697 	if (orig != data)
7698 		WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
7699 
7700 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
7701 	data |= LC_GO_TO_RECOVERY;
7702 	if (orig != data)
7703 		WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
7704 
7705 	orig = data = RREG32_PCIE(PCIE_P_CNTL);
7706 	data |= P_IGNORE_EDB_ERR;
7707 	if (orig != data)
7708 		WREG32_PCIE(PCIE_P_CNTL, data);
7709 
7710 	orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7711 	data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
7712 	data |= LC_PMI_TO_L1_DIS;
7713 	if (!disable_l0s)
7714 		data |= LC_L0S_INACTIVITY(7);
7715 
7716 	if (!disable_l1) {
7717 		data |= LC_L1_INACTIVITY(7);
7718 		data &= ~LC_PMI_TO_L1_DIS;
7719 		if (orig != data)
7720 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7721 
7722 		if (!disable_plloff_in_l1) {
7723 			bool clk_req_support;
7724 
7725 			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7726 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7727 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7728 			if (orig != data)
7729 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7730 
7731 			orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7732 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7733 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7734 			if (orig != data)
7735 				WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7736 
7737 			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7738 			data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
7739 			data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
7740 			if (orig != data)
7741 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7742 
7743 			orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7744 			data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
7745 			data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
7746 			if (orig != data)
7747 				WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7748 
7749 			if ((rdev->family != CHIP_OLAND) && (rdev->family != CHIP_HAINAN)) {
7750 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0);
7751 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
7752 				if (orig != data)
7753 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_0, data);
7754 
7755 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1);
7756 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
7757 				if (orig != data)
7758 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_1, data);
7759 
7760 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2);
7761 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
7762 				if (orig != data)
7763 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_2, data);
7764 
7765 				orig = data = RREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3);
7766 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
7767 				if (orig != data)
7768 					WREG32_PIF_PHY0(PB0_PIF_PWRDOWN_3, data);
7769 
7770 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0);
7771 				data &= ~PLL_RAMP_UP_TIME_0_MASK;
7772 				if (orig != data)
7773 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_0, data);
7774 
7775 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1);
7776 				data &= ~PLL_RAMP_UP_TIME_1_MASK;
7777 				if (orig != data)
7778 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_1, data);
7779 
7780 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2);
7781 				data &= ~PLL_RAMP_UP_TIME_2_MASK;
7782 				if (orig != data)
7783 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_2, data);
7784 
7785 				orig = data = RREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3);
7786 				data &= ~PLL_RAMP_UP_TIME_3_MASK;
7787 				if (orig != data)
7788 					WREG32_PIF_PHY1(PB1_PIF_PWRDOWN_3, data);
7789 			}
7790 			orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
7791 			data &= ~LC_DYN_LANES_PWR_STATE_MASK;
7792 			data |= LC_DYN_LANES_PWR_STATE(3);
7793 			if (orig != data)
7794 				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
7795 
7796 			orig = data = RREG32_PIF_PHY0(PB0_PIF_CNTL);
7797 			data &= ~LS2_EXIT_TIME_MASK;
7798 			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7799 				data |= LS2_EXIT_TIME(5);
7800 			if (orig != data)
7801 				WREG32_PIF_PHY0(PB0_PIF_CNTL, data);
7802 
7803 			orig = data = RREG32_PIF_PHY1(PB1_PIF_CNTL);
7804 			data &= ~LS2_EXIT_TIME_MASK;
7805 			if ((rdev->family == CHIP_OLAND) || (rdev->family == CHIP_HAINAN))
7806 				data |= LS2_EXIT_TIME(5);
7807 			if (orig != data)
7808 				WREG32_PIF_PHY1(PB1_PIF_CNTL, data);
7809 
7810 #ifdef zMN_TODO
7811 			if (!disable_clkreq &&
7812 			    !pci_is_root_bus(rdev->pdev->bus)) {
7813 				struct pci_dev *root = rdev->pdev->bus->self;
7814 				u32 lnkcap;
7815 
7816 				clk_req_support = false;
7817 				pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
7818 				if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
7819 					clk_req_support = true;
7820 			} else {
7821 				clk_req_support = false;
7822 			}
7823 #else
7824 			clk_req_support = false;
7825 #endif
7826 
7827 			if (clk_req_support) {
7828 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
7829 				data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
7830 				if (orig != data)
7831 					WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
7832 
7833 				orig = data = RREG32(THM_CLK_CNTL);
7834 				data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
7835 				data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
7836 				if (orig != data)
7837 					WREG32(THM_CLK_CNTL, data);
7838 
7839 				orig = data = RREG32(MISC_CLK_CNTL);
7840 				data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
7841 				data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
7842 				if (orig != data)
7843 					WREG32(MISC_CLK_CNTL, data);
7844 
7845 				orig = data = RREG32(CG_CLKPIN_CNTL);
7846 				data &= ~BCLK_AS_XCLK;
7847 				if (orig != data)
7848 					WREG32(CG_CLKPIN_CNTL, data);
7849 
7850 				orig = data = RREG32(CG_CLKPIN_CNTL_2);
7851 				data &= ~FORCE_BIF_REFCLK_EN;
7852 				if (orig != data)
7853 					WREG32(CG_CLKPIN_CNTL_2, data);
7854 
7855 				orig = data = RREG32(MPLL_BYPASSCLK_SEL);
7856 				data &= ~MPLL_CLKOUT_SEL_MASK;
7857 				data |= MPLL_CLKOUT_SEL(4);
7858 				if (orig != data)
7859 					WREG32(MPLL_BYPASSCLK_SEL, data);
7860 
7861 				orig = data = RREG32(SPLL_CNTL_MODE);
7862 				data &= ~SPLL_REFCLK_SEL_MASK;
7863 				if (orig != data)
7864 					WREG32(SPLL_CNTL_MODE, data);
7865 			}
7866 		}
7867 	} else {
7868 		if (orig != data)
7869 			WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7870 	}
7871 
7872 	orig = data = RREG32_PCIE(PCIE_CNTL2);
7873 	data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
7874 	if (orig != data)
7875 		WREG32_PCIE(PCIE_CNTL2, data);
7876 
7877 	if (!disable_l0s) {
7878 		data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
7879 		if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
7880 			data = RREG32_PCIE(PCIE_LC_STATUS1);
7881 			if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
7882 				orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
7883 				data &= ~LC_L0S_INACTIVITY_MASK;
7884 				if (orig != data)
7885 					WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
7886 			}
7887 		}
7888 	}
7889 }
7890 
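/**
 * si_vce_send_vcepll_ctlreq - commit VCEPLL register changes (SI)
 *
 * @rdev: radeon_device pointer
 *
 * Pulses the CTLREQ bit and polls for CTLACK/CTLACK2 for up to one
 * second (100 x 10 ms) so that previously programmed VCEPLL
 * settings take effect.
 * Returns 0 on success, -ETIMEDOUT if the PLL never acknowledges.
 */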
7891 static int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev)
7892 {
7893 	unsigned i;
7894 
7895 	/* make sure VCEPLL_CTLREQ is deasserted */
7896 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7897 
7898 	mdelay(10);
7899 
7900 	/* assert UPLL_CTLREQ */
7901 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);
7902 
7903 	/* wait for CTLACK and CTLACK2 to get asserted */
7904 	for (i = 0; i < 100; ++i) {
7905 		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
7906 		if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask)
7907 			break;
7908 		mdelay(10);
7909 	}
7910 
7911 	/* deassert UPLL_CTLREQ */
7912 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK);
7913 
7914 	if (i == 100) {
7915 		DRM_ERROR("Timeout setting VCE clocks!\n");
7916 		return -ETIMEDOUT;
7917 	}
7918 
7919 	return 0;
7920 }
7921 
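/**
 * si_set_vce_clocks - program the VCE PLL (SI)
 *
 * @rdev: radeon_device pointer
 * @evclk: requested EVCLK, 0 to bypass and sleep the PLL
 * @ecclk: requested ECCLK, 0 to bypass and sleep the PLL
 *
 * Mirrors si_set_uvd_clocks() for the VCEPLL: bypass EVCLK/ECCLK
 * with BCLK, compute the dividers, run the reset/settle sequence,
 * then switch EVCLK/ECCLK back to the PLL outputs.
 * Returns 0 on success, negative error code on failure.
 */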
7922 int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
7923 {
7924 	unsigned fb_div = 0, evclk_div = 0, ecclk_div = 0;
7925 	int r;
7926 
7927 	/* bypass evclk and ecclk with bclk */
7928 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
7929 		     EVCLK_SRC_SEL(1) | ECCLK_SRC_SEL(1),
7930 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
7931 
7932 	/* put PLL in bypass mode */
7933 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_BYPASS_EN_MASK,
7934 		     ~VCEPLL_BYPASS_EN_MASK);
7935 
7936 	if (!evclk || !ecclk) {
7937 		/* keep the Bypass mode, put PLL to sleep */
7938 		WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
7939 			     ~VCEPLL_SLEEP_MASK);
7940 		return 0;
7941 	}
7942 
7943 	r = radeon_uvd_calc_upll_dividers(rdev, evclk, ecclk, 125000, 250000,
7944 					  16384, 0x03FFFFFF, 0, 128, 5,
7945 					  &fb_div, &evclk_div, &ecclk_div);
7946 	if (r)
7947 		return r;
7948 
7949 	/* set RESET_ANTI_MUX to 0 */
7950 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_5, 0, ~RESET_ANTI_MUX_MASK);
7951 
7952 	/* set VCO_MODE to 1 */
7953 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_VCO_MODE_MASK,
7954 		     ~VCEPLL_VCO_MODE_MASK);
7955 
7956 	/* toggle VCEPLL_SLEEP to 1 then back to 0 */
7957 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
7958 		     ~VCEPLL_SLEEP_MASK);
7959 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_SLEEP_MASK);
7960 
7961 	/* deassert VCEPLL_RESET */
7962 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
7963 
7964 	mdelay(1);
7965 
7966 	r = si_vce_send_vcepll_ctlreq(rdev);
7967 	if (r)
7968 		return r;
7969 
7970 	/* assert VCEPLL_RESET again */
7971 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_RESET_MASK, ~VCEPLL_RESET_MASK);
7972 
7973 	/* disable spread spectrum. */
7974 	WREG32_SMC_P(CG_VCEPLL_SPREAD_SPECTRUM, 0, ~SSEN_MASK);
7975 
7976 	/* set feedback divider */
7977 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_3, VCEPLL_FB_DIV(fb_div), ~VCEPLL_FB_DIV_MASK);
7978 
7979 	/* set ref divider to 0 */
7980 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_REF_DIV_MASK);
7981 
7982 	/* set PDIV_A and PDIV_B */
7983 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
7984 		     VCEPLL_PDIV_A(evclk_div) | VCEPLL_PDIV_B(ecclk_div),
7985 		     ~(VCEPLL_PDIV_A_MASK | VCEPLL_PDIV_B_MASK));
7986 
7987 	/* give the PLL some time to settle */
7988 	mdelay(15);
7989 
7990 	/* deassert PLL_RESET */
7991 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_RESET_MASK);
7992 
7993 	mdelay(15);
7994 
7995 	/* switch from bypass mode to normal mode */
7996 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~VCEPLL_BYPASS_EN_MASK);
7997 
7998 	r = si_vce_send_vcepll_ctlreq(rdev);
7999 	if (r)
8000 		return r;
8001 
8002 	/* switch EVCLK and ECCLK selection */
8003 	WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL_2,
8004 		     EVCLK_SRC_SEL(16) | ECCLK_SRC_SEL(16),
8005 		     ~(EVCLK_SRC_SEL_MASK | ECCLK_SRC_SEL_MASK));
8006 
8007 	mdelay(100);
8008 
8009 	return 0;
8010 }
8011