/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/radeon_drm.h>

#include "atom.h"
#include "cayman_blit_shaders.h"
#include "clearstate_cayman.h"
#include "evergreen.h"
#include "ni.h"
#include "ni_reg.h"
#include "nid.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_audio.h"
#include "radeon_ucode.h"

/*
 * Indirect registers accessor
 */
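/*
 * SMC registers are reached through an index/data pair: the target offset
 * is written to TN_SMC_IND_INDEX_0 and the payload is then transferred via
 * TN_SMC_IND_DATA_0.  smc_idx_lock serializes the two-step sequence so
 * concurrent accessors cannot interleave their index and data writes.
 */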
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	r = RREG32(TN_SMC_IND_DATA_0);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return r;
}

void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));
	WREG32(TN_SMC_IND_DATA_0, (v));
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}

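/* GFX register offsets that appear to form the RLC save/restore list on TN */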
static const u32 tn_rlc_save_restore_register_list[] = {
	0x98fc,
	0x98f0,
	0x9834,
	0x9838,
	0x9870,
	0x9874,
	0x8a14,
	0x8b24,
	0x8bcc,
	0x8b10,
	0x8c30,
	0x8d00,
	0x8d04,
	0x8c00,
	0x8c04,
	0x8c10,
	0x8c14,
	0x8d8c,
	0x8cf0,
	0x8e38,
	0x9508,
	0x9688,
	0x9608,
	0x960c,
	0x9610,
	0x9614,
	0x88c4,
	0x8978,
	0x88d4,
	0x900c,
	0x9100,
	0x913c,
	0x90e8,
	0x9354,
	0xa008,
	0x98f8,
	0x9148,
	0x914c,
	0x3f94,
	0x98f4,
	0x9b7c,
	0x3f8c,
	0x8950,
	0x8954,
	0x8a18,
	0x8b28,
	0x9144,
	0x3f90,
	0x915c,
	0x9160,
	0x9178,
	0x917c,
	0x9180,
	0x918c,
	0x9190,
	0x9194,
	0x9198,
	0x919c,
	0x91a8,
	0x91ac,
	0x91b0,
	0x91b4,
	0x91b8,
	0x91c4,
	0x91c8,
	0x91cc,
	0x91d0,
	0x91d4,
	0x91e0,
	0x91e4,
	0x91ec,
	0x91f0,
	0x91f4,
	0x9200,
	0x9204,
	0x929c,
	0x8030,
	0x9150,
	0x9a60,
	0x920c,
	0x9210,
	0x9228,
	0x922c,
	0x9244,
	0x9248,
	0x91e8,
	0x9294,
	0x9208,
	0x9224,
	0x9240,
	0x9220,
	0x923c,
	0x9258,
	0x9744,
	0xa200,
	0xa204,
	0xa208,
	0xa20c,
	0x8d58,
	0x9030,
	0x9034,
	0x9038,
	0x903c,
	0x9040,
	0x9654,
	0x897c,
	0xa210,
	0xa214,
	0x9868,
	0xa02c,
	0x9664,
	0x9698,
	0x949c,
	0x8e10,
	0x8e18,
	0x8c50,
	0x8c58,
	0x8c60,
	0x8c68,
	0x89b4,
	0x9830,
	0x802c,
};

/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
MODULE_FIRMWARE("radeon/BARTS_mc.bin");
MODULE_FIRMWARE("radeon/BARTS_smc.bin");
MODULE_FIRMWARE("radeon/BTC_rlc.bin");
MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
MODULE_FIRMWARE("radeon/TURKS_me.bin");
MODULE_FIRMWARE("radeon/TURKS_mc.bin");
MODULE_FIRMWARE("radeon/TURKS_smc.bin");
MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
MODULE_FIRMWARE("radeon/CAICOS_me.bin");
MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
MODULE_FIRMWARE("radeon/CAICOS_smc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_pfp.bin");
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_smc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");


static const u32 cayman_golden_registers2[] = {
	0x3e5c, 0xffffffff, 0x00000000,
	0x3e48, 0xffffffff, 0x00000000,
	0x3e4c, 0xffffffff, 0x00000000,
	0x3e64, 0xffffffff, 0x00000000,
	0x3e50, 0xffffffff, 0x00000000,
	0x3e60, 0xffffffff, 0x00000000
};

static const u32 cayman_golden_registers[] = {
	0x5eb4, 0xffffffff, 0x00000002,
	0x5e78, 0x8f311ff1, 0x001000f0,
	0x3f90, 0xffff0000, 0xff000000,
	0x9148, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0xc78, 0x00000080, 0x00000080,
	0xbd4, 0x70073777, 0x00011003,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x02011003,
	0x5bc0, 0x00200000, 0x50100000,
	0x98f8, 0x33773777, 0x02011003,
	0x98fc, 0xffffffff, 0x76541032,
	0x7030, 0x31000311, 0x00000011,
	0x2f48, 0x33773777, 0x42010001,
	0x6b28, 0x00000010, 0x00000012,
	0x7728, 0x00000010, 0x00000012,
	0x10328, 0x00000010, 0x00000012,
	0x10f28, 0x00000010, 0x00000012,
	0x11b28, 0x00000010, 0x00000012,
	0x12728, 0x00000010, 0x00000012,
	0x240c, 0x000007ff, 0x00000000,
	0x8a14, 0xf000001f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x10c, 0x00000001, 0x00010003,
	0xa02c, 0xffffffff, 0x0000009b,
	0x913c, 0x0000010f, 0x01000100,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0x3700001f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88d0, 0xffffffff, 0x0f40df40,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 dvst_golden_registers2[] = {
	0x8f8, 0xffffffff, 0,
	0x8fc, 0x00380000, 0,
	0x8f8, 0xffffffff, 1,
	0x8fc, 0x0e000000, 0
};

static const u32 dvst_golden_registers[] = {
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000
};

static const u32 scrapper_golden_registers[] = {
	0x690, 0x3fff3fff, 0x20c00033,
	0x918c, 0x0fff0fff, 0x00010006,
	0x918c, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x91a8, 0x0fff0fff, 0x00010006,
	0x9150, 0xffffdfff, 0x6e944040,
	0x9150, 0xffffdfff, 0x6e944040,
	0x917c, 0x0fff0fff, 0x00030002,
	0x917c, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x9198, 0x0fff0fff, 0x00030002,
	0x915c, 0x0fff0fff, 0x00010000,
	0x915c, 0x0fff0fff, 0x00010000,
	0x3f90, 0xffff0001, 0xff000000,
	0x3f90, 0xffff0001, 0xff000000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9178, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9194, 0x0fff0fff, 0x00070000,
	0x9148, 0xffff0001, 0xff000000,
	0x9148, 0xffff0001, 0xff000000,
	0x9190, 0x0fff0fff, 0x00090008,
	0x9190, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x91ac, 0x0fff0fff, 0x00090008,
	0x3f94, 0xffff0000, 0xff000000,
	0x3f94, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x914c, 0xffff0000, 0xff000000,
	0x929c, 0x00000fff, 0x00000001,
	0x929c, 0x00000fff, 0x00000001,
	0x55e4, 0xff607fff, 0xfc000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8a18, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x8b28, 0xff000fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x9144, 0xfffc0fff, 0x00000100,
	0x6ed8, 0x00010101, 0x00010000,
	0x9830, 0xffffffff, 0x00000000,
	0x9830, 0xffffffff, 0x00000000,
	0x9834, 0xf00fffff, 0x00000400,
	0x9834, 0xf00fffff, 0x00000400,
	0x9838, 0xfffffffe, 0x00000000,
	0x9838, 0xfffffffe, 0x00000000,
	0xd0c0, 0xff000fff, 0x00000100,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd02c, 0xbfffff1f, 0x08421000,
	0xd0b8, 0x73773777, 0x12010001,
	0xd0b8, 0x73773777, 0x12010001,
	0x5bb0, 0x000000f0, 0x00000070,
	0x98f8, 0x73773777, 0x12010001,
	0x98f8, 0x73773777, 0x12010001,
	0x98fc, 0xffffffff, 0x00000010,
	0x98fc, 0xffffffff, 0x00000010,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x9b7c, 0x00ff0000, 0x00fc0000,
	0x8030, 0x00001f0f, 0x0000100a,
	0x8030, 0x00001f0f, 0x0000100a,
	0x2f48, 0x73773777, 0x12010001,
	0x2f48, 0x73773777, 0x12010001,
	0x2408, 0x00030000, 0x000c007f,
	0x8a14, 0xf000003f, 0x00000007,
	0x8a14, 0xf000003f, 0x00000007,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b24, 0x3fff3fff, 0x00ff0fff,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x8b10, 0x0000ff0f, 0x00000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x28a4c, 0x07ffffff, 0x06000000,
	0x4d8, 0x00000fff, 0x00000100,
	0x4d8, 0x00000fff, 0x00000100,
	0xa008, 0xffffffff, 0x00010000,
	0xa008, 0xffffffff, 0x00010000,
	0x913c, 0xffff03ff, 0x01000100,
	0x913c, 0xffff03ff, 0x01000100,
	0x90e8, 0x001fffff, 0x010400c0,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c00, 0x000000ff, 0x00000003,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c04, 0xf8ff00ff, 0x40600060,
	0x8c30, 0x0000000f, 0x00040005,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x8cf0, 0x1fff1fff, 0x08e00410,
	0x900c, 0x00ffffff, 0x0017071f,
	0x28350, 0x00000f01, 0x00000000,
	0x28350, 0x00000f01, 0x00000000,
	0x9508, 0xf700071f, 0x00000002,
	0x9508, 0xf700071f, 0x00000002,
	0x9688, 0x00300000, 0x0017000f,
	0x960c, 0xffffffff, 0x54763210,
	0x960c, 0xffffffff, 0x54763210,
	0x20ef8, 0x01ff01ff, 0x00000002,
	0x20e98, 0xfffffbff, 0x00200000,
	0x2015c, 0xffffffff, 0x00000f40,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x88c4, 0x001f3ae3, 0x00000082,
	0x8978, 0x3fffffff, 0x04050140,
	0x8978, 0x3fffffff, 0x04050140,
	0x88d4, 0x0000001f, 0x00000010,
	0x88d4, 0x0000001f, 0x00000010,
	0x8974, 0xffffffff, 0x00000000,
	0x8974, 0xffffffff, 0x00000000
};

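/*
 * The golden register tables above are {offset, mask, value} triples
 * consumed by radeon_program_register_sequence().  Which ARUBA table is
 * applied (DVST vs. scrapper) is keyed off the PCI device ID below.
 */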
static void ni_init_golden_registers(struct radeon_device *rdev)
{
	switch (rdev->family) {
	case CHIP_CAYMAN:
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers,
						 (const u32)ARRAY_SIZE(cayman_golden_registers));
		radeon_program_register_sequence(rdev,
						 cayman_golden_registers2,
						 (const u32)ARRAY_SIZE(cayman_golden_registers2));
		break;
	case CHIP_ARUBA:
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9903) ||
		    (rdev->pdev->device == 0x9904) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990A) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990D) ||
		    (rdev->pdev->device == 0x990E) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9913) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9918)) {
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers,
							 (const u32)ARRAY_SIZE(dvst_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		} else {
			radeon_program_register_sequence(rdev,
							 scrapper_golden_registers,
							 (const u32)ARRAY_SIZE(scrapper_golden_registers));
			radeon_program_register_sequence(rdev,
							 dvst_golden_registers2,
							 (const u32)ARRAY_SIZE(dvst_golden_registers2));
		}
		break;
	default:
		break;
	}
}

#define BTC_IO_MC_REGS_SIZE 29

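/*
 * {MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA} pairs programmed before
 * the MC ucode is uploaded.  Only the final entry differs between the
 * BTC/Cayman variants.
 */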
static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00946a00}
};

static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00936a00}
};

static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00916a00}
};

static const u32 cayman_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
	{0x00000077, 0xff010100},
	{0x00000078, 0x00000000},
	{0x00000079, 0x00001434},
	{0x0000007a, 0xcc08ec08},
	{0x0000007b, 0x00040000},
	{0x0000007c, 0x000080c0},
	{0x0000007d, 0x09000000},
	{0x0000007e, 0x00210404},
	{0x00000081, 0x08a8e800},
	{0x00000082, 0x00030444},
	{0x00000083, 0x00000000},
	{0x00000085, 0x00000001},
	{0x00000086, 0x00000002},
	{0x00000087, 0x48490000},
	{0x00000088, 0x20244647},
	{0x00000089, 0x00000005},
	{0x0000008b, 0x66030000},
	{0x0000008c, 0x00006603},
	{0x0000008d, 0x00000100},
	{0x0000008f, 0x00001c0a},
	{0x00000090, 0xff000001},
	{0x00000094, 0x00101101},
	{0x00000095, 0x00000fff},
	{0x00000096, 0x00116fff},
	{0x00000097, 0x60010000},
	{0x00000098, 0x10010000},
	{0x00000099, 0x00006000},
	{0x0000009a, 0x00001000},
	{0x0000009f, 0x00976b00}
};

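/*
 * Upload the MC (memory controller) ucode.  The sequencer is only
 * reprogrammed when the board uses GDDR5 and the MC engine is not
 * already running; training completion is then polled via
 * MC_IO_PAD_CNTL_D0.  Returns -EINVAL when no MC firmware is loaded
 * (e.g. on IGPs, which carry no MC ucode).
 */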
int ni_mc_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	u32 mem_type, running;
	u32 *io_mc_regs;
	int i, ucode_size, regs_size;

	if (!rdev->mc_fw)
		return -EINVAL;

	switch (rdev->family) {
	case CHIP_BARTS:
		io_mc_regs = (u32 *)&barts_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_TURKS:
		io_mc_regs = (u32 *)&turks_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAICOS:
	default:
		io_mc_regs = (u32 *)&caicos_io_mc_regs;
		ucode_size = BTC_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	case CHIP_CAYMAN:
		io_mc_regs = (u32 *)&cayman_io_mc_regs;
		ucode_size = CAYMAN_MC_UCODE_SIZE;
		regs_size = BTC_IO_MC_REGS_SIZE;
		break;
	}

	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
		/* reset the engine and set to writable */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
		}
		/* load the MC ucode */
		fw_data = (const __be32 *)rdev->mc_fw->data;
		for (i = 0; i < ucode_size; i++)
			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < rdev->usec_timeout; i++) {
			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
				break;
			udelay(1);
		}
	}

	return 0;
}

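/*
 * Fetch the PFP/ME/RLC (and, where applicable, MC and SMC) firmware
 * images and validate their sizes.  A missing SMC image is treated as
 * non-fatal: the error is cleared and rdev->smc_fw is left NULL.
 */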
int ni_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
	size_t smc_req_size = 0;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (rdev->family) {
	case CHIP_BARTS:
		chip_name = "BARTS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(BARTS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_TURKS:
		chip_name = "TURKS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(TURKS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAICOS:
		chip_name = "CAICOS";
		rlc_chip_name = "BTC";
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
		mc_req_size = BTC_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAICOS_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_CAYMAN:
		chip_name = "CAYMAN";
		rlc_chip_name = "CAYMAN";
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
		mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
		smc_req_size = ALIGN(CAYMAN_SMC_UCODE_SIZE, 4);
		break;
	case CHIP_ARUBA:
		chip_name = "ARUBA";
		rlc_chip_name = "ARUBA";
		/* pfp/me same size as CAYMAN */
		pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
		me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
		rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
		mc_req_size = 0;
		break;
	default:
		BUG();
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		pr_err("ni_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		pr_err("ni_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

	/* no MC ucode on TN */
	if (!(rdev->flags & RADEON_IS_IGP)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
		err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
		if (err)
			goto out;
		if (rdev->mc_fw->size != mc_req_size) {
			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->mc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

	if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			pr_err("smc: error loading firmware \"%s\"\n", fw_name);
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
			err = 0;
		} else if (rdev->smc_fw->size != smc_req_size) {
			pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->size, fw_name);
			err = -EINVAL;
		}
	}

out:
	if (err) {
		if (err != -EINVAL)
			pr_err("ni_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->mc_fw);
		rdev->mc_fw = NULL;
	}
	return err;
}

/**
 * cayman_get_allowed_info_register - fetch the register for the info ioctl
 *
 * @rdev: radeon_device pointer
 * @reg: register offset in bytes
 * @val: register value
 *
 * Returns 0 for success or -EINVAL for an invalid register
 *
 */
int cayman_get_allowed_info_register(struct radeon_device *rdev,
				     u32 reg, u32 *val)
{
	switch (reg) {
	case GRBM_STATUS:
	case GRBM_STATUS_SE0:
	case GRBM_STATUS_SE1:
	case SRBM_STATUS:
	case SRBM_STATUS2:
	case (DMA_STATUS_REG + DMA0_REGISTER_OFFSET):
	case (DMA_STATUS_REG + DMA1_REGISTER_OFFSET):
	case UVD_STATUS:
		*val = RREG32(reg);
		return 0;
	default:
		return -EINVAL;
	}
}

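/*
 * Report the GPU temperature in millidegrees Celsius.  The 11-bit raw
 * field in TN_CURRENT_GNB_TEMP counts 0.125 degree steps offset by -49 C,
 * hence the (raw / 8) - 49 conversion below.
 */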
int tn_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32_SMC(TN_CURRENT_GNB_TEMP) & 0x7ff;
	int actual_temp = (temp / 8) - 49;

	return actual_temp * 1000;
}

/*
 * Core functions
 */
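/*
 * cayman_gpu_init() picks the per-ASIC shader/backend limits (switching
 * on family and, for ARUBA, on PCI device ID), derives the tiling
 * configuration from GB_ADDR_CONFIG, and programs the render-backend and
 * SIMD masks plus assorted 3D engine defaults.
 */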
static void cayman_gpu_init(struct radeon_device *rdev)
{
	u32 gb_addr_config = 0;
	u32 mc_arb_ramcfg;
	u32 cgts_tcc_disable;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 cgts_sm_ctrl_reg;
	u32 hdp_host_path_cntl;
	u32 tmp;
	u32 disabled_rb_mask;
	int i, j;

	switch (rdev->family) {
	case CHIP_CAYMAN:
		rdev->config.cayman.max_shader_engines = 2;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 8;
		rdev->config.cayman.max_simds_per_se = 12;
		rdev->config.cayman.max_backends_per_se = 4;
		rdev->config.cayman.max_texture_channel_caches = 8;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sx_max_export_size = 256;
		rdev->config.cayman.sx_max_export_pos_size = 64;
		rdev->config.cayman.sx_max_export_smx_size = 192;
		rdev->config.cayman.max_hw_contexts = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x100;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_ARUBA:
	default:
		rdev->config.cayman.max_shader_engines = 1;
		rdev->config.cayman.max_pipes_per_simd = 4;
		rdev->config.cayman.max_tile_pipes = 2;
		if ((rdev->pdev->device == 0x9900) ||
		    (rdev->pdev->device == 0x9901) ||
		    (rdev->pdev->device == 0x9905) ||
		    (rdev->pdev->device == 0x9906) ||
		    (rdev->pdev->device == 0x9907) ||
		    (rdev->pdev->device == 0x9908) ||
		    (rdev->pdev->device == 0x9909) ||
		    (rdev->pdev->device == 0x990B) ||
		    (rdev->pdev->device == 0x990C) ||
		    (rdev->pdev->device == 0x990F) ||
		    (rdev->pdev->device == 0x9910) ||
		    (rdev->pdev->device == 0x9917) ||
		    (rdev->pdev->device == 0x9999) ||
		    (rdev->pdev->device == 0x999C)) {
			rdev->config.cayman.max_simds_per_se = 6;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9903) ||
			   (rdev->pdev->device == 0x9904) ||
			   (rdev->pdev->device == 0x990A) ||
			   (rdev->pdev->device == 0x990D) ||
			   (rdev->pdev->device == 0x990E) ||
			   (rdev->pdev->device == 0x9913) ||
			   (rdev->pdev->device == 0x9918) ||
			   (rdev->pdev->device == 0x999D)) {
			rdev->config.cayman.max_simds_per_se = 4;
			rdev->config.cayman.max_backends_per_se = 2;
			rdev->config.cayman.max_hw_contexts = 8;
			rdev->config.cayman.sx_max_export_size = 256;
			rdev->config.cayman.sx_max_export_pos_size = 64;
			rdev->config.cayman.sx_max_export_smx_size = 192;
		} else if ((rdev->pdev->device == 0x9919) ||
			   (rdev->pdev->device == 0x9990) ||
			   (rdev->pdev->device == 0x9991) ||
			   (rdev->pdev->device == 0x9994) ||
			   (rdev->pdev->device == 0x9995) ||
			   (rdev->pdev->device == 0x9996) ||
			   (rdev->pdev->device == 0x999A) ||
			   (rdev->pdev->device == 0x99A0)) {
			rdev->config.cayman.max_simds_per_se = 3;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		} else {
			rdev->config.cayman.max_simds_per_se = 2;
			rdev->config.cayman.max_backends_per_se = 1;
			rdev->config.cayman.max_hw_contexts = 4;
			rdev->config.cayman.sx_max_export_size = 128;
			rdev->config.cayman.sx_max_export_pos_size = 32;
			rdev->config.cayman.sx_max_export_smx_size = 96;
		}
		rdev->config.cayman.max_texture_channel_caches = 2;
		rdev->config.cayman.max_gprs = 256;
		rdev->config.cayman.max_threads = 256;
		rdev->config.cayman.max_gs_threads = 32;
		rdev->config.cayman.max_stack_entries = 512;
		rdev->config.cayman.sx_num_of_sets = 8;
		rdev->config.cayman.sq_num_cf_insts = 2;

		rdev->config.cayman.sc_prim_fifo_size = 0x40;
		rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
	WREG32(SRBM_INT_CNTL, 0x1);
	WREG32(SRBM_INT_ACK, 0x1);

	evergreen_fix_pci_max_read_req_size(rdev);

	RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

	tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (rdev->config.cayman.mem_row_size_in_kb > 4)
		rdev->config.cayman.mem_row_size_in_kb = 4;
	/* XXX use MC settings? */
	rdev->config.cayman.shader_engine_tile_size = 32;
	rdev->config.cayman.num_gpus = 1;
	rdev->config.cayman.multi_gpu_tile_size = 64;

	tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
	rdev->config.cayman.num_tile_pipes = (1 << tmp);
	tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
	rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
	tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
	rdev->config.cayman.num_shader_engines = tmp + 1;
	tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
	rdev->config.cayman.num_gpus = tmp + 1;
	tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
	rdev->config.cayman.multi_gpu_tile_size = 1 << tmp;
	tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
	rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;


	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.cayman.tile_config = 0;
	switch (rdev->config.cayman.num_tile_pipes) {
	case 1:
	default:
		rdev->config.cayman.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.cayman.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.cayman.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.cayman.tile_config |= (3 << 0);
		break;
	}

	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
	if (rdev->flags & RADEON_IS_IGP)
		rdev->config.cayman.tile_config |= 1 << 4;
	else {
		switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
		case 0: /* four banks */
			rdev->config.cayman.tile_config |= 0 << 4;
			break;
		case 1: /* eight banks */
			rdev->config.cayman.tile_config |= 1 << 4;
			break;
		case 2: /* sixteen banks */
		default:
			rdev->config.cayman.tile_config |= 2 << 4;
			break;
		}
	}
	rdev->config.cayman.tile_config |=
		((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
	rdev->config.cayman.tile_config |=
		((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;

	tmp = 0;
	for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
		u32 rb_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	/* enabled RBs are just the ones not disabled :) */
	disabled_rb_mask = tmp;
	tmp = 0;
	for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < (rdev->config.cayman.max_backends_per_se * rdev->config.cayman.max_shader_engines); i++)
			disabled_rb_mask &= ~(1 << i);
	}

	for (i = 0; i < rdev->config.cayman.max_shader_engines; i++) {
		u32 simd_disable_bitmap;

		WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
		simd_disable_bitmap = (RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffff0000) >> 16;
		simd_disable_bitmap |= 0xffffffff << rdev->config.cayman.max_simds_per_se;
		tmp <<= 16;
		tmp |= simd_disable_bitmap;
	}
	rdev->config.cayman.active_simds = hweight32(~tmp);

	WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
	WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);

	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	if (ASIC_IS_DCE6(rdev))
		WREG32(DMIF_ADDR_CALC, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);

	if ((rdev->config.cayman.max_backends_per_se == 1) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		if ((disabled_rb_mask & 3) == 2) {
			/* RB1 disabled, RB0 enabled */
			tmp = 0x00000000;
		} else {
			/* RB0 disabled, RB1 enabled */
			tmp = 0x11111111;
		}
	} else {
		tmp = gb_addr_config & NUM_PIPES_MASK;
		tmp = r6xx_remap_render_backend(rdev, tmp,
						rdev->config.cayman.max_backends_per_se *
						rdev->config.cayman.max_shader_engines,
						CAYMAN_MAX_BACKENDS, disabled_rb_mask);
	}
	rdev->config.cayman.backend_map = tmp;
	WREG32(GB_BACKEND_MAP, tmp);

	cgts_tcc_disable = 0xffff0000;
	for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
		cgts_tcc_disable &= ~(1 << (16 + i));
	WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
	WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);

	/* reprogram the shader complex */
	cgts_sm_ctrl_reg = RREG32(CGTS_SM_CTRL_REG);
	for (i = 0; i < 16; i++)
		WREG32(CGTS_SM_CTRL_REG, OVERRIDE);
	WREG32(CGTS_SM_CTRL_REG, cgts_sm_ctrl_reg);

	/* set HW defaults for 3D engine */
	WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));

	sx_debug_1 = RREG32(SX_DEBUG_1);
	sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, sx_debug_1);

	smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
	smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
	smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
	WREG32(SMX_DC_CTL0, smx_dc_ctl0);

	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);

	/* need to be explicitly zeroed */
	WREG32(VGT_OFFCHIP_LDS_BASE, 0);
	WREG32(SQ_LSTMP_RING_BASE, 0);
	WREG32(SQ_HSTMP_RING_BASE, 0);
	WREG32(SQ_ESTMP_RING_BASE, 0);
	WREG32(SQ_GSTMP_RING_BASE, 0);
	WREG32(SQ_VSTMP_RING_BASE, 0);
	WREG32(SQ_PSTMP_RING_BASE, 0);

	WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);

	WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
					POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
					SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));

	WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
				 SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
				 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));


	WREG32(VGT_NUM_INSTANCES, 1);

	WREG32(CP_PERFMON_CNTL, 0);

	WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
				  FETCH_FIFO_HIWATER(0x4) |
				  DONE_FIFO_HIWATER(0xe0) |
				  ALU_UPDATE_FIFO_HIWATER(0x8)));

	WREG32(SQ_GPR_RESOURCE_MGMT_1, NUM_CLAUSE_TEMP_GPRS(4));
	WREG32(SQ_CONFIG, (VC_ENABLE |
			   EXPORT_SRC_C |
			   GFX_PRIO(0) |
			   CS1_PRIO(0) |
			   CS2_PRIO(1)));
	WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, DYN_GPR_ENABLE);

	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
					  FORCE_EOV_MAX_REZ_CNT(255)));

	WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
	       AUTO_INVLD_EN(ES_AND_GS_AUTO));

	WREG32(VGT_GS_VERTEX_REUSE, 16);
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(CB_PERF_CTR0_SEL_0, 0);
	WREG32(CB_PERF_CTR0_SEL_1, 0);
	WREG32(CB_PERF_CTR1_SEL_0, 0);
	WREG32(CB_PERF_CTR1_SEL_1, 0);
	WREG32(CB_PERF_CTR2_SEL_0, 0);
	WREG32(CB_PERF_CTR2_SEL_1, 0);
	WREG32(CB_PERF_CTR3_SEL_0, 0);
	WREG32(CB_PERF_CTR3_SEL_1, 0);

	tmp = RREG32(HDP_MISC_CNTL);
	tmp |= HDP_FLUSH_INVALIDATE_CACHE;
	WREG32(HDP_MISC_CNTL, tmp);

	hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));

	udelay(50);

	/* set clockgating golden values on TN */
	if (rdev->family == CHIP_ARUBA) {
		tmp = RREG32_CG(CG_CGTT_LOCAL_0);
		tmp &= ~0x00380000;
		WREG32_CG(CG_CGTT_LOCAL_0, tmp);
		tmp = RREG32_CG(CG_CGTT_LOCAL_1);
		tmp &= ~0x0e000000;
		WREG32_CG(CG_CGTT_LOCAL_1, tmp);
	}
}

/*
 * GART
 */
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-7 are the VM contexts 0-7 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}

static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
	int i, r;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       (0xA << 7) |
	       ENABLE_L1_TLB |
	       ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_FRAGMENT_PROCESSING |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       BANK_SELECT(6) |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	WREG32(0x15D4, 0);
	WREG32(0x15D8, 0);
	WREG32(0x15DC, 0);

	/* empty context1-7 */
	/* Assign the pt base to something valid for now; the pts used for
	 * the VMs are determined by the application and set up and assigned
	 * on the fly in the vm part of radeon_gart.c
	 */
	for (i = 1; i < 8; i++) {
		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
			rdev->vm_manager.max_pfn - 1);
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
		       rdev->vm_manager.saved_table_addr[i]);
	}

	/* enable context1-7 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 4);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				PAGE_TABLE_BLOCK_SIZE(radeon_vm_block_size - 9) |
				RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
				PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
				VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
				READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
				READ_PROTECTION_FAULT_ENABLE_DEFAULT |
				WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);

	cayman_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}

static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
	unsigned i;

	for (i = 1; i < 8; ++i) {
		rdev->vm_manager.saved_table_addr[i] = RREG32(
			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2));
	}

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING |
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
	radeon_gart_table_vram_unpin(rdev);
}

static void cayman_pcie_gart_fini(struct radeon_device *rdev)
{
	cayman_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}

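/*
 * The CP interrupt control register is banked per ring: SRBM_GFX_CNTL
 * selects which of the three rings the following CP_INT_CNTL write
 * lands on.
 */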
void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
			      int ring, u32 cp_int_cntl)
{
	WREG32(SRBM_GFX_CNTL, RINGID(ring));
	WREG32(CP_INT_CNTL, cp_int_cntl);
}

/*
 * CP.
 */
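/*
 * Fence emission: a SURFACE_SYNC packet flushes the read caches for this
 * VMID, then an EVENT_WRITE_EOP packet writes the 32-bit fence sequence
 * number to the fence address and raises an interrupt once the caches
 * have been flushed and invalidated.
 */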
void cayman_fence_ring_emit(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
	radeon_ring_write(ring, fence->seq);
	radeon_ring_write(ring, 0);
}

void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
	u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	if (ring->rptr_save_reg) {
		uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_START) >> 2));
		radeon_ring_write(ring, next_rptr);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw | (vm_id << 24));

	/* flush read cache over gart for this vmid */
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
	radeon_ring_write(ring, 0xFFFFFFFF);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
}

static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
			radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
		WREG32(SCRATCH_UMSK, 0);
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	}
}

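/*
 * When writeback is enabled the CP mirrors its read pointer into system
 * memory, so the rptr can be fetched without an MMIO read; otherwise we
 * fall back to reading the per-ring CP_RBn_RPTR register.
 */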
u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else {
		if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
			rptr = RREG32(CP_RB0_RPTR);
		else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
			rptr = RREG32(CP_RB1_RPTR);
		else
			rptr = RREG32(CP_RB2_RPTR);
	}

	return rptr;
}

u32 cayman_gfx_get_wptr(struct radeon_device *rdev,
			struct radeon_ring *ring)
{
	u32 wptr;

	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX)
		wptr = RREG32(CP_RB0_WPTR);
	else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX)
		wptr = RREG32(CP_RB1_WPTR);
	else
		wptr = RREG32(CP_RB2_WPTR);

	return wptr;
}

void cayman_gfx_set_wptr(struct radeon_device *rdev,
			 struct radeon_ring *ring)
{
	if (ring->idx == RADEON_RING_TYPE_GFX_INDEX) {
		WREG32(CP_RB0_WPTR, ring->wptr);
		(void)RREG32(CP_RB0_WPTR);
	} else if (ring->idx == CAYMAN_RING_TYPE_CP1_INDEX) {
		WREG32(CP_RB1_WPTR, ring->wptr);
		(void)RREG32(CP_RB1_WPTR);
	} else {
		WREG32(CP_RB2_WPTR, ring->wptr);
		(void)RREG32(CP_RB2_WPTR);
	}
}

static int cayman_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	cayman_cp_enable(rdev, false);

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < CAYMAN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < CAYMAN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

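/*
 * Bring up the GFX ring: issue ME_INITIALIZE, then replay the default
 * state buffer (cayman_default_state) between CLEAR_STATE preamble
 * markers so context 0 starts from known register values.
 */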
static int cayman_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	cayman_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < cayman_default_size; i++)
		radeon_ring_write(ring, cayman_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* Clear consts */
	radeon_ring_write(ring, 0xc0036f00);
	radeon_ring_write(ring, 0x00000bc4);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);
	radeon_ring_write(ring, 0xffffffff);

	radeon_ring_write(ring, 0xc0026900);
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring, false);

	/* XXX init other rings */

	return 0;
}

static void cayman_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	cayman_cp_enable(rdev, false);
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

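/*
 * Resume sequence: soft-reset the CP-related GRBM blocks, program each
 * ring's size, rptr writeback address and base (writing the base resets
 * all three rings), zero the read/write pointers, then start the rings
 * and ring-test cp0.
 */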
static int cayman_cp_resume(struct radeon_device *rdev)
{
	static const int ridx[] = {
		RADEON_RING_TYPE_GFX_INDEX,
		CAYMAN_RING_TYPE_CP1_INDEX,
		CAYMAN_RING_TYPE_CP2_INDEX
	};
	static const unsigned cp_rb_cntl[] = {
		CP_RB0_CNTL,
		CP_RB1_CNTL,
		CP_RB2_CNTL,
	};
	static const unsigned cp_rb_rptr_addr[] = {
		CP_RB0_RPTR_ADDR,
		CP_RB1_RPTR_ADDR,
		CP_RB2_RPTR_ADDR
	};
	static const unsigned cp_rb_rptr_addr_hi[] = {
		CP_RB0_RPTR_ADDR_HI,
		CP_RB1_RPTR_ADDR_HI,
		CP_RB2_RPTR_ADDR_HI
	};
	static const unsigned cp_rb_base[] = {
		CP_RB0_BASE,
		CP_RB1_BASE,
		CP_RB2_BASE
	};
	static const unsigned cp_rb_rptr[] = {
		CP_RB0_RPTR,
		CP_RB1_RPTR,
		CP_RB2_RPTR
	};
	static const unsigned cp_rb_wptr[] = {
		CP_RB0_WPTR,
		CP_RB1_WPTR,
		CP_RB2_WPTR
	};
	struct radeon_ring *ring;
	int i, r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, (1 << 27));

	/* set the wb address whether it's enabled or not */
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
	WREG32(SCRATCH_UMSK, 0xff);

	for (i = 0; i < 3; ++i) {
		uint32_t rb_cntl;
		uint64_t addr;

		/* Set ring buffer size */
		ring = &rdev->ring[ridx[i]];
		rb_cntl = order_base_2(ring->ring_size / 8);
		rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
		rb_cntl |= BUF_SWAP_32BIT;
#endif
		WREG32(cp_rb_cntl[i], rb_cntl);

		/* set the wb address whether it's enabled or not */
		addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
		WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
		WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
	}

	/* set the rb base addr, this causes an internal reset of ALL rings */
	for (i = 0; i < 3; ++i) {
		ring = &rdev->ring[ridx[i]];
		WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
	}

	for (i = 0; i < 3; ++i) {
		/* Initialize the ring buffer's read and write pointers */
		ring = &rdev->ring[ridx[i]];
		WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);

		ring->wptr = 0;
		WREG32(cp_rb_rptr[i], 0);
		WREG32(cp_rb_wptr[i], ring->wptr);

		mdelay(1);
		WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
	}

	/* start the rings */
	cayman_cp_start(rdev);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1705 	/* this only tests cp0 */
1706 	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1707 	if (r) {
1708 		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1709 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1710 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1711 		return r;
1712 	}
1713 
1714 	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
1715 		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1716 
1717 	return 0;
1718 }
1719 
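/**
 * cayman_gpu_check_soft_reset - determine which blocks are hung
 *
 * @rdev: radeon_device pointer
 *
 * Check GRBM_STATUS, the DMA status registers, SRBM_STATUS,
 * SRBM_STATUS2 and VM_L2_STATUS and build a mask of
 * RADEON_RESET_* flags for the blocks that appear hung (cayman).
 * Returns the reset mask; MC busy is cleared from the mask as
 * the MC is most likely just busy, not hung.
 */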
1720 u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
1721 {
1722 	u32 reset_mask = 0;
1723 	u32 tmp;
1724 
1725 	/* GRBM_STATUS */
1726 	tmp = RREG32(GRBM_STATUS);
1727 	if (tmp & (PA_BUSY | SC_BUSY |
1728 		   SH_BUSY | SX_BUSY |
1729 		   TA_BUSY | VGT_BUSY |
1730 		   DB_BUSY | CB_BUSY |
1731 		   GDS_BUSY | SPI_BUSY |
1732 		   IA_BUSY | IA_BUSY_NO_DMA))
1733 		reset_mask |= RADEON_RESET_GFX;
1734 
1735 	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
1736 		   CP_BUSY | CP_COHERENCY_BUSY))
1737 		reset_mask |= RADEON_RESET_CP;
1738 
1739 	if (tmp & GRBM_EE_BUSY)
1740 		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1741 
1742 	/* DMA_STATUS_REG 0 */
1743 	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1744 	if (!(tmp & DMA_IDLE))
1745 		reset_mask |= RADEON_RESET_DMA;
1746 
1747 	/* DMA_STATUS_REG 1 */
1748 	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1749 	if (!(tmp & DMA_IDLE))
1750 		reset_mask |= RADEON_RESET_DMA1;
1751 
1752 	/* SRBM_STATUS2 */
1753 	tmp = RREG32(SRBM_STATUS2);
1754 	if (tmp & DMA_BUSY)
1755 		reset_mask |= RADEON_RESET_DMA;
1756 
1757 	if (tmp & DMA1_BUSY)
1758 		reset_mask |= RADEON_RESET_DMA1;
1759 
1760 	/* SRBM_STATUS */
1761 	tmp = RREG32(SRBM_STATUS);
1762 	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
1763 		reset_mask |= RADEON_RESET_RLC;
1764 
1765 	if (tmp & IH_BUSY)
1766 		reset_mask |= RADEON_RESET_IH;
1767 
1768 	if (tmp & SEM_BUSY)
1769 		reset_mask |= RADEON_RESET_SEM;
1770 
1771 	if (tmp & GRBM_RQ_PENDING)
1772 		reset_mask |= RADEON_RESET_GRBM;
1773 
1774 	if (tmp & VMC_BUSY)
1775 		reset_mask |= RADEON_RESET_VMC;
1776 
1777 	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
1778 		   MCC_BUSY | MCD_BUSY))
1779 		reset_mask |= RADEON_RESET_MC;
1780 
1781 	if (evergreen_is_display_hung(rdev))
1782 		reset_mask |= RADEON_RESET_DISPLAY;
1783 
1784 	/* VM_L2_STATUS */
1785 	tmp = RREG32(VM_L2_STATUS);
1786 	if (tmp & L2_BUSY)
1787 		reset_mask |= RADEON_RESET_VMC;
1788 
1789 	/* Skip the MC reset as it's most likely not hung, just busy */
1790 	if (reset_mask & RADEON_RESET_MC) {
1791 		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
1792 		reset_mask &= ~RADEON_RESET_MC;
1793 	}
1794 
1795 	return reset_mask;
1796 }
1797 
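/**
 * cayman_gpu_soft_reset - soft reset the blocks in a reset mask
 *
 * @rdev: radeon_device pointer
 * @reset_mask: mask of RADEON_RESET_* flags to act on
 *
 * Halt the CP and DMA engines and stop the MC, then pulse the
 * matching bits in GRBM_SOFT_RESET and SRBM_SOFT_RESET before
 * resuming the MC (cayman).
 */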
1798 static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1799 {
1800 	struct evergreen_mc_save save;
1801 	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1802 	u32 tmp;
1803 
1804 	if (reset_mask == 0)
1805 		return;
1806 
1807 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1808 
1809 	evergreen_print_gpu_status_regs(rdev);
1810 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
1811 		 RREG32(0x14F8));
1812 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1813 		 RREG32(0x14D8));
1814 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
1815 		 RREG32(0x14FC));
1816 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1817 		 RREG32(0x14DC));
1818 
1819 	/* Disable CP parsing/prefetching */
1820 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1821 
1822 	if (reset_mask & RADEON_RESET_DMA) {
1823 		/* dma0 */
1824 		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1825 		tmp &= ~DMA_RB_ENABLE;
1826 		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1827 	}
1828 
1829 	if (reset_mask & RADEON_RESET_DMA1) {
1830 		/* dma1 */
1831 		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1832 		tmp &= ~DMA_RB_ENABLE;
1833 		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1834 	}
1835 
1836 	udelay(50);
1837 
1838 	evergreen_mc_stop(rdev, &save);
1839 	if (evergreen_mc_wait_for_idle(rdev)) {
1840 		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1841 	}
1842 
1843 	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1844 		grbm_soft_reset = SOFT_RESET_CB |
1845 			SOFT_RESET_DB |
1846 			SOFT_RESET_GDS |
1847 			SOFT_RESET_PA |
1848 			SOFT_RESET_SC |
1849 			SOFT_RESET_SPI |
1850 			SOFT_RESET_SH |
1851 			SOFT_RESET_SX |
1852 			SOFT_RESET_TC |
1853 			SOFT_RESET_TA |
1854 			SOFT_RESET_VGT |
1855 			SOFT_RESET_IA;
1856 	}
1857 
1858 	if (reset_mask & RADEON_RESET_CP) {
1859 		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
1860 
1861 		srbm_soft_reset |= SOFT_RESET_GRBM;
1862 	}
1863 
1864 	if (reset_mask & RADEON_RESET_DMA)
1865 		srbm_soft_reset |= SOFT_RESET_DMA;
1866 
1867 	if (reset_mask & RADEON_RESET_DMA1)
1868 		srbm_soft_reset |= SOFT_RESET_DMA1;
1869 
1870 	if (reset_mask & RADEON_RESET_DISPLAY)
1871 		srbm_soft_reset |= SOFT_RESET_DC;
1872 
1873 	if (reset_mask & RADEON_RESET_RLC)
1874 		srbm_soft_reset |= SOFT_RESET_RLC;
1875 
1876 	if (reset_mask & RADEON_RESET_SEM)
1877 		srbm_soft_reset |= SOFT_RESET_SEM;
1878 
1879 	if (reset_mask & RADEON_RESET_IH)
1880 		srbm_soft_reset |= SOFT_RESET_IH;
1881 
1882 	if (reset_mask & RADEON_RESET_GRBM)
1883 		srbm_soft_reset |= SOFT_RESET_GRBM;
1884 
1885 	if (reset_mask & RADEON_RESET_VMC)
1886 		srbm_soft_reset |= SOFT_RESET_VMC;
1887 
1888 	if (!(rdev->flags & RADEON_IS_IGP)) {
1889 		if (reset_mask & RADEON_RESET_MC)
1890 			srbm_soft_reset |= SOFT_RESET_MC;
1891 	}
1892 
1893 	if (grbm_soft_reset) {
1894 		tmp = RREG32(GRBM_SOFT_RESET);
1895 		tmp |= grbm_soft_reset;
1896 		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
1897 		WREG32(GRBM_SOFT_RESET, tmp);
1898 		tmp = RREG32(GRBM_SOFT_RESET);
1899 
1900 		udelay(50);
1901 
1902 		tmp &= ~grbm_soft_reset;
1903 		WREG32(GRBM_SOFT_RESET, tmp);
1904 		tmp = RREG32(GRBM_SOFT_RESET);
1905 	}
1906 
1907 	if (srbm_soft_reset) {
1908 		tmp = RREG32(SRBM_SOFT_RESET);
1909 		tmp |= srbm_soft_reset;
1910 		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1911 		WREG32(SRBM_SOFT_RESET, tmp);
1912 		tmp = RREG32(SRBM_SOFT_RESET);
1913 
1914 		udelay(50);
1915 
1916 		tmp &= ~srbm_soft_reset;
1917 		WREG32(SRBM_SOFT_RESET, tmp);
1918 		tmp = RREG32(SRBM_SOFT_RESET);
1919 	}
1920 
1921 	/* Wait a little for things to settle down */
1922 	udelay(50);
1923 
1924 	evergreen_mc_resume(rdev, &save);
1925 	udelay(50);
1926 
1927 	evergreen_print_gpu_status_regs(rdev);
1928 }
1929 
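/**
 * cayman_asic_reset - reset the ASIC
 *
 * @rdev: radeon_device pointer
 * @hard: force a full PCI config reset
 *
 * Perform a PCI config reset if @hard is set, otherwise soft
 * reset the hung blocks and fall back to a PCI config reset if
 * the GPU is still not idle afterwards (cayman).
 * Returns 0.
 */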
1930 int cayman_asic_reset(struct radeon_device *rdev, bool hard)
1931 {
1932 	u32 reset_mask;
1933 
1934 	if (hard) {
1935 		evergreen_gpu_pci_config_reset(rdev);
1936 		return 0;
1937 	}
1938 
1939 	reset_mask = cayman_gpu_check_soft_reset(rdev);
1940 
1941 	if (reset_mask)
1942 		r600_set_bios_scratch_engine_hung(rdev, true);
1943 
1944 	cayman_gpu_soft_reset(rdev, reset_mask);
1945 
1946 	reset_mask = cayman_gpu_check_soft_reset(rdev);
1947 
1948 	if (reset_mask)
1949 		evergreen_gpu_pci_config_reset(rdev);
1950 
1951 	r600_set_bios_scratch_engine_hung(rdev, false);
1952 
1953 	return 0;
1954 }
1955 
1956 /**
1957  * cayman_gfx_is_lockup - Check if the GFX engine is locked up
1958  *
1959  * @rdev: radeon_device pointer
1960  * @ring: radeon_ring structure holding ring information
1961  *
1962  * Check if the GFX engine is locked up.
1963  * Returns true if the engine appears to be locked up, false if not.
1964  */
1965 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1966 {
1967 	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
1968 
1969 	if (!(reset_mask & (RADEON_RESET_GFX |
1970 			    RADEON_RESET_COMPUTE |
1971 			    RADEON_RESET_CP))) {
1972 		radeon_ring_lockup_update(rdev, ring);
1973 		return false;
1974 	}
1975 	return radeon_ring_test_lockup(rdev, ring);
1976 }
1977 
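/**
 * cayman_uvd_init - init the UVD block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize UVD and the UVD ring structure.  On failure UVD
 * support is disabled so the rest of the driver skips it.
 */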
1978 static void cayman_uvd_init(struct radeon_device *rdev)
1979 {
1980 	int r;
1981 
1982 	if (!rdev->has_uvd)
1983 		return;
1984 
1985 	r = radeon_uvd_init(rdev);
1986 	if (r) {
1987 		dev_err(rdev->dev, "failed UVD (%d) init.\n", r);
1988 		/*
1989 		 * At this point rdev->uvd.vcpu_bo is NULL, which makes
1990 		 * uvd_v2_2_resume() fail early, so nothing happens there.
1991 		 * Going through that code would be pointless, hence we
1992 		 * disable UVD here.
1993 		 */
1994 		rdev->has_uvd = false;
1995 		return;
1996 	}
1997 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
1998 	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
1999 }
2000 
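/**
 * cayman_uvd_start - resume UVD and start its fence driver
 *
 * @rdev: radeon_device pointer
 *
 * Resume the UVD hardware and start the fence driver on the UVD
 * ring.  On failure the ring size is set to 0 so the resume code
 * skips the ring.
 */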
2001 static void cayman_uvd_start(struct radeon_device *rdev)
2002 {
2003 	int r;
2004 
2005 	if (!rdev->has_uvd)
2006 		return;
2007 
2008 	r = uvd_v2_2_resume(rdev);
2009 	if (r) {
2010 		dev_err(rdev->dev, "failed UVD resume (%d).\n", r);
2011 		goto error;
2012 	}
2013 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
2014 	if (r) {
2015 		dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
2016 		goto error;
2017 	}
2018 	return;
2019 
2020 error:
2021 	rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
2022 }
2023 
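/**
 * cayman_uvd_resume - bring up the UVD ring
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the UVD ring and the UVD block itself, provided
 * UVD was started successfully.
 */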
2024 static void cayman_uvd_resume(struct radeon_device *rdev)
2025 {
2026 	struct radeon_ring *ring;
2027 	int r;
2028 
2029 	if (!rdev->has_uvd || !rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size)
2030 		return;
2031 
2032 	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
2033 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, PACKET0(UVD_NO_OP, 0));
2034 	if (r) {
2035 		dev_err(rdev->dev, "failed initializing UVD ring (%d).\n", r);
2036 		return;
2037 	}
2038 	r = uvd_v1_0_init(rdev);
2039 	if (r) {
2040 		dev_err(rdev->dev, "failed initializing UVD (%d).\n", r);
2041 		return;
2042 	}
2043 }
2044 
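/**
 * cayman_vce_init - init the VCE block
 *
 * @rdev: radeon_device pointer
 *
 * Initialize VCE and both VCE ring structures.  On failure VCE
 * support is disabled so the rest of the driver skips it.
 */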
2045 static void cayman_vce_init(struct radeon_device *rdev)
2046 {
2047 	int r;
2048 
2049 	/* Only set for CHIP_ARUBA */
2050 	if (!rdev->has_vce)
2051 		return;
2052 
2053 	r = radeon_vce_init(rdev);
2054 	if (r) {
2055 		dev_err(rdev->dev, "failed VCE (%d) init.\n", r);
2056 		/*
2057 		 * At this point rdev->vce.vcpu_bo is NULL, which makes
2058 		 * cayman_vce_start() fail early, so nothing happens there.
2059 		 * Going through that code would be pointless, hence we
2060 		 * disable VCE here.
2061 		 */
2062 		rdev->has_vce = false;
2063 		return;
2064 	}
2065 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
2066 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE1_INDEX], 4096);
2067 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_obj = NULL;
2068 	r600_ring_init(rdev, &rdev->ring[TN_RING_TYPE_VCE2_INDEX], 4096);
2069 }
2070 
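/**
 * cayman_vce_start - resume VCE and start its fence drivers
 *
 * @rdev: radeon_device pointer
 *
 * Resume the VCE hardware and start the fence driver on both VCE
 * rings.  On failure the ring sizes are set to 0 so the resume
 * code skips the rings.
 */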
2071 static void cayman_vce_start(struct radeon_device *rdev)
2072 {
2073 	int r;
2074 
2075 	if (!rdev->has_vce)
2076 		return;
2077 
2078 	r = radeon_vce_resume(rdev);
2079 	if (r) {
2080 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
2081 		goto error;
2082 	}
2083 	r = vce_v1_0_resume(rdev);
2084 	if (r) {
2085 		dev_err(rdev->dev, "failed VCE resume (%d).\n", r);
2086 		goto error;
2087 	}
2088 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE1_INDEX);
2089 	if (r) {
2090 		dev_err(rdev->dev, "failed initializing VCE1 fences (%d).\n", r);
2091 		goto error;
2092 	}
2093 	r = radeon_fence_driver_start_ring(rdev, TN_RING_TYPE_VCE2_INDEX);
2094 	if (r) {
2095 		dev_err(rdev->dev, "failed initializing VCE2 fences (%d).\n", r);
2096 		goto error;
2097 	}
2098 	return;
2099 
2100 error:
2101 	rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size = 0;
2102 	rdev->ring[TN_RING_TYPE_VCE2_INDEX].ring_size = 0;
2103 }
2104 
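/**
 * cayman_vce_resume - bring up the VCE rings
 *
 * @rdev: radeon_device pointer
 *
 * Initialize both VCE rings and the VCE block itself, provided
 * VCE was started successfully.
 */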
2105 static void cayman_vce_resume(struct radeon_device *rdev)
2106 {
2107 	struct radeon_ring *ring;
2108 	int r;
2109 
2110 	if (!rdev->has_vce || !rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_size)
2111 		return;
2112 
2113 	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
2114 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2115 	if (r) {
2116 		dev_err(rdev->dev, "failed initializing VCE1 ring (%d).\n", r);
2117 		return;
2118 	}
2119 	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
2120 	r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
2121 	if (r) {
2122 		dev_err(rdev->dev, "failed initializing VCE2 ring (%d).\n", r);
2123 		return;
2124 	}
2125 	r = vce_v1_0_init(rdev);
2126 	if (r) {
2127 		dev_err(rdev->dev, "failed initializing VCE (%d).\n", r);
2128 		return;
2129 	}
2130 }
2131 
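/**
 * cayman_startup - program all the hardware blocks
 *
 * @rdev: radeon_device pointer
 *
 * Program the MC and GART, load the microcode, allocate the
 * RLC (IGP only) and writeback buffers, start the fence drivers,
 * IRQs, CP, DMA, UVD and VCE, and initialize the IB pool, VM
 * manager and audio (cayman).
 * Returns 0 on success, negative error code on failure.
 */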
2132 static int cayman_startup(struct radeon_device *rdev)
2133 {
2134 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2135 	int r;
2136 
2137 	/* enable pcie gen2 link */
2138 	evergreen_pcie_gen2_enable(rdev);
2139 	/* enable aspm */
2140 	evergreen_program_aspm(rdev);
2141 
2142 	/* scratch needs to be initialized before MC */
2143 	r = r600_vram_scratch_init(rdev);
2144 	if (r)
2145 		return r;
2146 
2147 	evergreen_mc_program(rdev);
2148 
2149 	if (!(rdev->flags & RADEON_IS_IGP) && !rdev->pm.dpm_enabled) {
2150 		r = ni_mc_load_microcode(rdev);
2151 		if (r) {
2152 			DRM_ERROR("Failed to load MC firmware!\n");
2153 			return r;
2154 		}
2155 	}
2156 
2157 	r = cayman_pcie_gart_enable(rdev);
2158 	if (r)
2159 		return r;
2160 	cayman_gpu_init(rdev);
2161 
2162 	/* allocate rlc buffers */
2163 	if (rdev->flags & RADEON_IS_IGP) {
2164 		rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
2165 		rdev->rlc.reg_list_size =
2166 			(u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
2167 		rdev->rlc.cs_data = cayman_cs_data;
2168 		r = sumo_rlc_init(rdev);
2169 		if (r) {
2170 			DRM_ERROR("Failed to init rlc BOs!\n");
2171 			return r;
2172 		}
2173 	}
2174 
2175 	/* allocate wb buffer */
2176 	r = radeon_wb_init(rdev);
2177 	if (r)
2178 		return r;
2179 
2180 	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
2181 	if (r) {
2182 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2183 		return r;
2184 	}
2185 
2186 	cayman_uvd_start(rdev);
2187 	cayman_vce_start(rdev);
2188 
2189 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
2190 	if (r) {
2191 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2192 		return r;
2193 	}
2194 
2195 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
2196 	if (r) {
2197 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
2198 		return r;
2199 	}
2200 
2201 	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2202 	if (r) {
2203 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2204 		return r;
2205 	}
2206 
2207 	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
2208 	if (r) {
2209 		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2210 		return r;
2211 	}
2212 
2213 	/* Enable IRQ */
2214 	if (!rdev->irq.installed) {
2215 		r = radeon_irq_kms_init(rdev);
2216 		if (r)
2217 			return r;
2218 	}
2219 
2220 	r = r600_irq_init(rdev);
2221 	if (r) {
2222 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
2223 		radeon_irq_kms_fini(rdev);
2224 		return r;
2225 	}
2226 	evergreen_irq_set(rdev);
2227 
2228 	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2229 			     RADEON_CP_PACKET2);
2230 	if (r)
2231 		return r;
2232 
2233 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2234 	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2235 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2236 	if (r)
2237 		return r;
2238 
2239 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2240 	r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
2241 			     DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2242 	if (r)
2243 		return r;
2244 
2245 	r = cayman_cp_load_microcode(rdev);
2246 	if (r)
2247 		return r;
2248 	r = cayman_cp_resume(rdev);
2249 	if (r)
2250 		return r;
2251 
2252 	r = cayman_dma_resume(rdev);
2253 	if (r)
2254 		return r;
2255 
2256 	cayman_uvd_resume(rdev);
2257 	cayman_vce_resume(rdev);
2258 
2259 	r = radeon_ib_pool_init(rdev);
2260 	if (r) {
2261 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2262 		return r;
2263 	}
2264 
2265 	r = radeon_vm_manager_init(rdev);
2266 	if (r) {
2267 		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
2268 		return r;
2269 	}
2270 
2271 	r = radeon_audio_init(rdev);
2272 	if (r)
2273 		return r;
2274 
2275 	return 0;
2276 }
2277 
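/**
 * cayman_resume - resume from suspend
 *
 * @rdev: radeon_device pointer
 *
 * Post the card, restore the golden registers, resume power
 * management and run cayman_startup() to bring the hardware back
 * up.  Returns 0 on success, negative error code on failure.
 */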
2278 int cayman_resume(struct radeon_device *rdev)
2279 {
2280 	int r;
2281 
2282 	/* Do not reset the GPU before posting; on rv770 hw, unlike on r500
2283 	 * hw, posting performs the tasks needed to bring the GPU back into
2284 	 * good shape.
2285 	 */
2286 	/* post card */
2287 	atom_asic_init(rdev->mode_info.atom_context);
2288 
2289 	/* init golden registers */
2290 	ni_init_golden_registers(rdev);
2291 
2292 	if (rdev->pm.pm_method == PM_METHOD_DPM)
2293 		radeon_pm_resume(rdev);
2294 
2295 	rdev->accel_working = true;
2296 	r = cayman_startup(rdev);
2297 	if (r) {
2298 		DRM_ERROR("cayman startup failed on resume\n");
2299 		rdev->accel_working = false;
2300 		return r;
2301 	}
2302 	return r;
2303 }
2304 
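/**
 * cayman_suspend - suspend the ASIC
 *
 * @rdev: radeon_device pointer
 *
 * Stop power management, audio and the VM manager, halt the CP,
 * DMA and UVD blocks, then disable interrupts, writeback and the
 * GART.  Returns 0.
 */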
2305 int cayman_suspend(struct radeon_device *rdev)
2306 {
2307 	radeon_pm_suspend(rdev);
2308 	radeon_audio_fini(rdev);
2309 	radeon_vm_manager_fini(rdev);
2310 	cayman_cp_enable(rdev, false);
2311 	cayman_dma_stop(rdev);
2312 	if (rdev->has_uvd) {
2313 		radeon_uvd_suspend(rdev);
2314 		uvd_v1_0_fini(rdev);
2315 	}
2316 	evergreen_irq_suspend(rdev);
2317 	radeon_wb_disable(rdev);
2318 	cayman_pcie_gart_disable(rdev);
2319 	return 0;
2320 }
2321 
2322 /* The plan is to move initialization into this function and to use
2323  * helper functions so that radeon_device_init does little more than
2324  * call ASIC-specific functions. This should also allow us to remove
2325  * a bunch of callbacks like vram_info.
2327  */
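/**
 * cayman_init - asic specific driver and hw init
 *
 * @rdev: radeon_device pointer
 *
 * Read and post the BIOS, set up clocks, the memory controller,
 * the memory manager, firmware, power management and the rings,
 * then run cayman_startup().
 * Returns 0 on success, negative error code on failure.
 */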
2328 int cayman_init(struct radeon_device *rdev)
2329 {
2330 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2331 	int r;
2332 
2333 	/* Read BIOS */
2334 	if (!radeon_get_bios(rdev)) {
2335 		if (ASIC_IS_AVIVO(rdev))
2336 			return -EINVAL;
2337 	}
2338 	/* Must be an ATOMBIOS */
2339 	if (!rdev->is_atom_bios) {
2340 		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
2341 		return -EINVAL;
2342 	}
2343 	r = radeon_atombios_init(rdev);
2344 	if (r)
2345 		return r;
2346 
2347 	/* Post card if necessary */
2348 	if (!radeon_card_posted(rdev)) {
2349 		if (!rdev->bios) {
2350 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2351 			return -EINVAL;
2352 		}
2353 		DRM_INFO("GPU not posted. posting now...\n");
2354 		atom_asic_init(rdev->mode_info.atom_context);
2355 	}
2356 	/* init golden registers */
2357 	ni_init_golden_registers(rdev);
2358 	/* Initialize scratch registers */
2359 	r600_scratch_init(rdev);
2360 	/* Initialize surface registers */
2361 	radeon_surface_init(rdev);
2362 	/* Initialize clocks */
2363 	radeon_get_clock_info(rdev_to_drm(rdev));
2364 	/* Fence driver */
2365 	radeon_fence_driver_init(rdev);
2366 	/* initialize memory controller */
2367 	r = evergreen_mc_init(rdev);
2368 	if (r)
2369 		return r;
2370 	/* Memory manager */
2371 	r = radeon_bo_init(rdev);
2372 	if (r)
2373 		return r;
2374 
2375 	if (rdev->flags & RADEON_IS_IGP) {
2376 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2377 			r = ni_init_microcode(rdev);
2378 			if (r) {
2379 				DRM_ERROR("Failed to load firmware!\n");
2380 				return r;
2381 			}
2382 		}
2383 	} else {
2384 		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
2385 			r = ni_init_microcode(rdev);
2386 			if (r) {
2387 				DRM_ERROR("Failed to load firmware!\n");
2388 				return r;
2389 			}
2390 		}
2391 	}
2392 
2393 	/* Initialize power management */
2394 	radeon_pm_init(rdev);
2395 
2396 	ring->ring_obj = NULL;
2397 	r600_ring_init(rdev, ring, 1024 * 1024);
2398 
2399 	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2400 	ring->ring_obj = NULL;
2401 	r600_ring_init(rdev, ring, 64 * 1024);
2402 
2403 	ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
2404 	ring->ring_obj = NULL;
2405 	r600_ring_init(rdev, ring, 64 * 1024);
2406 
2407 	cayman_uvd_init(rdev);
2408 	cayman_vce_init(rdev);
2409 
2410 	rdev->ih.ring_obj = NULL;
2411 	r600_ih_ring_init(rdev, 64 * 1024);
2412 
2413 	r = r600_pcie_gart_init(rdev);
2414 	if (r)
2415 		return r;
2416 
2417 	rdev->accel_working = true;
2418 	r = cayman_startup(rdev);
2419 	if (r) {
2420 		dev_err(rdev->dev, "disabling GPU acceleration\n");
2421 		cayman_cp_fini(rdev);
2422 		cayman_dma_fini(rdev);
2423 		r600_irq_fini(rdev);
2424 		if (rdev->flags & RADEON_IS_IGP)
2425 			sumo_rlc_fini(rdev);
2426 		radeon_wb_fini(rdev);
2427 		radeon_ib_pool_fini(rdev);
2428 		radeon_vm_manager_fini(rdev);
2429 		radeon_irq_kms_fini(rdev);
2430 		cayman_pcie_gart_fini(rdev);
2431 		rdev->accel_working = false;
2432 	}
2433 
2434 	/* Don't start up if the MC ucode is missing.
2435 	 * The default clocks and voltages before the MC ucode
2436 	 * is loaded are not sufficient for advanced operations.
2437 	 *
2438 	 * We can skip this check for TN, because there is no MC
2439 	 * ucode.
2440 	 */
2441 	if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
2442 		DRM_ERROR("radeon: MC ucode required for NI+.\n");
2443 		return -EINVAL;
2444 	}
2445 
2446 	return 0;
2447 }
2448 
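/**
 * cayman_fini - asic specific driver and hw tear down
 *
 * @rdev: radeon_device pointer
 *
 * Undo everything set up by cayman_init() and cayman_startup(),
 * in roughly the reverse order.
 */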
2449 void cayman_fini(struct radeon_device *rdev)
2450 {
2451 	radeon_pm_fini(rdev);
2452 	cayman_cp_fini(rdev);
2453 	cayman_dma_fini(rdev);
2454 	r600_irq_fini(rdev);
2455 	if (rdev->flags & RADEON_IS_IGP)
2456 		sumo_rlc_fini(rdev);
2457 	radeon_wb_fini(rdev);
2458 	radeon_vm_manager_fini(rdev);
2459 	radeon_ib_pool_fini(rdev);
2460 	radeon_irq_kms_fini(rdev);
2461 	uvd_v1_0_fini(rdev);
2462 	radeon_uvd_fini(rdev);
2463 	if (rdev->has_vce)
2464 		radeon_vce_fini(rdev);
2465 	cayman_pcie_gart_fini(rdev);
2466 	r600_vram_scratch_fini(rdev);
2467 	radeon_gem_fini(rdev);
2468 	radeon_fence_driver_fini(rdev);
2469 	radeon_bo_fini(rdev);
2470 	radeon_atombios_fini(rdev);
2471 	kfree(rdev->bios);
2472 	rdev->bios = NULL;
2473 }
2474 
2475 /*
2476  * vm
2477  */
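/**
 * cayman_vm_init - init the VM manager parameters
 *
 * @rdev: radeon_device pointer
 *
 * Set the number of VM contexts (8) and the VRAM base offset,
 * which on IGP parts comes from FUS_MC_VM_FB_OFFSET.
 * Returns 0.
 */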
2478 int cayman_vm_init(struct radeon_device *rdev)
2479 {
2480 	/* number of VMs */
2481 	rdev->vm_manager.nvm = 8;
2482 	/* base offset of vram pages */
2483 	if (rdev->flags & RADEON_IS_IGP) {
2484 		u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
2485 		tmp <<= 22;
2486 		rdev->vm_manager.vram_base_offset = tmp;
2487 	} else
2488 		rdev->vm_manager.vram_base_offset = 0;
2489 	return 0;
2490 }
2491 
2492 void cayman_vm_fini(struct radeon_device *rdev)
2493 {
2494 }
2495 
2496 /**
2497  * cayman_vm_decode_fault - print human readable fault info
2498  *
2499  * @rdev: radeon_device pointer
2500  * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
2501  * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
2502  *
2503  * Print human readable fault information (cayman/TN).
2504  */
2505 void cayman_vm_decode_fault(struct radeon_device *rdev,
2506 			    u32 status, u32 addr)
2507 {
2508 	u32 mc_id = (status & MEMORY_CLIENT_ID_MASK) >> MEMORY_CLIENT_ID_SHIFT;
2509 	u32 vmid = (status & FAULT_VMID_MASK) >> FAULT_VMID_SHIFT;
2510 	u32 protections = (status & PROTECTIONS_MASK) >> PROTECTIONS_SHIFT;
2511 	char *block;
2512 
2513 	switch (mc_id) {
2514 	case 32:
2515 	case 16:
2516 	case 96:
2517 	case 80:
2518 	case 160:
2519 	case 144:
2520 	case 224:
2521 	case 208:
2522 		block = "CB";
2523 		break;
2524 	case 33:
2525 	case 17:
2526 	case 97:
2527 	case 81:
2528 	case 161:
2529 	case 145:
2530 	case 225:
2531 	case 209:
2532 		block = "CB_FMASK";
2533 		break;
2534 	case 34:
2535 	case 18:
2536 	case 98:
2537 	case 82:
2538 	case 162:
2539 	case 146:
2540 	case 226:
2541 	case 210:
2542 		block = "CB_CMASK";
2543 		break;
2544 	case 35:
2545 	case 19:
2546 	case 99:
2547 	case 83:
2548 	case 163:
2549 	case 147:
2550 	case 227:
2551 	case 211:
2552 		block = "CB_IMMED";
2553 		break;
2554 	case 36:
2555 	case 20:
2556 	case 100:
2557 	case 84:
2558 	case 164:
2559 	case 148:
2560 	case 228:
2561 	case 212:
2562 		block = "DB";
2563 		break;
2564 	case 37:
2565 	case 21:
2566 	case 101:
2567 	case 85:
2568 	case 165:
2569 	case 149:
2570 	case 229:
2571 	case 213:
2572 		block = "DB_HTILE";
2573 		break;
2574 	case 38:
2575 	case 22:
2576 	case 102:
2577 	case 86:
2578 	case 166:
2579 	case 150:
2580 	case 230:
2581 	case 214:
2582 		block = "SX";
2583 		break;
2584 	case 39:
2585 	case 23:
2586 	case 103:
2587 	case 87:
2588 	case 167:
2589 	case 151:
2590 	case 231:
2591 	case 215:
2592 		block = "DB_STEN";
2593 		break;
2594 	case 40:
2595 	case 24:
2596 	case 104:
2597 	case 88:
2598 	case 232:
2599 	case 216:
2600 	case 168:
2601 	case 152:
2602 		block = "TC_TFETCH";
2603 		break;
2604 	case 41:
2605 	case 25:
2606 	case 105:
2607 	case 89:
2608 	case 233:
2609 	case 217:
2610 	case 169:
2611 	case 153:
2612 		block = "TC_VFETCH";
2613 		break;
2614 	case 42:
2615 	case 26:
2616 	case 106:
2617 	case 90:
2618 	case 234:
2619 	case 218:
2620 	case 170:
2621 	case 154:
2622 		block = "VC";
2623 		break;
2624 	case 112:
2625 		block = "CP";
2626 		break;
2627 	case 113:
2628 	case 114:
2629 		block = "SH";
2630 		break;
2631 	case 115:
2632 		block = "VGT";
2633 		break;
2634 	case 178:
2635 		block = "IH";
2636 		break;
2637 	case 51:
2638 		block = "RLC";
2639 		break;
2640 	case 55:
2641 		block = "DMA";
2642 		break;
2643 	case 56:
2644 		block = "HDP";
2645 		break;
2646 	default:
2647 		block = "unknown";
2648 		break;
2649 	}
2650 
2651 	printk("VM fault (0x%02x, vmid %d) at page %u, %s from %s (%d)\n",
2652 	       protections, vmid, addr,
2653 	       (status & MEMORY_CLIENT_RW_MASK) ? "write" : "read",
2654 	       block, mc_id);
2655 }
2656 
2657 /**
2658  * cayman_vm_flush - vm flush using the CP
2659  *
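 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM context ID to flush
 * @pd_addr: new page directory base address
 *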
2660  * Update the page table base and flush the VM TLB
2661  * using the CP (cayman-si).
2662  */
2663 void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
2664 		     unsigned vm_id, uint64_t pd_addr)
2665 {
2666 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
2667 	radeon_ring_write(ring, pd_addr >> 12);
2668 
2669 	/* flush hdp cache */
2670 	radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
2671 	radeon_ring_write(ring, 0x1);
2672 
2673 	/* bits 0-7 are the VM contexts 0-7 */
2674 	radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
2675 	radeon_ring_write(ring, 1 << vm_id);
2676 
2677 	/* wait for the invalidate to complete */
2678 	radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
2679 	radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
2680 				 WAIT_REG_MEM_ENGINE(0))); /* me */
2681 	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
2682 	radeon_ring_write(ring, 0);
2683 	radeon_ring_write(ring, 0); /* ref */
2684 	radeon_ring_write(ring, 0); /* mask */
2685 	radeon_ring_write(ring, 0x20); /* poll interval */
2686 
2687 	/* sync PFP to ME, otherwise we might get invalid PFP reads */
2688 	radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
2689 	radeon_ring_write(ring, 0x0);
2690 }
2691 
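/**
 * tn_set_vce_clocks - set the VCE clocks (TN)
 *
 * @rdev: radeon_device pointer
 * @evclk: requested evclk frequency (not programmed here)
 * @ecclk: requested ecclk frequency
 *
 * Look up the dividers for @ecclk in the atom tables, wait for
 * the ECLK to report ready, program the new post divider and
 * wait for the clock to settle again.
 * Returns 0 on success, -ETIMEDOUT if the ECLK never reports ready.
 */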
2692 int tn_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk)
2693 {
2694 	struct atom_clock_dividers dividers;
2695 	int r, i;
2696 
2697 	r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2698 					   ecclk, false, &dividers);
2699 	if (r)
2700 		return r;
2701 
2702 	for (i = 0; i < 100; i++) {
2703 		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2704 			break;
2705 		mdelay(10);
2706 	}
2707 	if (i == 100)
2708 		return -ETIMEDOUT;
2709 
2710 	WREG32_P(CG_ECLK_CNTL, dividers.post_div, ~(ECLK_DIR_CNTL_EN|ECLK_DIVIDER_MASK));
2711 
2712 	for (i = 0; i < 100; i++) {
2713 		if (RREG32(CG_ECLK_STATUS) & ECLK_STATUS)
2714 			break;
2715 		mdelay(10);
2716 	}
2717 	if (i == 100)
2718 		return -ETIMEDOUT;
2719 
2720 	return 0;
2721 }
2722