xref: /dragonfly/sys/dev/drm/radeon/r600_cs.c (revision 0066c2fb)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  */
28 #include <linux/kernel.h>
29 #include <drm/drmP.h>
30 #include "radeon.h"
31 #include "radeon_asic.h"
32 #include "r600d.h"
33 #include "r600_reg_safe.h"
34 
35 static int r600_nomm;
36 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
37 
38 
39 struct r600_cs_track {
40 	/* configuration we miror so that we use same code btw kms/ums */
41 	u32			group_size;
42 	u32			nbanks;
43 	u32			npipes;
44 	/* value we track */
45 	u32			sq_config;
46 	u32			log_nsamples;
47 	u32			nsamples;
48 	u32			cb_color_base_last[8];
49 	struct radeon_bo	*cb_color_bo[8];
50 	u64			cb_color_bo_mc[8];
51 	u64			cb_color_bo_offset[8];
52 	struct radeon_bo	*cb_color_frag_bo[8];
53 	u64			cb_color_frag_offset[8];
54 	struct radeon_bo	*cb_color_tile_bo[8];
55 	u64			cb_color_tile_offset[8];
56 	u32			cb_color_mask[8];
57 	u32			cb_color_info[8];
58 	u32			cb_color_view[8];
59 	u32			cb_color_size_idx[8]; /* unused */
60 	u32			cb_target_mask;
61 	u32			cb_shader_mask;  /* unused */
62 	bool			is_resolve;
63 	u32			cb_color_size[8];
64 	u32			vgt_strmout_en;
65 	u32			vgt_strmout_buffer_en;
66 	struct radeon_bo	*vgt_strmout_bo[4];
67 	u64			vgt_strmout_bo_mc[4]; /* unused */
68 	u32			vgt_strmout_bo_offset[4];
69 	u32			vgt_strmout_size[4];
70 	u32			db_depth_control;
71 	u32			db_depth_info;
72 	u32			db_depth_size_idx;
73 	u32			db_depth_view;
74 	u32			db_depth_size;
75 	u32			db_offset;
76 	struct radeon_bo	*db_bo;
77 	u64			db_bo_mc;
78 	bool			sx_misc_kill_all_prims;
79 	bool			cb_dirty;
80 	bool			db_dirty;
81 	bool			streamout_dirty;
82 	struct radeon_bo	*htile_bo;
83 	u64			htile_offset;
84 	u32			htile_surface;
85 };
86 
87 #define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
88 #define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
89 #define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
90 #define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
91 #define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
92 #define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
93 #define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
94 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
95 
96 struct gpu_formats {
97 	unsigned blockwidth;
98 	unsigned blockheight;
99 	unsigned blocksize;
100 	unsigned valid_color;
101 	enum radeon_family min_family;
102 };
103 
104 static const struct gpu_formats color_formats_table[] = {
105 	/* 8 bit */
106 	FMT_8_BIT(V_038004_COLOR_8, 1),
107 	FMT_8_BIT(V_038004_COLOR_4_4, 1),
108 	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
109 	FMT_8_BIT(V_038004_FMT_1, 0),
110 
111 	/* 16-bit */
112 	FMT_16_BIT(V_038004_COLOR_16, 1),
113 	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
114 	FMT_16_BIT(V_038004_COLOR_8_8, 1),
115 	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
116 	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
117 	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
118 	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
119 	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
120 
121 	/* 24-bit */
122 	FMT_24_BIT(V_038004_FMT_8_8_8),
123 
124 	/* 32-bit */
125 	FMT_32_BIT(V_038004_COLOR_32, 1),
126 	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
127 	FMT_32_BIT(V_038004_COLOR_16_16, 1),
128 	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
129 	FMT_32_BIT(V_038004_COLOR_8_24, 1),
130 	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
131 	FMT_32_BIT(V_038004_COLOR_24_8, 1),
132 	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
133 	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
134 	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
135 	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
136 	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
137 	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
138 	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
139 	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
140 	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
141 	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
142 	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
143 
144 	/* 48-bit */
145 	FMT_48_BIT(V_038004_FMT_16_16_16),
146 	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
147 
148 	/* 64-bit */
149 	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
150 	FMT_64_BIT(V_038004_COLOR_32_32, 1),
151 	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
152 	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
153 	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
154 
155 	FMT_96_BIT(V_038004_FMT_32_32_32),
156 	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
157 
158 	/* 128-bit */
159 	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
160 	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
161 
162 	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
163 	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
164 
165 	/* block compressed formats */
166 	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
167 	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
168 	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
169 	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
170 	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
171 	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
172 	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
173 
174 	/* The other Evergreen formats */
175 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
176 };
177 
178 bool r600_fmt_is_valid_color(u32 format)
179 {
180 	if (format >= ARRAY_SIZE(color_formats_table))
181 		return false;
182 
183 	if (color_formats_table[format].valid_color)
184 		return true;
185 
186 	return false;
187 }
188 
189 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
190 {
191 	if (format >= ARRAY_SIZE(color_formats_table))
192 		return false;
193 
194 	if (family < color_formats_table[format].min_family)
195 		return false;
196 
197 	if (color_formats_table[format].blockwidth > 0)
198 		return true;
199 
200 	return false;
201 }
202 
203 int r600_fmt_get_blocksize(u32 format)
204 {
205 	if (format >= ARRAY_SIZE(color_formats_table))
206 		return 0;
207 
208 	return color_formats_table[format].blocksize;
209 }
210 
211 int r600_fmt_get_nblocksx(u32 format, u32 w)
212 {
213 	unsigned bw;
214 
215 	if (format >= ARRAY_SIZE(color_formats_table))
216 		return 0;
217 
218 	bw = color_formats_table[format].blockwidth;
219 	if (bw == 0)
220 		return 0;
221 
222 	return (w + bw - 1) / bw;
223 }
224 
225 int r600_fmt_get_nblocksy(u32 format, u32 h)
226 {
227 	unsigned bh;
228 
229 	if (format >= ARRAY_SIZE(color_formats_table))
230 		return 0;
231 
232 	bh = color_formats_table[format].blockheight;
233 	if (bh == 0)
234 		return 0;
235 
236 	return (h + bh - 1) / bh;
237 }
238 
239 struct array_mode_checker {
240 	int array_mode;
241 	u32 group_size;
242 	u32 nbanks;
243 	u32 npipes;
244 	u32 nsamples;
245 	u32 blocksize;
246 };
247 
248 /* returns alignment in pixels for pitch/height/depth and bytes for base */
249 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
250 						u32 *pitch_align,
251 						u32 *height_align,
252 						u32 *depth_align,
253 						u64 *base_align)
254 {
255 	u32 tile_width = 8;
256 	u32 tile_height = 8;
257 	u32 macro_tile_width = values->nbanks;
258 	u32 macro_tile_height = values->npipes;
259 	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
260 	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
261 
262 	switch (values->array_mode) {
263 	case ARRAY_LINEAR_GENERAL:
264 		/* technically tile_width/_height for pitch/height */
265 		*pitch_align = 1; /* tile_width */
266 		*height_align = 1; /* tile_height */
267 		*depth_align = 1;
268 		*base_align = 1;
269 		break;
270 	case ARRAY_LINEAR_ALIGNED:
271 		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
272 		*height_align = 1;
273 		*depth_align = 1;
274 		*base_align = values->group_size;
275 		break;
276 	case ARRAY_1D_TILED_THIN1:
277 		*pitch_align = max((u32)tile_width,
278 				   (u32)(values->group_size /
279 					 (tile_height * values->blocksize * values->nsamples)));
280 		*height_align = tile_height;
281 		*depth_align = 1;
282 		*base_align = values->group_size;
283 		break;
284 	case ARRAY_2D_TILED_THIN1:
285 		*pitch_align = max((u32)macro_tile_width * tile_width,
286 				(u32)((values->group_size * values->nbanks) /
287 				(values->blocksize * values->nsamples * tile_width)));
288 		*height_align = macro_tile_height * tile_height;
289 		*depth_align = 1;
290 		*base_align = max(macro_tile_bytes,
291 				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
292 		break;
293 	default:
294 		return -EINVAL;
295 	}
296 
297 	return 0;
298 }
299 
300 static void r600_cs_track_init(struct r600_cs_track *track)
301 {
302 	int i;
303 
304 	/* assume DX9 mode */
305 	track->sq_config = DX9_CONSTS;
306 	for (i = 0; i < 8; i++) {
307 		track->cb_color_base_last[i] = 0;
308 		track->cb_color_size[i] = 0;
309 		track->cb_color_size_idx[i] = 0;
310 		track->cb_color_info[i] = 0;
311 		track->cb_color_view[i] = 0xFFFFFFFF;
312 		track->cb_color_bo[i] = NULL;
313 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
314 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
315 		track->cb_color_frag_bo[i] = NULL;
316 		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
317 		track->cb_color_tile_bo[i] = NULL;
318 		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
319 		track->cb_color_mask[i] = 0xFFFFFFFF;
320 	}
321 	track->is_resolve = false;
322 	track->nsamples = 16;
323 	track->log_nsamples = 4;
324 	track->cb_target_mask = 0xFFFFFFFF;
325 	track->cb_shader_mask = 0xFFFFFFFF;
326 	track->cb_dirty = true;
327 	track->db_bo = NULL;
328 	track->db_bo_mc = 0xFFFFFFFF;
329 	/* assume the biggest format and that htile is enabled */
330 	track->db_depth_info = 7 | (1 << 25);
331 	track->db_depth_view = 0xFFFFC000;
332 	track->db_depth_size = 0xFFFFFFFF;
333 	track->db_depth_size_idx = 0;
334 	track->db_depth_control = 0xFFFFFFFF;
335 	track->db_dirty = true;
336 	track->htile_bo = NULL;
337 	track->htile_offset = 0xFFFFFFFF;
338 	track->htile_surface = 0;
339 
340 	for (i = 0; i < 4; i++) {
341 		track->vgt_strmout_size[i] = 0;
342 		track->vgt_strmout_bo[i] = NULL;
343 		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
344 		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
345 	}
346 	track->streamout_dirty = true;
347 	track->sx_misc_kill_all_prims = false;
348 }
349 
350 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
351 {
352 	struct r600_cs_track *track = p->track;
353 	u32 slice_tile_max, size, tmp;
354 	u32 height, height_align, pitch, pitch_align, depth_align;
355 	u64 base_offset, base_align;
356 	struct array_mode_checker array_check;
357 	volatile u32 *ib = p->ib.ptr;
358 	unsigned array_mode;
359 	u32 format;
360 	/* When resolve is used, the second colorbuffer has always 1 sample. */
361 	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
362 
363 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
364 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
365 	if (!r600_fmt_is_valid_color(format)) {
366 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
367 			 __func__, __LINE__, format,
368 			i, track->cb_color_info[i]);
369 		return -EINVAL;
370 	}
371 	/* pitch in pixels */
372 	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
373 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
374 	slice_tile_max *= 64;
375 	height = slice_tile_max / pitch;
376 	if (height > 8192)
377 		height = 8192;
378 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
379 
380 	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
381 	array_check.array_mode = array_mode;
382 	array_check.group_size = track->group_size;
383 	array_check.nbanks = track->nbanks;
384 	array_check.npipes = track->npipes;
385 	array_check.nsamples = nsamples;
386 	array_check.blocksize = r600_fmt_get_blocksize(format);
387 	if (r600_get_array_mode_alignment(&array_check,
388 					  &pitch_align, &height_align, &depth_align, &base_align)) {
389 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
390 			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
391 			 track->cb_color_info[i]);
392 		return -EINVAL;
393 	}
394 	switch (array_mode) {
395 	case V_0280A0_ARRAY_LINEAR_GENERAL:
396 		break;
397 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
398 		break;
399 	case V_0280A0_ARRAY_1D_TILED_THIN1:
400 		/* avoid breaking userspace */
401 		if (height > 7)
402 			height &= ~0x7;
403 		break;
404 	case V_0280A0_ARRAY_2D_TILED_THIN1:
405 		break;
406 	default:
407 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
408 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
409 			track->cb_color_info[i]);
410 		return -EINVAL;
411 	}
412 
413 	if (!IS_ALIGNED(pitch, pitch_align)) {
414 		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
415 			 __func__, __LINE__, pitch, pitch_align, array_mode);
416 		return -EINVAL;
417 	}
418 	if (!IS_ALIGNED(height, height_align)) {
419 		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
420 			 __func__, __LINE__, height, height_align, array_mode);
421 		return -EINVAL;
422 	}
423 	if (!IS_ALIGNED(base_offset, base_align)) {
424 		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
425 			 base_offset, base_align, array_mode);
426 		return -EINVAL;
427 	}
428 
429 	/* check offset */
430 	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
431 	      r600_fmt_get_blocksize(format) * nsamples;
432 	switch (array_mode) {
433 	default:
434 	case V_0280A0_ARRAY_LINEAR_GENERAL:
435 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
436 		tmp += track->cb_color_view[i] & 0xFF;
437 		break;
438 	case V_0280A0_ARRAY_1D_TILED_THIN1:
439 	case V_0280A0_ARRAY_2D_TILED_THIN1:
440 		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
441 		break;
442 	}
443 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
444 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
445 			/* the initial DDX does bad things with the CB size occasionally */
446 			/* it rounds up height too far for slice tile max but the BO is smaller */
447 			/* r600c,g also seem to flush at bad times in some apps resulting in
448 			 * bogus values here. So for linear just allow anything to avoid breaking
449 			 * broken userspace.
450 			 */
451 		} else {
452 			dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
453 				 __func__, i, array_mode,
454 				 track->cb_color_bo_offset[i], tmp,
455 				 radeon_bo_size(track->cb_color_bo[i]),
456 				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
457 				 r600_fmt_get_nblocksy(format, height),
458 				 r600_fmt_get_blocksize(format));
459 			return -EINVAL;
460 		}
461 	}
462 	/* limit max tile */
463 	tmp = (height * pitch) >> 6;
464 	if (tmp < slice_tile_max)
465 		slice_tile_max = tmp;
466 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
467 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
468 	ib[track->cb_color_size_idx[i]] = tmp;
469 
470 	/* FMASK/CMASK */
471 	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
472 	case V_0280A0_TILE_DISABLE:
473 		break;
474 	case V_0280A0_FRAG_ENABLE:
475 		if (track->nsamples > 1) {
476 			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
477 			/* the tile size is 8x8, but the size is in units of bits.
478 			 * for bytes, do just * 8. */
479 			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
480 
481 			if (bytes + track->cb_color_frag_offset[i] >
482 			    radeon_bo_size(track->cb_color_frag_bo[i])) {
483 				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
484 					 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
485 					 __func__, tile_max, bytes,
486 					 track->cb_color_frag_offset[i],
487 					 radeon_bo_size(track->cb_color_frag_bo[i]));
488 				return -EINVAL;
489 			}
490 		}
491 		/* fall through */
492 	case V_0280A0_CLEAR_ENABLE:
493 	{
494 		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
495 		/* One block = 128x128 pixels, one 8x8 tile has 4 bits..
496 		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
497 		uint32_t bytes = (block_max + 1) * 128;
498 
499 		if (bytes + track->cb_color_tile_offset[i] >
500 		    radeon_bo_size(track->cb_color_tile_bo[i])) {
501 			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
502 				 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
503 				 __func__, block_max, bytes,
504 				 track->cb_color_tile_offset[i],
505 				 radeon_bo_size(track->cb_color_tile_bo[i]));
506 			return -EINVAL;
507 		}
508 		break;
509 	}
510 	default:
511 		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
512 		return -EINVAL;
513 	}
514 	return 0;
515 }
516 
517 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
518 {
519 	struct r600_cs_track *track = p->track;
520 	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
521 	u32 height_align, pitch_align, depth_align;
522 	u32 pitch = 8192;
523 	u32 height = 8192;
524 	u64 base_offset, base_align;
525 	struct array_mode_checker array_check;
526 	int array_mode;
527 	volatile u32 *ib = p->ib.ptr;
528 
529 
530 	if (track->db_bo == NULL) {
531 		dev_warn(p->dev, "z/stencil with no depth buffer\n");
532 		return -EINVAL;
533 	}
534 	switch (G_028010_FORMAT(track->db_depth_info)) {
535 	case V_028010_DEPTH_16:
536 		bpe = 2;
537 		break;
538 	case V_028010_DEPTH_X8_24:
539 	case V_028010_DEPTH_8_24:
540 	case V_028010_DEPTH_X8_24_FLOAT:
541 	case V_028010_DEPTH_8_24_FLOAT:
542 	case V_028010_DEPTH_32_FLOAT:
543 		bpe = 4;
544 		break;
545 	case V_028010_DEPTH_X24_8_32_FLOAT:
546 		bpe = 8;
547 		break;
548 	default:
549 		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
550 		return -EINVAL;
551 	}
552 	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
553 		if (!track->db_depth_size_idx) {
554 			dev_warn(p->dev, "z/stencil buffer size not set\n");
555 			return -EINVAL;
556 		}
557 		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
558 		tmp = (tmp / bpe) >> 6;
559 		if (!tmp) {
560 			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
561 					track->db_depth_size, bpe, track->db_offset,
562 					radeon_bo_size(track->db_bo));
563 			return -EINVAL;
564 		}
565 		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
566 	} else {
567 		size = radeon_bo_size(track->db_bo);
568 		/* pitch in pixels */
569 		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
570 		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
571 		slice_tile_max *= 64;
572 		height = slice_tile_max / pitch;
573 		if (height > 8192)
574 			height = 8192;
575 		base_offset = track->db_bo_mc + track->db_offset;
576 		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
577 		array_check.array_mode = array_mode;
578 		array_check.group_size = track->group_size;
579 		array_check.nbanks = track->nbanks;
580 		array_check.npipes = track->npipes;
581 		array_check.nsamples = track->nsamples;
582 		array_check.blocksize = bpe;
583 		if (r600_get_array_mode_alignment(&array_check,
584 					&pitch_align, &height_align, &depth_align, &base_align)) {
585 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
586 					G_028010_ARRAY_MODE(track->db_depth_info),
587 					track->db_depth_info);
588 			return -EINVAL;
589 		}
590 		switch (array_mode) {
591 		case V_028010_ARRAY_1D_TILED_THIN1:
592 			/* don't break userspace */
593 			height &= ~0x7;
594 			break;
595 		case V_028010_ARRAY_2D_TILED_THIN1:
596 			break;
597 		default:
598 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
599 					G_028010_ARRAY_MODE(track->db_depth_info),
600 					track->db_depth_info);
601 			return -EINVAL;
602 		}
603 
604 		if (!IS_ALIGNED(pitch, pitch_align)) {
605 			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
606 					__func__, __LINE__, pitch, pitch_align, array_mode);
607 			return -EINVAL;
608 		}
609 		if (!IS_ALIGNED(height, height_align)) {
610 			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
611 					__func__, __LINE__, height, height_align, array_mode);
612 			return -EINVAL;
613 		}
614 		if (!IS_ALIGNED(base_offset, base_align)) {
615 			dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
616 					base_offset, base_align, array_mode);
617 			return -EINVAL;
618 		}
619 
620 		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
621 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
622 		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
623 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
624 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
625 					array_mode,
626 					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
627 					radeon_bo_size(track->db_bo));
628 			return -EINVAL;
629 		}
630 	}
631 
632 	/* hyperz */
633 	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
634 		unsigned long size;
635 		unsigned nbx, nby;
636 
637 		if (track->htile_bo == NULL) {
638 			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
639 				 __func__, __LINE__, track->db_depth_info);
640 			return -EINVAL;
641 		}
642 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
643 			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
644 				 __func__, __LINE__, track->db_depth_size);
645 			return -EINVAL;
646 		}
647 
648 		nbx = pitch;
649 		nby = height;
650 		if (G_028D24_LINEAR(track->htile_surface)) {
651 			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
652 			nbx = round_up(nbx, 16 * 8);
653 			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
654 			nby = round_up(nby, track->npipes * 8);
655 		} else {
656 			/* always assume 8x8 htile */
657 			/* align is htile align * 8, htile align vary according to
658 			 * number of pipe and tile width and nby
659 			 */
660 			switch (track->npipes) {
661 			case 8:
662 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
663 				nbx = round_up(nbx, 64 * 8);
664 				nby = round_up(nby, 64 * 8);
665 				break;
666 			case 4:
667 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
668 				nbx = round_up(nbx, 64 * 8);
669 				nby = round_up(nby, 32 * 8);
670 				break;
671 			case 2:
672 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
673 				nbx = round_up(nbx, 32 * 8);
674 				nby = round_up(nby, 32 * 8);
675 				break;
676 			case 1:
677 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
678 				nbx = round_up(nbx, 32 * 8);
679 				nby = round_up(nby, 16 * 8);
680 				break;
681 			default:
682 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
683 					 __func__, __LINE__, track->npipes);
684 				return -EINVAL;
685 			}
686 		}
687 		/* compute number of htile */
688 		nbx = nbx >> 3;
689 		nby = nby >> 3;
690 		/* size must be aligned on npipes * 2K boundary */
691 		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
692 		size += track->htile_offset;
693 
694 		if (size > radeon_bo_size(track->htile_bo)) {
695 			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
696 				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
697 				 size, nbx, nby);
698 			return -EINVAL;
699 		}
700 	}
701 
702 	track->db_dirty = false;
703 	return 0;
704 }
705 
706 static int r600_cs_track_check(struct radeon_cs_parser *p)
707 {
708 	struct r600_cs_track *track = p->track;
709 	u32 tmp;
710 	int r, i;
711 
712 	/* on legacy kernel we don't perform advanced check */
713 	if (p->rdev == NULL)
714 		return 0;
715 
716 	/* check streamout */
717 	if (track->streamout_dirty && track->vgt_strmout_en) {
718 		for (i = 0; i < 4; i++) {
719 			if (track->vgt_strmout_buffer_en & (1 << i)) {
720 				if (track->vgt_strmout_bo[i]) {
721 					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
722 						(u64)track->vgt_strmout_size[i];
723 					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
724 						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
725 							  i, offset,
726 							  radeon_bo_size(track->vgt_strmout_bo[i]));
727 						return -EINVAL;
728 					}
729 				} else {
730 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
731 					return -EINVAL;
732 				}
733 			}
734 		}
735 		track->streamout_dirty = false;
736 	}
737 
738 	if (track->sx_misc_kill_all_prims)
739 		return 0;
740 
741 	/* check that we have a cb for each enabled target, we don't check
742 	 * shader_mask because it seems mesa isn't always setting it :(
743 	 */
744 	if (track->cb_dirty) {
745 		tmp = track->cb_target_mask;
746 
747 		/* We must check both colorbuffers for RESOLVE. */
748 		if (track->is_resolve) {
749 			tmp |= 0xff;
750 		}
751 
752 		for (i = 0; i < 8; i++) {
753 			u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
754 
755 			if (format != V_0280A0_COLOR_INVALID &&
756 			    (tmp >> (i * 4)) & 0xF) {
757 				/* at least one component is enabled */
758 				if (track->cb_color_bo[i] == NULL) {
759 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
760 						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
761 					return -EINVAL;
762 				}
763 				/* perform rewrite of CB_COLOR[0-7]_SIZE */
764 				r = r600_cs_track_validate_cb(p, i);
765 				if (r)
766 					return r;
767 			}
768 		}
769 		track->cb_dirty = false;
770 	}
771 
772 	/* Check depth buffer */
773 	if (track->db_dirty &&
774 	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
775 	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
776 	     G_028800_Z_ENABLE(track->db_depth_control))) {
777 		r = r600_cs_track_validate_db(p);
778 		if (r)
779 			return r;
780 	}
781 
782 	return 0;
783 }
784 
785 /**
786  * r600_cs_packet_parse_vline() - parse userspace VLINE packet
787  * @parser:		parser structure holding parsing context.
788  *
789  * This is an R600-specific function for parsing VLINE packets.
790  * Real work is done by r600_cs_common_vline_parse function.
791  * Here we just set up ASIC-specific register table and call
792  * the common implementation function.
793  */
794 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
795 {
796 	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
797 					      AVIVO_D2MODE_VLINE_START_END};
798 	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
799 					   AVIVO_D2MODE_VLINE_STATUS};
800 
801 	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
802 }
803 
804 /**
805  * r600_cs_common_vline_parse() - common vline parser
806  * @parser:		parser structure holding parsing context.
807  * @vline_start_end:    table of vline_start_end registers
808  * @vline_status:       table of vline_status registers
809  *
810  * Userspace sends a special sequence for VLINE waits.
811  * PACKET0 - VLINE_START_END + value
812  * PACKET3 - WAIT_REG_MEM poll vline status reg
813  * RELOC (P3) - crtc_id in reloc.
814  *
815  * This function parses this and relocates the VLINE START END
816  * and WAIT_REG_MEM packets to the correct crtc.
817  * It also detects a switched off crtc and nulls out the
818  * wait in that case. This function is common for all ASICs that
819  * are R600 and newer. The parsing algorithm is the same, and only
820  * differs in which registers are used.
821  *
822  * Caller is the ASIC-specific function which passes the parser
823  * context and ASIC-specific register table
824  */
825 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
826 			       uint32_t *vline_start_end,
827 			       uint32_t *vline_status)
828 {
829 	struct drm_crtc *crtc;
830 	struct radeon_crtc *radeon_crtc;
831 	struct radeon_cs_packet p3reloc, wait_reg_mem;
832 	int crtc_id;
833 	int r;
834 	uint32_t header, h_idx, reg, wait_reg_mem_info;
835 	volatile uint32_t *ib;
836 
837 	ib = p->ib.ptr;
838 
839 	/* parse the WAIT_REG_MEM */
840 	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
841 	if (r)
842 		return r;
843 
844 	/* check its a WAIT_REG_MEM */
845 	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
846 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
847 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
848 		return -EINVAL;
849 	}
850 
851 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
852 	/* bit 4 is reg (0) or mem (1) */
853 	if (wait_reg_mem_info & 0x10) {
854 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
855 		return -EINVAL;
856 	}
857 	/* bit 8 is me (0) or pfp (1) */
858 	if (wait_reg_mem_info & 0x100) {
859 		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
860 		return -EINVAL;
861 	}
862 	/* waiting for value to be equal */
863 	if ((wait_reg_mem_info & 0x7) != 0x3) {
864 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
865 		return -EINVAL;
866 	}
867 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
868 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
869 		return -EINVAL;
870 	}
871 
872 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
873 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
874 		return -EINVAL;
875 	}
876 
877 	/* jump over the NOP */
878 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
879 	if (r)
880 		return r;
881 
882 	h_idx = p->idx - 2;
883 	p->idx += wait_reg_mem.count + 2;
884 	p->idx += p3reloc.count + 2;
885 
886 	header = radeon_get_ib_value(p, h_idx);
887 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
888 	reg = R600_CP_PACKET0_GET_REG(header);
889 
890 	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
891 	if (!crtc) {
892 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
893 		return -ENOENT;
894 	}
895 	radeon_crtc = to_radeon_crtc(crtc);
896 	crtc_id = radeon_crtc->crtc_id;
897 
898 	if (!crtc->enabled) {
899 		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
900 		ib[h_idx + 2] = PACKET2(0);
901 		ib[h_idx + 3] = PACKET2(0);
902 		ib[h_idx + 4] = PACKET2(0);
903 		ib[h_idx + 5] = PACKET2(0);
904 		ib[h_idx + 6] = PACKET2(0);
905 		ib[h_idx + 7] = PACKET2(0);
906 		ib[h_idx + 8] = PACKET2(0);
907 	} else if (reg == vline_start_end[0]) {
908 		header &= ~R600_CP_PACKET0_REG_MASK;
909 		header |= vline_start_end[crtc_id] >> 2;
910 		ib[h_idx] = header;
911 		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
912 	} else {
913 		DRM_ERROR("unknown crtc reloc\n");
914 		return -EINVAL;
915 	}
916 	return 0;
917 }
918 
919 static int r600_packet0_check(struct radeon_cs_parser *p,
920 				struct radeon_cs_packet *pkt,
921 				unsigned idx, unsigned reg)
922 {
923 	int r;
924 
925 	switch (reg) {
926 	case AVIVO_D1MODE_VLINE_START_END:
927 		r = r600_cs_packet_parse_vline(p);
928 		if (r) {
929 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
930 					idx, reg);
931 			return r;
932 		}
933 		break;
934 	default:
935 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
936 		       reg, idx);
937 		return -EINVAL;
938 	}
939 	return 0;
940 }
941 
942 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
943 				struct radeon_cs_packet *pkt)
944 {
945 	unsigned reg, i;
946 	unsigned idx;
947 	int r;
948 
949 	idx = pkt->idx + 1;
950 	reg = pkt->reg;
951 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
952 		r = r600_packet0_check(p, pkt, idx, reg);
953 		if (r) {
954 			return r;
955 		}
956 	}
957 	return 0;
958 }
959 
960 /**
961  * r600_cs_check_reg() - check if register is authorized or not
962  * @parser: parser structure holding parsing context
963  * @reg: register we are testing
964  * @idx: index into the cs buffer
965  *
966  * This function will test against r600_reg_safe_bm and return 0
967  * if register is safe. If register is not flag as safe this function
968  * will test it against a list of register needind special handling.
969  */
970 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
971 {
972 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
973 	struct radeon_cs_reloc *reloc;
974 	u32 m, i, tmp, *ib;
975 	int r;
976 
977 	i = (reg >> 7);
978 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
979 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
980 		return -EINVAL;
981 	}
982 	m = 1 << ((reg >> 2) & 31);
983 	if (!(r600_reg_safe_bm[i] & m))
984 		return 0;
985 	ib = p->ib.ptr;
986 	switch (reg) {
987 	/* force following reg to 0 in an attempt to disable out buffer
988 	 * which will need us to better understand how it works to perform
989 	 * security check on it (Jerome)
990 	 */
991 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
992 	case R_008C44_SQ_ESGS_RING_SIZE:
993 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
994 	case R_008C54_SQ_ESTMP_RING_SIZE:
995 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
996 	case R_008C74_SQ_FBUF_RING_SIZE:
997 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
998 	case R_008C5C_SQ_GSTMP_RING_SIZE:
999 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
1000 	case R_008C4C_SQ_GSVS_RING_SIZE:
1001 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
1002 	case R_008C6C_SQ_PSTMP_RING_SIZE:
1003 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
1004 	case R_008C7C_SQ_REDUC_RING_SIZE:
1005 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
1006 	case R_008C64_SQ_VSTMP_RING_SIZE:
1007 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
1008 		/* get value to populate the IB don't remove */
1009 		/*tmp =radeon_get_ib_value(p, idx);
1010 		  ib[idx] = 0;*/
1011 		break;
1012 	case SQ_ESGS_RING_BASE:
1013 	case SQ_GSVS_RING_BASE:
1014 	case SQ_ESTMP_RING_BASE:
1015 	case SQ_GSTMP_RING_BASE:
1016 	case SQ_PSTMP_RING_BASE:
1017 	case SQ_VSTMP_RING_BASE:
1018 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1019 		if (r) {
1020 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1021 					"0x%04X\n", reg);
1022 			return -EINVAL;
1023 		}
1024 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1025 		break;
1026 	case SQ_CONFIG:
1027 		track->sq_config = radeon_get_ib_value(p, idx);
1028 		break;
1029 	case R_028800_DB_DEPTH_CONTROL:
1030 		track->db_depth_control = radeon_get_ib_value(p, idx);
1031 		track->db_dirty = true;
1032 		break;
1033 	case R_028010_DB_DEPTH_INFO:
1034 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1035 		    radeon_cs_packet_next_is_pkt3_nop(p)) {
1036 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1037 			if (r) {
1038 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1039 					 "0x%04X\n", reg);
1040 				return -EINVAL;
1041 			}
1042 			track->db_depth_info = radeon_get_ib_value(p, idx);
1043 			ib[idx] &= C_028010_ARRAY_MODE;
1044 			track->db_depth_info &= C_028010_ARRAY_MODE;
1045 			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1046 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1047 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1048 			} else {
1049 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1050 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1051 			}
1052 		} else {
1053 			track->db_depth_info = radeon_get_ib_value(p, idx);
1054 		}
1055 		track->db_dirty = true;
1056 		break;
1057 	case R_028004_DB_DEPTH_VIEW:
1058 		track->db_depth_view = radeon_get_ib_value(p, idx);
1059 		track->db_dirty = true;
1060 		break;
1061 	case R_028000_DB_DEPTH_SIZE:
1062 		track->db_depth_size = radeon_get_ib_value(p, idx);
1063 		track->db_depth_size_idx = idx;
1064 		track->db_dirty = true;
1065 		break;
1066 	case R_028AB0_VGT_STRMOUT_EN:
1067 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
1068 		track->streamout_dirty = true;
1069 		break;
1070 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
1071 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
1072 		track->streamout_dirty = true;
1073 		break;
1074 	case VGT_STRMOUT_BUFFER_BASE_0:
1075 	case VGT_STRMOUT_BUFFER_BASE_1:
1076 	case VGT_STRMOUT_BUFFER_BASE_2:
1077 	case VGT_STRMOUT_BUFFER_BASE_3:
1078 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1079 		if (r) {
1080 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1081 					"0x%04X\n", reg);
1082 			return -EINVAL;
1083 		}
1084 		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1085 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1086 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1087 		track->vgt_strmout_bo[tmp] = reloc->robj;
1088 		track->vgt_strmout_bo_mc[tmp] = reloc->gpu_offset;
1089 		track->streamout_dirty = true;
1090 		break;
1091 	case VGT_STRMOUT_BUFFER_SIZE_0:
1092 	case VGT_STRMOUT_BUFFER_SIZE_1:
1093 	case VGT_STRMOUT_BUFFER_SIZE_2:
1094 	case VGT_STRMOUT_BUFFER_SIZE_3:
1095 		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1096 		/* size in register is DWs, convert to bytes */
1097 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1098 		track->streamout_dirty = true;
1099 		break;
1100 	case CP_COHER_BASE:
1101 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1102 		if (r) {
1103 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1104 					"0x%04X\n", reg);
1105 			return -EINVAL;
1106 		}
1107 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1108 		break;
1109 	case R_028238_CB_TARGET_MASK:
1110 		track->cb_target_mask = radeon_get_ib_value(p, idx);
1111 		track->cb_dirty = true;
1112 		break;
1113 	case R_02823C_CB_SHADER_MASK:
1114 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
1115 		break;
1116 	case R_028C04_PA_SC_AA_CONFIG:
1117 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1118 		track->log_nsamples = tmp;
1119 		track->nsamples = 1 << tmp;
1120 		track->cb_dirty = true;
1121 		break;
1122 	case R_028808_CB_COLOR_CONTROL:
1123 		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1124 		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1125 		track->cb_dirty = true;
1126 		break;
1127 	case R_0280A0_CB_COLOR0_INFO:
1128 	case R_0280A4_CB_COLOR1_INFO:
1129 	case R_0280A8_CB_COLOR2_INFO:
1130 	case R_0280AC_CB_COLOR3_INFO:
1131 	case R_0280B0_CB_COLOR4_INFO:
1132 	case R_0280B4_CB_COLOR5_INFO:
1133 	case R_0280B8_CB_COLOR6_INFO:
1134 	case R_0280BC_CB_COLOR7_INFO:
1135 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1136 		     radeon_cs_packet_next_is_pkt3_nop(p)) {
1137 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1138 			if (r) {
1139 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1140 				return -EINVAL;
1141 			}
1142 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1143 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1144 			if (reloc->tiling_flags & RADEON_TILING_MACRO) {
1145 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1146 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1147 			} else if (reloc->tiling_flags & RADEON_TILING_MICRO) {
1148 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1149 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1150 			}
1151 		} else {
1152 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1153 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1154 		}
1155 		track->cb_dirty = true;
1156 		break;
1157 	case R_028080_CB_COLOR0_VIEW:
1158 	case R_028084_CB_COLOR1_VIEW:
1159 	case R_028088_CB_COLOR2_VIEW:
1160 	case R_02808C_CB_COLOR3_VIEW:
1161 	case R_028090_CB_COLOR4_VIEW:
1162 	case R_028094_CB_COLOR5_VIEW:
1163 	case R_028098_CB_COLOR6_VIEW:
1164 	case R_02809C_CB_COLOR7_VIEW:
1165 		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
1166 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1167 		track->cb_dirty = true;
1168 		break;
1169 	case R_028060_CB_COLOR0_SIZE:
1170 	case R_028064_CB_COLOR1_SIZE:
1171 	case R_028068_CB_COLOR2_SIZE:
1172 	case R_02806C_CB_COLOR3_SIZE:
1173 	case R_028070_CB_COLOR4_SIZE:
1174 	case R_028074_CB_COLOR5_SIZE:
1175 	case R_028078_CB_COLOR6_SIZE:
1176 	case R_02807C_CB_COLOR7_SIZE:
1177 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1178 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1179 		track->cb_color_size_idx[tmp] = idx;
1180 		track->cb_dirty = true;
1181 		break;
1182 		/* This register were added late, there is userspace
1183 		 * which does provide relocation for those but set
1184 		 * 0 offset. In order to avoid breaking old userspace
1185 		 * we detect this and set address to point to last
1186 		 * CB_COLOR0_BASE, note that if userspace doesn't set
1187 		 * CB_COLOR0_BASE before this register we will report
1188 		 * error. Old userspace always set CB_COLOR0_BASE
1189 		 * before any of this.
1190 		 */
1191 	case R_0280E0_CB_COLOR0_FRAG:
1192 	case R_0280E4_CB_COLOR1_FRAG:
1193 	case R_0280E8_CB_COLOR2_FRAG:
1194 	case R_0280EC_CB_COLOR3_FRAG:
1195 	case R_0280F0_CB_COLOR4_FRAG:
1196 	case R_0280F4_CB_COLOR5_FRAG:
1197 	case R_0280F8_CB_COLOR6_FRAG:
1198 	case R_0280FC_CB_COLOR7_FRAG:
1199 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1200 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1201 			if (!track->cb_color_base_last[tmp]) {
1202 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1203 				return -EINVAL;
1204 			}
1205 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1206 			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1207 			ib[idx] = track->cb_color_base_last[tmp];
1208 		} else {
1209 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1210 			if (r) {
1211 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1212 				return -EINVAL;
1213 			}
1214 			track->cb_color_frag_bo[tmp] = reloc->robj;
1215 			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1216 			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1217 		}
1218 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1219 			track->cb_dirty = true;
1220 		}
1221 		break;
1222 	case R_0280C0_CB_COLOR0_TILE:
1223 	case R_0280C4_CB_COLOR1_TILE:
1224 	case R_0280C8_CB_COLOR2_TILE:
1225 	case R_0280CC_CB_COLOR3_TILE:
1226 	case R_0280D0_CB_COLOR4_TILE:
1227 	case R_0280D4_CB_COLOR5_TILE:
1228 	case R_0280D8_CB_COLOR6_TILE:
1229 	case R_0280DC_CB_COLOR7_TILE:
1230 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1231 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1232 			if (!track->cb_color_base_last[tmp]) {
1233 				dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
1234 				return -EINVAL;
1235 			}
1236 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1237 			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1238 			ib[idx] = track->cb_color_base_last[tmp];
1239 		} else {
1240 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1241 			if (r) {
1242 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1243 				return -EINVAL;
1244 			}
1245 			track->cb_color_tile_bo[tmp] = reloc->robj;
1246 			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1247 			ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1248 		}
1249 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1250 			track->cb_dirty = true;
1251 		}
1252 		break;
1253 	case R_028100_CB_COLOR0_MASK:
1254 	case R_028104_CB_COLOR1_MASK:
1255 	case R_028108_CB_COLOR2_MASK:
1256 	case R_02810C_CB_COLOR3_MASK:
1257 	case R_028110_CB_COLOR4_MASK:
1258 	case R_028114_CB_COLOR5_MASK:
1259 	case R_028118_CB_COLOR6_MASK:
1260 	case R_02811C_CB_COLOR7_MASK:
1261 		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1262 		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1263 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1264 			track->cb_dirty = true;
1265 		}
1266 		break;
1267 	case CB_COLOR0_BASE:
1268 	case CB_COLOR1_BASE:
1269 	case CB_COLOR2_BASE:
1270 	case CB_COLOR3_BASE:
1271 	case CB_COLOR4_BASE:
1272 	case CB_COLOR5_BASE:
1273 	case CB_COLOR6_BASE:
1274 	case CB_COLOR7_BASE:
1275 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1276 		if (r) {
1277 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1278 					"0x%04X\n", reg);
1279 			return -EINVAL;
1280 		}
1281 		tmp = (reg - CB_COLOR0_BASE) / 4;
1282 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1283 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1284 		track->cb_color_base_last[tmp] = ib[idx];
1285 		track->cb_color_bo[tmp] = reloc->robj;
1286 		track->cb_color_bo_mc[tmp] = reloc->gpu_offset;
1287 		track->cb_dirty = true;
1288 		break;
1289 	case DB_DEPTH_BASE:
1290 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1291 		if (r) {
1292 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1293 					"0x%04X\n", reg);
1294 			return -EINVAL;
1295 		}
1296 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
1297 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1298 		track->db_bo = reloc->robj;
1299 		track->db_bo_mc = reloc->gpu_offset;
1300 		track->db_dirty = true;
1301 		break;
1302 	case DB_HTILE_DATA_BASE:
1303 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1304 		if (r) {
1305 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1306 					"0x%04X\n", reg);
1307 			return -EINVAL;
1308 		}
1309 		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1310 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1311 		track->htile_bo = reloc->robj;
1312 		track->db_dirty = true;
1313 		break;
1314 	case DB_HTILE_SURFACE:
1315 		track->htile_surface = radeon_get_ib_value(p, idx);
1316 		/* force 8x8 htile width and height */
1317 		ib[idx] |= 3;
1318 		track->db_dirty = true;
1319 		break;
1320 	case SQ_PGM_START_FS:
1321 	case SQ_PGM_START_ES:
1322 	case SQ_PGM_START_VS:
1323 	case SQ_PGM_START_GS:
1324 	case SQ_PGM_START_PS:
1325 	case SQ_ALU_CONST_CACHE_GS_0:
1326 	case SQ_ALU_CONST_CACHE_GS_1:
1327 	case SQ_ALU_CONST_CACHE_GS_2:
1328 	case SQ_ALU_CONST_CACHE_GS_3:
1329 	case SQ_ALU_CONST_CACHE_GS_4:
1330 	case SQ_ALU_CONST_CACHE_GS_5:
1331 	case SQ_ALU_CONST_CACHE_GS_6:
1332 	case SQ_ALU_CONST_CACHE_GS_7:
1333 	case SQ_ALU_CONST_CACHE_GS_8:
1334 	case SQ_ALU_CONST_CACHE_GS_9:
1335 	case SQ_ALU_CONST_CACHE_GS_10:
1336 	case SQ_ALU_CONST_CACHE_GS_11:
1337 	case SQ_ALU_CONST_CACHE_GS_12:
1338 	case SQ_ALU_CONST_CACHE_GS_13:
1339 	case SQ_ALU_CONST_CACHE_GS_14:
1340 	case SQ_ALU_CONST_CACHE_GS_15:
1341 	case SQ_ALU_CONST_CACHE_PS_0:
1342 	case SQ_ALU_CONST_CACHE_PS_1:
1343 	case SQ_ALU_CONST_CACHE_PS_2:
1344 	case SQ_ALU_CONST_CACHE_PS_3:
1345 	case SQ_ALU_CONST_CACHE_PS_4:
1346 	case SQ_ALU_CONST_CACHE_PS_5:
1347 	case SQ_ALU_CONST_CACHE_PS_6:
1348 	case SQ_ALU_CONST_CACHE_PS_7:
1349 	case SQ_ALU_CONST_CACHE_PS_8:
1350 	case SQ_ALU_CONST_CACHE_PS_9:
1351 	case SQ_ALU_CONST_CACHE_PS_10:
1352 	case SQ_ALU_CONST_CACHE_PS_11:
1353 	case SQ_ALU_CONST_CACHE_PS_12:
1354 	case SQ_ALU_CONST_CACHE_PS_13:
1355 	case SQ_ALU_CONST_CACHE_PS_14:
1356 	case SQ_ALU_CONST_CACHE_PS_15:
1357 	case SQ_ALU_CONST_CACHE_VS_0:
1358 	case SQ_ALU_CONST_CACHE_VS_1:
1359 	case SQ_ALU_CONST_CACHE_VS_2:
1360 	case SQ_ALU_CONST_CACHE_VS_3:
1361 	case SQ_ALU_CONST_CACHE_VS_4:
1362 	case SQ_ALU_CONST_CACHE_VS_5:
1363 	case SQ_ALU_CONST_CACHE_VS_6:
1364 	case SQ_ALU_CONST_CACHE_VS_7:
1365 	case SQ_ALU_CONST_CACHE_VS_8:
1366 	case SQ_ALU_CONST_CACHE_VS_9:
1367 	case SQ_ALU_CONST_CACHE_VS_10:
1368 	case SQ_ALU_CONST_CACHE_VS_11:
1369 	case SQ_ALU_CONST_CACHE_VS_12:
1370 	case SQ_ALU_CONST_CACHE_VS_13:
1371 	case SQ_ALU_CONST_CACHE_VS_14:
1372 	case SQ_ALU_CONST_CACHE_VS_15:
1373 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1374 		if (r) {
1375 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1376 					"0x%04X\n", reg);
1377 			return -EINVAL;
1378 		}
1379 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1380 		break;
1381 	case SX_MEMORY_EXPORT_BASE:
1382 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1383 		if (r) {
1384 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1385 					"0x%04X\n", reg);
1386 			return -EINVAL;
1387 		}
1388 		ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1389 		break;
1390 	case SX_MISC:
1391 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1392 		break;
1393 	default:
1394 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1395 		return -EINVAL;
1396 	}
1397 	return 0;
1398 }
1399 
1400 unsigned r600_mip_minify(unsigned size, unsigned level)
1401 {
1402 	unsigned val;
1403 
1404 	val = max(1U, size >> level);
1405 	if (level > 0)
1406 		val = roundup_pow_of_two(val);
1407 	return val;
1408 }
1409 
1410 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1411 			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1412 			      unsigned block_align, unsigned height_align, unsigned base_align,
1413 			      unsigned *l0_size, unsigned *mipmap_size)
1414 {
1415 	unsigned offset, i, level;
1416 	unsigned width, height, depth, size;
1417 	unsigned blocksize;
1418 	unsigned nbx, nby;
1419 	unsigned nlevels = llevel - blevel + 1;
1420 
1421 	*l0_size = -1;
1422 	blocksize = r600_fmt_get_blocksize(format);
1423 
1424 	w0 = r600_mip_minify(w0, 0);
1425 	h0 = r600_mip_minify(h0, 0);
1426 	d0 = r600_mip_minify(d0, 0);
1427 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1428 		width = r600_mip_minify(w0, i);
1429 		nbx = r600_fmt_get_nblocksx(format, width);
1430 
1431 		nbx = round_up(nbx, block_align);
1432 
1433 		height = r600_mip_minify(h0, i);
1434 		nby = r600_fmt_get_nblocksy(format, height);
1435 		nby = round_up(nby, height_align);
1436 
1437 		depth = r600_mip_minify(d0, i);
1438 
1439 		size = nbx * nby * blocksize * nsamples;
1440 		if (nfaces)
1441 			size *= nfaces;
1442 		else
1443 			size *= depth;
1444 
1445 		if (i == 0)
1446 			*l0_size = size;
1447 
1448 		if (i == 0 || i == 1)
1449 			offset = round_up(offset, base_align);
1450 
1451 		offset += size;
1452 	}
1453 	*mipmap_size = offset;
1454 	if (llevel == 0)
1455 		*mipmap_size = *l0_size;
1456 	if (!blevel)
1457 		*mipmap_size -= *l0_size;
1458 }
1459 
1460 /**
1461  * r600_check_texture_resource() - check if register is authorized or not
1462  * @p: parser structure holding parsing context
1463  * @idx: index into the cs buffer
1464  * @texture: texture's bo structure
1465  * @mipmap: mipmap's bo structure
1466  *
1467  * This function will check that the resource has valid field and that
1468  * the texture and mipmap bo object are big enough to cover this resource.
1469  */
1470 static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1471 					      struct radeon_bo *texture,
1472 					      struct radeon_bo *mipmap,
1473 					      u64 base_offset,
1474 					      u64 mip_offset,
1475 					      u32 tiling_flags)
1476 {
1477 	struct r600_cs_track *track = p->track;
1478 	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1479 	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1480 	u32 height_align, pitch, pitch_align, depth_align;
1481 	u32 barray, larray;
1482 	u64 base_align;
1483 	struct array_mode_checker array_check;
1484 	u32 format;
1485 	bool is_array;
1486 
1487 	/* on legacy kernel we don't perform advanced check */
1488 	if (p->rdev == NULL)
1489 		return 0;
1490 
1491 	/* convert to bytes */
1492 	base_offset <<= 8;
1493 	mip_offset <<= 8;
1494 
1495 	word0 = radeon_get_ib_value(p, idx + 0);
1496 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1497 		if (tiling_flags & RADEON_TILING_MACRO)
1498 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1499 		else if (tiling_flags & RADEON_TILING_MICRO)
1500 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1501 	}
1502 	word1 = radeon_get_ib_value(p, idx + 1);
1503 	word2 = radeon_get_ib_value(p, idx + 2) << 8;
1504 	word3 = radeon_get_ib_value(p, idx + 3) << 8;
1505 	word4 = radeon_get_ib_value(p, idx + 4);
1506 	word5 = radeon_get_ib_value(p, idx + 5);
1507 	dim = G_038000_DIM(word0);
1508 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1509 	pitch = (G_038000_PITCH(word0) + 1) * 8;
1510 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1511 	d0 = G_038004_TEX_DEPTH(word1);
1512 	format = G_038004_DATA_FORMAT(word1);
1513 	blevel = G_038010_BASE_LEVEL(word4);
1514 	llevel = G_038014_LAST_LEVEL(word5);
1515 	/* pitch in texels */
1516 	array_check.array_mode = G_038000_TILE_MODE(word0);
1517 	array_check.group_size = track->group_size;
1518 	array_check.nbanks = track->nbanks;
1519 	array_check.npipes = track->npipes;
1520 	array_check.nsamples = 1;
1521 	array_check.blocksize = r600_fmt_get_blocksize(format);
1522 	nfaces = 1;
1523 	is_array = false;
1524 	switch (dim) {
1525 	case V_038000_SQ_TEX_DIM_1D:
1526 	case V_038000_SQ_TEX_DIM_2D:
1527 	case V_038000_SQ_TEX_DIM_3D:
1528 		break;
1529 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1530 		if (p->family >= CHIP_RV770)
1531 			nfaces = 8;
1532 		else
1533 			nfaces = 6;
1534 		break;
1535 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1536 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1537 		is_array = true;
1538 		break;
1539 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1540 		is_array = true;
1541 		/* fall through */
1542 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1543 		array_check.nsamples = 1 << llevel;
1544 		llevel = 0;
1545 		break;
1546 	default:
1547 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1548 		return -EINVAL;
1549 	}
1550 	if (!r600_fmt_is_valid_texture(format, p->family)) {
1551 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1552 			 __func__, __LINE__, format);
1553 		return -EINVAL;
1554 	}
1555 
1556 	if (r600_get_array_mode_alignment(&array_check,
1557 					  &pitch_align, &height_align, &depth_align, &base_align)) {
1558 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1559 			 __func__, __LINE__, G_038000_TILE_MODE(word0));
1560 		return -EINVAL;
1561 	}
1562 
1563 	/* XXX check height as well... */
1564 
1565 	if (!IS_ALIGNED(pitch, pitch_align)) {
1566 		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1567 			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1568 		return -EINVAL;
1569 	}
1570 	if (!IS_ALIGNED(base_offset, base_align)) {
1571 		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1572 			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1573 		return -EINVAL;
1574 	}
1575 	if (!IS_ALIGNED(mip_offset, base_align)) {
1576 		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1577 			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1578 		return -EINVAL;
1579 	}
1580 
1581 	if (blevel > llevel) {
1582 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1583 			 blevel, llevel);
1584 	}
1585 	if (is_array) {
1586 		barray = G_038014_BASE_ARRAY(word5);
1587 		larray = G_038014_LAST_ARRAY(word5);
1588 
1589 		nfaces = larray - barray + 1;
1590 	}
1591 	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1592 			  pitch_align, height_align, base_align,
1593 			  &l0_size, &mipmap_size);
1594 	/* using get ib will give us the offset into the texture bo */
1595 	if ((l0_size + word2) > radeon_bo_size(texture)) {
1596 		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1597 			 w0, h0, pitch_align, height_align,
1598 			 array_check.array_mode, format, word2,
1599 			 l0_size, radeon_bo_size(texture));
1600 		dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
1601 		return -EINVAL;
1602 	}
1603 	/* using get ib will give us the offset into the mipmap bo */
1604 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1605 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1606 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1607 	}
1608 	return 0;
1609 }
1610 
1611 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1612 {
1613 	u32 m, i;
1614 
1615 	i = (reg >> 7);
1616 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1617 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1618 		return false;
1619 	}
1620 	m = 1 << ((reg >> 2) & 31);
1621 	if (!(r600_reg_safe_bm[i] & m))
1622 		return true;
1623 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1624 	return false;
1625 }
1626 
1627 static int r600_packet3_check(struct radeon_cs_parser *p,
1628 				struct radeon_cs_packet *pkt)
1629 {
1630 	struct radeon_cs_reloc *reloc;
1631 	struct r600_cs_track *track;
1632 	volatile u32 *ib;
1633 	unsigned idx;
1634 	unsigned i;
1635 	unsigned start_reg, end_reg, reg;
1636 	int r;
1637 	u32 idx_value;
1638 
1639 	track = (struct r600_cs_track *)p->track;
1640 	ib = p->ib.ptr;
1641 	idx = pkt->idx + 1;
1642 	idx_value = radeon_get_ib_value(p, idx);
1643 
1644 	switch (pkt->opcode) {
1645 	case PACKET3_SET_PREDICATION:
1646 	{
1647 		int pred_op;
1648 		int tmp;
1649 		uint64_t offset;
1650 
1651 		if (pkt->count != 1) {
1652 			DRM_ERROR("bad SET PREDICATION\n");
1653 			return -EINVAL;
1654 		}
1655 
1656 		tmp = radeon_get_ib_value(p, idx + 1);
1657 		pred_op = (tmp >> 16) & 0x7;
1658 
1659 		/* for the clear predicate operation */
1660 		if (pred_op == 0)
1661 			return 0;
1662 
1663 		if (pred_op > 2) {
1664 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1665 			return -EINVAL;
1666 		}
1667 
1668 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1669 		if (r) {
1670 			DRM_ERROR("bad SET PREDICATION\n");
1671 			return -EINVAL;
1672 		}
1673 
1674 		offset = reloc->gpu_offset +
1675 		         (idx_value & 0xfffffff0) +
1676 		         ((u64)(tmp & 0xff) << 32);
1677 
1678 		ib[idx + 0] = offset;
1679 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1680 	}
1681 	break;
1682 
1683 	case PACKET3_START_3D_CMDBUF:
1684 		if (p->family >= CHIP_RV770 || pkt->count) {
1685 			DRM_ERROR("bad START_3D\n");
1686 			return -EINVAL;
1687 		}
1688 		break;
1689 	case PACKET3_CONTEXT_CONTROL:
1690 		if (pkt->count != 1) {
1691 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1692 			return -EINVAL;
1693 		}
1694 		break;
1695 	case PACKET3_INDEX_TYPE:
1696 	case PACKET3_NUM_INSTANCES:
1697 		if (pkt->count) {
1698 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1699 			return -EINVAL;
1700 		}
1701 		break;
1702 	case PACKET3_DRAW_INDEX:
1703 	{
1704 		uint64_t offset;
1705 		if (pkt->count != 3) {
1706 			DRM_ERROR("bad DRAW_INDEX\n");
1707 			return -EINVAL;
1708 		}
1709 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1710 		if (r) {
1711 			DRM_ERROR("bad DRAW_INDEX\n");
1712 			return -EINVAL;
1713 		}
1714 
1715 		offset = reloc->gpu_offset +
1716 		         idx_value +
1717 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1718 
1719 		ib[idx+0] = offset;
1720 		ib[idx+1] = upper_32_bits(offset) & 0xff;
1721 
1722 		r = r600_cs_track_check(p);
1723 		if (r) {
1724 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1725 			return r;
1726 		}
1727 		break;
1728 	}
1729 	case PACKET3_DRAW_INDEX_AUTO:
1730 		if (pkt->count != 1) {
1731 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1732 			return -EINVAL;
1733 		}
1734 		r = r600_cs_track_check(p);
1735 		if (r) {
1736 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1737 			return r;
1738 		}
1739 		break;
1740 	case PACKET3_DRAW_INDEX_IMMD_BE:
1741 	case PACKET3_DRAW_INDEX_IMMD:
1742 		if (pkt->count < 2) {
1743 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1744 			return -EINVAL;
1745 		}
1746 		r = r600_cs_track_check(p);
1747 		if (r) {
1748 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1749 			return r;
1750 		}
1751 		break;
1752 	case PACKET3_WAIT_REG_MEM:
1753 		if (pkt->count != 5) {
1754 			DRM_ERROR("bad WAIT_REG_MEM\n");
1755 			return -EINVAL;
1756 		}
1757 		/* bit 4 is reg (0) or mem (1) */
1758 		if (idx_value & 0x10) {
1759 			uint64_t offset;
1760 
1761 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1762 			if (r) {
1763 				DRM_ERROR("bad WAIT_REG_MEM\n");
1764 				return -EINVAL;
1765 			}
1766 
1767 			offset = reloc->gpu_offset +
1768 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1769 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1770 
1771 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1772 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1773 		} else if (idx_value & 0x100) {
1774 			DRM_ERROR("cannot use PFP on REG wait\n");
1775 			return -EINVAL;
1776 		}
1777 		break;
1778 	case PACKET3_CP_DMA:
1779 	{
1780 		u32 command, size;
1781 		u64 offset, tmp;
1782 		if (pkt->count != 4) {
1783 			DRM_ERROR("bad CP DMA\n");
1784 			return -EINVAL;
1785 		}
1786 		command = radeon_get_ib_value(p, idx+4);
1787 		size = command & 0x1fffff;
1788 		if (command & PACKET3_CP_DMA_CMD_SAS) {
1789 			/* src address space is register */
1790 			DRM_ERROR("CP DMA SAS not supported\n");
1791 			return -EINVAL;
1792 		} else {
1793 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
1794 				DRM_ERROR("CP DMA SAIC only supported for registers\n");
1795 				return -EINVAL;
1796 			}
1797 			/* src address space is memory */
1798 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1799 			if (r) {
1800 				DRM_ERROR("bad CP DMA SRC\n");
1801 				return -EINVAL;
1802 			}
1803 
1804 			tmp = radeon_get_ib_value(p, idx) +
1805 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1806 
1807 			offset = reloc->gpu_offset + tmp;
1808 
1809 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1810 				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1811 					 tmp + size, radeon_bo_size(reloc->robj));
1812 				return -EINVAL;
1813 			}
1814 
1815 			ib[idx] = offset;
1816 			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1817 		}
1818 		if (command & PACKET3_CP_DMA_CMD_DAS) {
1819 			/* dst address space is register */
1820 			DRM_ERROR("CP DMA DAS not supported\n");
1821 			return -EINVAL;
1822 		} else {
1823 			/* dst address space is memory */
1824 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
1825 				DRM_ERROR("CP DMA DAIC only supported for registers\n");
1826 				return -EINVAL;
1827 			}
1828 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1829 			if (r) {
1830 				DRM_ERROR("bad CP DMA DST\n");
1831 				return -EINVAL;
1832 			}
1833 
1834 			tmp = radeon_get_ib_value(p, idx+2) +
1835 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1836 
1837 			offset = reloc->gpu_offset + tmp;
1838 
1839 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1840 				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1841 					 tmp + size, radeon_bo_size(reloc->robj));
1842 				return -EINVAL;
1843 			}
1844 
1845 			ib[idx+2] = offset;
1846 			ib[idx+3] = upper_32_bits(offset) & 0xff;
1847 		}
1848 		break;
1849 	}
1850 	case PACKET3_SURFACE_SYNC:
1851 		if (pkt->count != 3) {
1852 			DRM_ERROR("bad SURFACE_SYNC\n");
1853 			return -EINVAL;
1854 		}
1855 		/* 0xffffffff/0x0 is flush all cache flag */
1856 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1857 		    radeon_get_ib_value(p, idx + 2) != 0) {
1858 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1859 			if (r) {
1860 				DRM_ERROR("bad SURFACE_SYNC\n");
1861 				return -EINVAL;
1862 			}
1863 			ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1864 		}
1865 		break;
1866 	case PACKET3_EVENT_WRITE:
1867 		if (pkt->count != 2 && pkt->count != 0) {
1868 			DRM_ERROR("bad EVENT_WRITE\n");
1869 			return -EINVAL;
1870 		}
1871 		if (pkt->count) {
1872 			uint64_t offset;
1873 
1874 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1875 			if (r) {
1876 				DRM_ERROR("bad EVENT_WRITE\n");
1877 				return -EINVAL;
1878 			}
1879 			offset = reloc->gpu_offset +
1880 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
1881 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1882 
1883 			ib[idx+1] = offset & 0xfffffff8;
1884 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1885 		}
1886 		break;
1887 	case PACKET3_EVENT_WRITE_EOP:
1888 	{
1889 		uint64_t offset;
1890 
1891 		if (pkt->count != 4) {
1892 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
1893 			return -EINVAL;
1894 		}
1895 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1896 		if (r) {
1897 			DRM_ERROR("bad EVENT_WRITE\n");
1898 			return -EINVAL;
1899 		}
1900 
1901 		offset = reloc->gpu_offset +
1902 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
1903 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1904 
1905 		ib[idx+1] = offset & 0xfffffffc;
1906 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1907 		break;
1908 	}
1909 	case PACKET3_SET_CONFIG_REG:
1910 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1911 		end_reg = 4 * pkt->count + start_reg - 4;
1912 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1913 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1914 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1915 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1916 			return -EINVAL;
1917 		}
1918 		for (i = 0; i < pkt->count; i++) {
1919 			reg = start_reg + (4 * i);
1920 			r = r600_cs_check_reg(p, reg, idx+1+i);
1921 			if (r)
1922 				return r;
1923 		}
1924 		break;
1925 	case PACKET3_SET_CONTEXT_REG:
1926 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1927 		end_reg = 4 * pkt->count + start_reg - 4;
1928 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1929 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1930 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1931 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1932 			return -EINVAL;
1933 		}
1934 		for (i = 0; i < pkt->count; i++) {
1935 			reg = start_reg + (4 * i);
1936 			r = r600_cs_check_reg(p, reg, idx+1+i);
1937 			if (r)
1938 				return r;
1939 		}
1940 		break;
1941 	case PACKET3_SET_RESOURCE:
1942 		if (pkt->count % 7) {
1943 			DRM_ERROR("bad SET_RESOURCE\n");
1944 			return -EINVAL;
1945 		}
1946 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1947 		end_reg = 4 * pkt->count + start_reg - 4;
1948 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1949 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
1950 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
1951 			DRM_ERROR("bad SET_RESOURCE\n");
1952 			return -EINVAL;
1953 		}
1954 		for (i = 0; i < (pkt->count / 7); i++) {
1955 			struct radeon_bo *texture, *mipmap;
1956 			u32 size, offset, base_offset, mip_offset;
1957 
1958 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1959 			case SQ_TEX_VTX_VALID_TEXTURE:
1960 				/* tex base */
1961 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1962 				if (r) {
1963 					DRM_ERROR("bad SET_RESOURCE\n");
1964 					return -EINVAL;
1965 				}
1966 				base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1967 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1968 					if (reloc->tiling_flags & RADEON_TILING_MACRO)
1969 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1970 					else if (reloc->tiling_flags & RADEON_TILING_MICRO)
1971 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1972 				}
1973 				texture = reloc->robj;
1974 				/* tex mip base */
1975 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1976 				if (r) {
1977 					DRM_ERROR("bad SET_RESOURCE\n");
1978 					return -EINVAL;
1979 				}
1980 				mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
1981 				mipmap = reloc->robj;
1982 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
1983 								texture, mipmap,
1984 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1985 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1986 								reloc->tiling_flags);
1987 				if (r)
1988 					return r;
1989 				ib[idx+1+(i*7)+2] += base_offset;
1990 				ib[idx+1+(i*7)+3] += mip_offset;
1991 				break;
1992 			case SQ_TEX_VTX_VALID_BUFFER:
1993 			{
1994 				uint64_t offset64;
1995 				/* vtx base */
1996 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1997 				if (r) {
1998 					DRM_ERROR("bad SET_RESOURCE\n");
1999 					return -EINVAL;
2000 				}
2001 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
2002 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
2003 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
2004 					/* force size to size of the buffer */
2005 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
2006 						 size + offset, radeon_bo_size(reloc->robj));
2007 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
2008 				}
2009 
2010 				offset64 = reloc->gpu_offset + offset;
2011 				ib[idx+1+(i*8)+0] = offset64;
2012 				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
2013 						    (upper_32_bits(offset64) & 0xff);
2014 				break;
2015 			}
2016 			case SQ_TEX_VTX_INVALID_TEXTURE:
2017 			case SQ_TEX_VTX_INVALID_BUFFER:
2018 			default:
2019 				DRM_ERROR("bad SET_RESOURCE\n");
2020 				return -EINVAL;
2021 			}
2022 		}
2023 		break;
2024 	case PACKET3_SET_ALU_CONST:
2025 		if (track->sq_config & DX9_CONSTS) {
2026 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2027 			end_reg = 4 * pkt->count + start_reg - 4;
2028 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2029 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2030 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2031 				DRM_ERROR("bad SET_ALU_CONST\n");
2032 				return -EINVAL;
2033 			}
2034 		}
2035 		break;
2036 	case PACKET3_SET_BOOL_CONST:
2037 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2038 		end_reg = 4 * pkt->count + start_reg - 4;
2039 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2040 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2041 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2042 			DRM_ERROR("bad SET_BOOL_CONST\n");
2043 			return -EINVAL;
2044 		}
2045 		break;
2046 	case PACKET3_SET_LOOP_CONST:
2047 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2048 		end_reg = 4 * pkt->count + start_reg - 4;
2049 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2050 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2051 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2052 			DRM_ERROR("bad SET_LOOP_CONST\n");
2053 			return -EINVAL;
2054 		}
2055 		break;
2056 	case PACKET3_SET_CTL_CONST:
2057 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2058 		end_reg = 4 * pkt->count + start_reg - 4;
2059 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2060 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2061 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2062 			DRM_ERROR("bad SET_CTL_CONST\n");
2063 			return -EINVAL;
2064 		}
2065 		break;
2066 	case PACKET3_SET_SAMPLER:
2067 		if (pkt->count % 3) {
2068 			DRM_ERROR("bad SET_SAMPLER\n");
2069 			return -EINVAL;
2070 		}
2071 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2072 		end_reg = 4 * pkt->count + start_reg - 4;
2073 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2074 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
2075 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
2076 			DRM_ERROR("bad SET_SAMPLER\n");
2077 			return -EINVAL;
2078 		}
2079 		break;
2080 	case PACKET3_STRMOUT_BASE_UPDATE:
2081 		/* RS780 and RS880 also need this */
2082 		if (p->family < CHIP_RS780) {
2083 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2084 			return -EINVAL;
2085 		}
2086 		if (pkt->count != 1) {
2087 			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2088 			return -EINVAL;
2089 		}
2090 		if (idx_value > 3) {
2091 			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2092 			return -EINVAL;
2093 		}
2094 		{
2095 			u64 offset;
2096 
2097 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2098 			if (r) {
2099 				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2100 				return -EINVAL;
2101 			}
2102 
2103 			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2104 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2105 				return -EINVAL;
2106 			}
2107 
2108 			offset = radeon_get_ib_value(p, idx+1) << 8;
2109 			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2110 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2111 					  offset, track->vgt_strmout_bo_offset[idx_value]);
2112 				return -EINVAL;
2113 			}
2114 
2115 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2116 				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2117 					  offset + 4, radeon_bo_size(reloc->robj));
2118 				return -EINVAL;
2119 			}
2120 			ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
2121 		}
2122 		break;
2123 	case PACKET3_SURFACE_BASE_UPDATE:
2124 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2125 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2126 			return -EINVAL;
2127 		}
2128 		if (pkt->count) {
2129 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2130 			return -EINVAL;
2131 		}
2132 		break;
2133 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2134 		if (pkt->count != 4) {
2135 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2136 			return -EINVAL;
2137 		}
2138 		/* Updating memory at DST_ADDRESS. */
2139 		if (idx_value & 0x1) {
2140 			u64 offset;
2141 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2142 			if (r) {
2143 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2144 				return -EINVAL;
2145 			}
2146 			offset = radeon_get_ib_value(p, idx+1);
2147 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2148 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2149 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
2150 					  offset + 4, radeon_bo_size(reloc->robj));
2151 				return -EINVAL;
2152 			}
2153 			offset += reloc->gpu_offset;
2154 			ib[idx+1] = offset;
2155 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2156 		}
2157 		/* Reading data from SRC_ADDRESS. */
2158 		if (((idx_value >> 1) & 0x3) == 2) {
2159 			u64 offset;
2160 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2161 			if (r) {
2162 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2163 				return -EINVAL;
2164 			}
2165 			offset = radeon_get_ib_value(p, idx+3);
2166 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2167 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2168 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
2169 					  offset + 4, radeon_bo_size(reloc->robj));
2170 				return -EINVAL;
2171 			}
2172 			offset += reloc->gpu_offset;
2173 			ib[idx+3] = offset;
2174 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2175 		}
2176 		break;
2177 	case PACKET3_MEM_WRITE:
2178 	{
2179 		u64 offset;
2180 
2181 		if (pkt->count != 3) {
2182 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2183 			return -EINVAL;
2184 		}
2185 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2186 		if (r) {
2187 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2188 			return -EINVAL;
2189 		}
2190 		offset = radeon_get_ib_value(p, idx+0);
2191 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2192 		if (offset & 0x7) {
2193 			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2194 			return -EINVAL;
2195 		}
2196 		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2197 			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2198 				  offset + 8, radeon_bo_size(reloc->robj));
2199 			return -EINVAL;
2200 		}
2201 		offset += reloc->gpu_offset;
2202 		ib[idx+0] = offset;
2203 		ib[idx+1] = upper_32_bits(offset) & 0xff;
2204 		break;
2205 	}
2206 	case PACKET3_COPY_DW:
2207 		if (pkt->count != 4) {
2208 			DRM_ERROR("bad COPY_DW (invalid count)\n");
2209 			return -EINVAL;
2210 		}
2211 		if (idx_value & 0x1) {
2212 			u64 offset;
2213 			/* SRC is memory. */
2214 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2215 			if (r) {
2216 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2217 				return -EINVAL;
2218 			}
2219 			offset = radeon_get_ib_value(p, idx+1);
2220 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2221 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2222 				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
2223 					  offset + 4, radeon_bo_size(reloc->robj));
2224 				return -EINVAL;
2225 			}
2226 			offset += reloc->gpu_offset;
2227 			ib[idx+1] = offset;
2228 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2229 		} else {
2230 			/* SRC is a reg. */
2231 			reg = radeon_get_ib_value(p, idx+1) << 2;
2232 			if (!r600_is_safe_reg(p, reg, idx+1))
2233 				return -EINVAL;
2234 		}
2235 		if (idx_value & 0x2) {
2236 			u64 offset;
2237 			/* DST is memory. */
2238 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2239 			if (r) {
2240 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2241 				return -EINVAL;
2242 			}
2243 			offset = radeon_get_ib_value(p, idx+3);
2244 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2245 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2246 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
2247 					  offset + 4, radeon_bo_size(reloc->robj));
2248 				return -EINVAL;
2249 			}
2250 			offset += reloc->gpu_offset;
2251 			ib[idx+3] = offset;
2252 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2253 		} else {
2254 			/* DST is a reg. */
2255 			reg = radeon_get_ib_value(p, idx+3) << 2;
2256 			if (!r600_is_safe_reg(p, reg, idx+3))
2257 				return -EINVAL;
2258 		}
2259 		break;
2260 	case PACKET3_NOP:
2261 		break;
2262 	default:
2263 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2264 		return -EINVAL;
2265 	}
2266 	return 0;
2267 }
2268 
2269 int r600_cs_parse(struct radeon_cs_parser *p)
2270 {
2271 	struct radeon_cs_packet pkt;
2272 	struct r600_cs_track *track;
2273 	int r;
2274 
2275 	if (p->track == NULL) {
2276 		/* initialize tracker, we are in kms */
2277 		track = kzalloc(sizeof(*track), GFP_KERNEL);
2278 		if (track == NULL)
2279 			return -ENOMEM;
2280 		r600_cs_track_init(track);
2281 		if (p->rdev->family < CHIP_RV770) {
2282 			track->npipes = p->rdev->config.r600.tiling_npipes;
2283 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
2284 			track->group_size = p->rdev->config.r600.tiling_group_size;
2285 		} else if (p->rdev->family <= CHIP_RV740) {
2286 			track->npipes = p->rdev->config.rv770.tiling_npipes;
2287 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2288 			track->group_size = p->rdev->config.rv770.tiling_group_size;
2289 		}
2290 		p->track = track;
2291 	}
2292 	do {
2293 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
2294 		if (r) {
2295 			kfree(p->track);
2296 			p->track = NULL;
2297 			return r;
2298 		}
2299 		p->idx += pkt.count + 2;
2300 		switch (pkt.type) {
2301 		case RADEON_PACKET_TYPE0:
2302 			r = r600_cs_parse_packet0(p, &pkt);
2303 			break;
2304 		case RADEON_PACKET_TYPE2:
2305 			break;
2306 		case RADEON_PACKET_TYPE3:
2307 			r = r600_packet3_check(p, &pkt);
2308 			break;
2309 		default:
2310 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2311 			kfree(p->track);
2312 			p->track = NULL;
2313 			return -EINVAL;
2314 		}
2315 		if (r) {
2316 			kfree(p->track);
2317 			p->track = NULL;
2318 			return r;
2319 		}
2320 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2321 #if 0
2322 	for (r = 0; r < p->ib.length_dw; r++) {
2323 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2324 		mdelay(1);
2325 	}
2326 #endif
2327 	kfree(p->track);
2328 	p->track = NULL;
2329 	return 0;
2330 }
2331 
2332 /*
2333  *  DMA
2334  */
2335 /**
2336  * r600_dma_cs_next_reloc() - parse next reloc
2337  * @p:		parser structure holding parsing context.
2338  * @cs_reloc:		reloc informations
2339  *
2340  * Return the next reloc, do bo validation and compute
2341  * GPU offset using the provided start.
2342  **/
2343 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2344 			   struct radeon_cs_reloc **cs_reloc)
2345 {
2346 	struct radeon_cs_chunk *relocs_chunk;
2347 	unsigned idx;
2348 
2349 	*cs_reloc = NULL;
2350 	if (p->chunk_relocs_idx == -1) {
2351 		DRM_ERROR("No relocation chunk !\n");
2352 		return -EINVAL;
2353 	}
2354 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2355 	idx = p->dma_reloc_idx;
2356 	if (idx >= p->nrelocs) {
2357 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2358 			  idx, p->nrelocs);
2359 		return -EINVAL;
2360 	}
2361 	*cs_reloc = p->relocs_ptr[idx];
2362 	p->dma_reloc_idx++;
2363 	return 0;
2364 }
2365 
2366 #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2367 #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2368 #define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2369 
2370 /**
2371  * r600_dma_cs_parse() - parse the DMA IB
2372  * @p:		parser structure holding parsing context.
2373  *
2374  * Parses the DMA IB from the CS ioctl and updates
2375  * the GPU addresses based on the reloc information and
2376  * checks for errors. (R6xx-R7xx)
2377  * Returns 0 for success and an error on failure.
2378  **/
2379 int r600_dma_cs_parse(struct radeon_cs_parser *p)
2380 {
2381 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2382 	struct radeon_cs_reloc *src_reloc, *dst_reloc;
2383 	u32 header, cmd, count, tiled;
2384 	volatile u32 *ib = p->ib.ptr;
2385 	u32 idx, idx_value;
2386 	u64 src_offset, dst_offset;
2387 	int r;
2388 
2389 	do {
2390 		if (p->idx >= ib_chunk->length_dw) {
2391 			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2392 				  p->idx, ib_chunk->length_dw);
2393 			return -EINVAL;
2394 		}
2395 		idx = p->idx;
2396 		header = radeon_get_ib_value(p, idx);
2397 		cmd = GET_DMA_CMD(header);
2398 		count = GET_DMA_COUNT(header);
2399 		tiled = GET_DMA_T(header);
2400 
2401 		switch (cmd) {
2402 		case DMA_PACKET_WRITE:
2403 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2404 			if (r) {
2405 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2406 				return -EINVAL;
2407 			}
2408 			if (tiled) {
2409 				dst_offset = radeon_get_ib_value(p, idx+1);
2410 				dst_offset <<= 8;
2411 
2412 				ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2413 				p->idx += count + 5;
2414 			} else {
2415 				dst_offset = radeon_get_ib_value(p, idx+1);
2416 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2417 
2418 				ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2419 				ib[idx+2] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2420 				p->idx += count + 3;
2421 			}
2422 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2423 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2424 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2425 				return -EINVAL;
2426 			}
2427 			break;
2428 		case DMA_PACKET_COPY:
2429 			r = r600_dma_cs_next_reloc(p, &src_reloc);
2430 			if (r) {
2431 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2432 				return -EINVAL;
2433 			}
2434 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2435 			if (r) {
2436 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2437 				return -EINVAL;
2438 			}
2439 			if (tiled) {
2440 				idx_value = radeon_get_ib_value(p, idx + 2);
2441 				/* detile bit */
2442 				if (idx_value & (1 << 31)) {
2443 					/* tiled src, linear dst */
2444 					src_offset = radeon_get_ib_value(p, idx+1);
2445 					src_offset <<= 8;
2446 					ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
2447 
2448 					dst_offset = radeon_get_ib_value(p, idx+5);
2449 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2450 					ib[idx+5] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2451 					ib[idx+6] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2452 				} else {
2453 					/* linear src, tiled dst */
2454 					src_offset = radeon_get_ib_value(p, idx+5);
2455 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2456 					ib[idx+5] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2457 					ib[idx+6] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2458 
2459 					dst_offset = radeon_get_ib_value(p, idx+1);
2460 					dst_offset <<= 8;
2461 					ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
2462 				}
2463 				p->idx += 7;
2464 			} else {
2465 				if (p->family >= CHIP_RV770) {
2466 					src_offset = radeon_get_ib_value(p, idx+2);
2467 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2468 					dst_offset = radeon_get_ib_value(p, idx+1);
2469 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2470 
2471 					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2472 					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2473 					ib[idx+3] += upper_32_bits(dst_reloc->gpu_offset) & 0xff;
2474 					ib[idx+4] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2475 					p->idx += 5;
2476 				} else {
2477 					src_offset = radeon_get_ib_value(p, idx+2);
2478 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2479 					dst_offset = radeon_get_ib_value(p, idx+1);
2480 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2481 
2482 					ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2483 					ib[idx+2] += (u32)(src_reloc->gpu_offset & 0xfffffffc);
2484 					ib[idx+3] += upper_32_bits(src_reloc->gpu_offset) & 0xff;
2485 					ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) & 0xff) << 16;
2486 					p->idx += 4;
2487 				}
2488 			}
2489 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2490 				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2491 					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2492 				return -EINVAL;
2493 			}
2494 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2495 				dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
2496 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2497 				return -EINVAL;
2498 			}
2499 			break;
2500 		case DMA_PACKET_CONSTANT_FILL:
2501 			if (p->family < CHIP_RV770) {
2502 				DRM_ERROR("Constant Fill is 7xx only !\n");
2503 				return -EINVAL;
2504 			}
2505 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2506 			if (r) {
2507 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2508 				return -EINVAL;
2509 			}
2510 			dst_offset = radeon_get_ib_value(p, idx+1);
2511 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2512 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2513 				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2514 					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2515 				return -EINVAL;
2516 			}
2517 			ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
2518 			ib[idx+3] += (upper_32_bits(dst_reloc->gpu_offset) << 16) & 0x00ff0000;
2519 			p->idx += 4;
2520 			break;
2521 		case DMA_PACKET_NOP:
2522 			p->idx += 1;
2523 			break;
2524 		default:
2525 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2526 			return -EINVAL;
2527 		}
2528 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2529 #if 0
2530 	for (r = 0; r < p->ib->length_dw; r++) {
2531 		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib.ptr[r]);
2532 		mdelay(1);
2533 	}
2534 #endif
2535 	return 0;
2536 }
2537