xref: /dragonfly/sys/dev/drm/radeon/r600_cs.c (revision 267c04fd)
1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  *
28  * $FreeBSD: head/sys/dev/drm2/radeon/r600_cs.c 254885 2013-08-25 19:37:15Z dumbbell $
29  */
30 
31 #include <drm/drmP.h>
32 #include "radeon.h"
33 #include "radeon_asic.h"
34 #include "r600d.h"
35 #include "r600_reg_safe.h"
36 
37 static int r600_nomm;
38 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
39 
40 
41 struct r600_cs_track {
42 	/* configuration we mirror so that we use the same code between kms/ums */
43 	u32			group_size;
44 	u32			nbanks;
45 	u32			npipes;
46 	/* values we track */
47 	u32			sq_config;
48 	u32			log_nsamples;
49 	u32			nsamples;
50 	u32			cb_color_base_last[8];
51 	struct radeon_bo	*cb_color_bo[8];
52 	u64			cb_color_bo_mc[8];
53 	u64			cb_color_bo_offset[8];
54 	struct radeon_bo	*cb_color_frag_bo[8];
55 	u64			cb_color_frag_offset[8];
56 	struct radeon_bo	*cb_color_tile_bo[8];
57 	u64			cb_color_tile_offset[8];
58 	u32			cb_color_mask[8];
59 	u32			cb_color_info[8];
60 	u32			cb_color_view[8];
61 	u32			cb_color_size_idx[8]; /* unused */
62 	u32			cb_target_mask;
63 	u32			cb_shader_mask;  /* unused */
64 	bool			is_resolve;
65 	u32			cb_color_size[8];
66 	u32			vgt_strmout_en;
67 	u32			vgt_strmout_buffer_en;
68 	struct radeon_bo	*vgt_strmout_bo[4];
69 	u64			vgt_strmout_bo_mc[4]; /* unused */
70 	u32			vgt_strmout_bo_offset[4];
71 	u32			vgt_strmout_size[4];
72 	u32			db_depth_control;
73 	u32			db_depth_info;
74 	u32			db_depth_size_idx;
75 	u32			db_depth_view;
76 	u32			db_depth_size;
77 	u32			db_offset;
78 	struct radeon_bo	*db_bo;
79 	u64			db_bo_mc;
80 	bool			sx_misc_kill_all_prims;
81 	bool			cb_dirty;
82 	bool			db_dirty;
83 	bool			streamout_dirty;
84 	struct radeon_bo	*htile_bo;
85 	u64			htile_offset;
86 	u32			htile_surface;
87 };
88 
89 #define FMT_8_BIT(fmt, vc)   [fmt] = { 1, 1, 1, vc, CHIP_R600 }
90 #define FMT_16_BIT(fmt, vc)  [fmt] = { 1, 1, 2, vc, CHIP_R600 }
91 #define FMT_24_BIT(fmt)      [fmt] = { 1, 1, 4,  0, CHIP_R600 }
92 #define FMT_32_BIT(fmt, vc)  [fmt] = { 1, 1, 4, vc, CHIP_R600 }
93 #define FMT_48_BIT(fmt)      [fmt] = { 1, 1, 8,  0, CHIP_R600 }
94 #define FMT_64_BIT(fmt, vc)  [fmt] = { 1, 1, 8, vc, CHIP_R600 }
95 #define FMT_96_BIT(fmt)      [fmt] = { 1, 1, 12, 0, CHIP_R600 }
96 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc, CHIP_R600 }
97 
98 struct gpu_formats {
99 	unsigned blockwidth;
100 	unsigned blockheight;
101 	unsigned blocksize;
102 	unsigned valid_color;
103 	enum radeon_family min_family;
104 };
105 
106 static const struct gpu_formats color_formats_table[] = {
107 	/* 8 bit */
108 	FMT_8_BIT(V_038004_COLOR_8, 1),
109 	FMT_8_BIT(V_038004_COLOR_4_4, 1),
110 	FMT_8_BIT(V_038004_COLOR_3_3_2, 1),
111 	FMT_8_BIT(V_038004_FMT_1, 0),
112 
113 	/* 16-bit */
114 	FMT_16_BIT(V_038004_COLOR_16, 1),
115 	FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1),
116 	FMT_16_BIT(V_038004_COLOR_8_8, 1),
117 	FMT_16_BIT(V_038004_COLOR_5_6_5, 1),
118 	FMT_16_BIT(V_038004_COLOR_6_5_5, 1),
119 	FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1),
120 	FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1),
121 	FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1),
122 
123 	/* 24-bit */
124 	FMT_24_BIT(V_038004_FMT_8_8_8),
125 
126 	/* 32-bit */
127 	FMT_32_BIT(V_038004_COLOR_32, 1),
128 	FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1),
129 	FMT_32_BIT(V_038004_COLOR_16_16, 1),
130 	FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1),
131 	FMT_32_BIT(V_038004_COLOR_8_24, 1),
132 	FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1),
133 	FMT_32_BIT(V_038004_COLOR_24_8, 1),
134 	FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1),
135 	FMT_32_BIT(V_038004_COLOR_10_11_11, 1),
136 	FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1),
137 	FMT_32_BIT(V_038004_COLOR_11_11_10, 1),
138 	FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1),
139 	FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1),
140 	FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1),
141 	FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1),
142 	FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0),
143 	FMT_32_BIT(V_038004_FMT_32_AS_8, 0),
144 	FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0),
145 
146 	/* 48-bit */
147 	FMT_48_BIT(V_038004_FMT_16_16_16),
148 	FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT),
149 
150 	/* 64-bit */
151 	FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1),
152 	FMT_64_BIT(V_038004_COLOR_32_32, 1),
153 	FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1),
154 	FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1),
155 	FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1),
156 
157 	FMT_96_BIT(V_038004_FMT_32_32_32),
158 	FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT),
159 
160 	/* 128-bit */
161 	FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1),
162 	FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1),
163 
164 	[V_038004_FMT_GB_GR] = { 2, 1, 4, 0 },
165 	[V_038004_FMT_BG_RG] = { 2, 1, 4, 0 },
166 
167 	/* block compressed formats */
168 	[V_038004_FMT_BC1] = { 4, 4, 8, 0 },
169 	[V_038004_FMT_BC2] = { 4, 4, 16, 0 },
170 	[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
171 	[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
172 	[V_038004_FMT_BC5] = { 4, 4, 16, 0},
173 	[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
174 	[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
175 
176 	/* The other Evergreen formats */
177 	[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
178 };
179 
180 bool r600_fmt_is_valid_color(u32 format)
181 {
182 	if (format >= ARRAY_SIZE(color_formats_table))
183 		return false;
184 
185 	if (color_formats_table[format].valid_color)
186 		return true;
187 
188 	return false;
189 }
190 
191 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family)
192 {
193 	if (format >= ARRAY_SIZE(color_formats_table))
194 		return false;
195 
196 	if (family < color_formats_table[format].min_family)
197 		return false;
198 
199 	if (color_formats_table[format].blockwidth > 0)
200 		return true;
201 
202 	return false;
203 }
204 
205 int r600_fmt_get_blocksize(u32 format)
206 {
207 	if (format >= ARRAY_SIZE(color_formats_table))
208 		return 0;
209 
210 	return color_formats_table[format].blocksize;
211 }
212 
213 int r600_fmt_get_nblocksx(u32 format, u32 w)
214 {
215 	unsigned bw;
216 
217 	if (format >= ARRAY_SIZE(color_formats_table))
218 		return 0;
219 
220 	bw = color_formats_table[format].blockwidth;
221 	if (bw == 0)
222 		return 0;
223 
224 	return (w + bw - 1) / bw;
225 }
226 
227 int r600_fmt_get_nblocksy(u32 format, u32 h)
228 {
229 	unsigned bh;
230 
231 	if (format >= ARRAY_SIZE(color_formats_table))
232 		return 0;
233 
234 	bh = color_formats_table[format].blockheight;
235 	if (bh == 0)
236 		return 0;
237 
238 	return (h + bh - 1) / bh;
239 }
240 
241 struct array_mode_checker {
242 	int array_mode;
243 	u32 group_size;
244 	u32 nbanks;
245 	u32 npipes;
246 	u32 nsamples;
247 	u32 blocksize;
248 };
249 
250 /* returns alignment in pixels for pitch/height/depth and bytes for base */
251 static int r600_get_array_mode_alignment(struct array_mode_checker *values,
252 						u32 *pitch_align,
253 						u32 *height_align,
254 						u32 *depth_align,
255 						u64 *base_align)
256 {
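	/* a micro tile is 8x8 elements; a macro tile spans nbanks x npipes micro tiles */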
257 	u32 tile_width = 8;
258 	u32 tile_height = 8;
259 	u32 macro_tile_width = values->nbanks;
260 	u32 macro_tile_height = values->npipes;
261 	u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples;
262 	u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes;
263 
264 	switch (values->array_mode) {
265 	case ARRAY_LINEAR_GENERAL:
266 		/* technically tile_width/_height for pitch/height */
267 		*pitch_align = 1; /* tile_width */
268 		*height_align = 1; /* tile_height */
269 		*depth_align = 1;
270 		*base_align = 1;
271 		break;
272 	case ARRAY_LINEAR_ALIGNED:
273 		*pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize));
274 		*height_align = 1;
275 		*depth_align = 1;
276 		*base_align = values->group_size;
277 		break;
278 	case ARRAY_1D_TILED_THIN1:
279 		*pitch_align = max((u32)tile_width,
280 				   (u32)(values->group_size /
281 					 (tile_height * values->blocksize * values->nsamples)));
282 		*height_align = tile_height;
283 		*depth_align = 1;
284 		*base_align = values->group_size;
285 		break;
286 	case ARRAY_2D_TILED_THIN1:
287 		*pitch_align = max((u32)macro_tile_width * tile_width,
288 				(u32)((values->group_size * values->nbanks) /
289 				(values->blocksize * values->nsamples * tile_width)));
290 		*height_align = macro_tile_height * tile_height;
291 		*depth_align = 1;
292 		*base_align = max(macro_tile_bytes,
293 				  (*pitch_align) * values->blocksize * (*height_align) * values->nsamples);
294 		break;
295 	default:
296 		return -EINVAL;
297 	}
298 
299 	return 0;
300 }
301 
302 static void r600_cs_track_init(struct r600_cs_track *track)
303 {
304 	int i;
305 
306 	/* assume DX9 mode */
307 	track->sq_config = DX9_CONSTS;
308 	for (i = 0; i < 8; i++) {
309 		track->cb_color_base_last[i] = 0;
310 		track->cb_color_size[i] = 0;
311 		track->cb_color_size_idx[i] = 0;
312 		track->cb_color_info[i] = 0;
313 		track->cb_color_view[i] = 0xFFFFFFFF;
314 		track->cb_color_bo[i] = NULL;
315 		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
316 		track->cb_color_bo_mc[i] = 0xFFFFFFFF;
317 		track->cb_color_frag_bo[i] = NULL;
318 		track->cb_color_frag_offset[i] = 0xFFFFFFFF;
319 		track->cb_color_tile_bo[i] = NULL;
320 		track->cb_color_tile_offset[i] = 0xFFFFFFFF;
321 		track->cb_color_mask[i] = 0xFFFFFFFF;
322 	}
323 	track->is_resolve = false;
324 	track->nsamples = 16;
325 	track->log_nsamples = 4;
326 	track->cb_target_mask = 0xFFFFFFFF;
327 	track->cb_shader_mask = 0xFFFFFFFF;
328 	track->cb_dirty = true;
329 	track->db_bo = NULL;
330 	track->db_bo_mc = 0xFFFFFFFF;
331 	/* assume the biggest format and that htile is enabled */
332 	track->db_depth_info = 7 | (1 << 25);
333 	track->db_depth_view = 0xFFFFC000;
334 	track->db_depth_size = 0xFFFFFFFF;
335 	track->db_depth_size_idx = 0;
336 	track->db_depth_control = 0xFFFFFFFF;
337 	track->db_dirty = true;
338 	track->htile_bo = NULL;
339 	track->htile_offset = 0xFFFFFFFF;
340 	track->htile_surface = 0;
341 
342 	for (i = 0; i < 4; i++) {
343 		track->vgt_strmout_size[i] = 0;
344 		track->vgt_strmout_bo[i] = NULL;
345 		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
346 		track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF;
347 	}
348 	track->streamout_dirty = true;
349 	track->sx_misc_kill_all_prims = false;
350 }
351 
352 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
353 {
354 	struct r600_cs_track *track = p->track;
355 	u32 slice_tile_max, size, tmp;
356 	u32 height, height_align, pitch, pitch_align, depth_align;
357 	u64 base_offset, base_align;
358 	struct array_mode_checker array_check;
359 	volatile u32 *ib = p->ib.ptr;
360 	unsigned array_mode;
361 	u32 format;
362 	/* When resolve is used, the second colorbuffer always has 1 sample. */
363 	unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples;
364 
365 	size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
366 	format = G_0280A0_FORMAT(track->cb_color_info[i]);
367 	if (!r600_fmt_is_valid_color(format)) {
368 		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
369 			 __func__, __LINE__, format,
370 			i, track->cb_color_info[i]);
371 		return -EINVAL;
372 	}
373 	/* pitch in pixels */
374 	pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8;
375 	slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
376 	slice_tile_max *= 64;
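	/* slice_tile_max is now in pixels (one tile is 8x8 = 64 pixels); the height follows from pixels / pitch */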
377 	height = slice_tile_max / pitch;
378 	if (height > 8192)
379 		height = 8192;
380 	array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]);
381 
382 	base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i];
383 	array_check.array_mode = array_mode;
384 	array_check.group_size = track->group_size;
385 	array_check.nbanks = track->nbanks;
386 	array_check.npipes = track->npipes;
387 	array_check.nsamples = nsamples;
388 	array_check.blocksize = r600_fmt_get_blocksize(format);
389 	if (r600_get_array_mode_alignment(&array_check,
390 					  &pitch_align, &height_align, &depth_align, &base_align)) {
391 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
392 			 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
393 			 track->cb_color_info[i]);
394 		return -EINVAL;
395 	}
396 	switch (array_mode) {
397 	case V_0280A0_ARRAY_LINEAR_GENERAL:
398 		break;
399 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
400 		break;
401 	case V_0280A0_ARRAY_1D_TILED_THIN1:
402 		/* avoid breaking userspace */
403 		if (height > 7)
404 			height &= ~0x7;
405 		break;
406 	case V_0280A0_ARRAY_2D_TILED_THIN1:
407 		break;
408 	default:
409 		dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
410 			G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
411 			track->cb_color_info[i]);
412 		return -EINVAL;
413 	}
414 
415 	if (!IS_ALIGNED(pitch, pitch_align)) {
416 		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
417 			 __func__, __LINE__, pitch, pitch_align, array_mode);
418 		return -EINVAL;
419 	}
420 	if (!IS_ALIGNED(height, height_align)) {
421 		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
422 			 __func__, __LINE__, height, height_align, array_mode);
423 		return -EINVAL;
424 	}
425 	if (!IS_ALIGNED(base_offset, base_align)) {
426 		dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i,
427 			 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
428 		return -EINVAL;
429 	}
430 
431 	/* check offset */
432 	tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) *
433 	      r600_fmt_get_blocksize(format) * nsamples;
434 	switch (array_mode) {
435 	default:
436 	case V_0280A0_ARRAY_LINEAR_GENERAL:
437 	case V_0280A0_ARRAY_LINEAR_ALIGNED:
438 		tmp += track->cb_color_view[i] & 0xFF;
439 		break;
440 	case V_0280A0_ARRAY_1D_TILED_THIN1:
441 	case V_0280A0_ARRAY_2D_TILED_THIN1:
442 		tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp;
443 		break;
444 	}
445 	if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
446 		if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
447 			/* the initial DDX does bad things with the CB size occasionally */
448 			/* it rounds up height too far for slice tile max but the BO is smaller */
449 			/* r600c,g also seem to flush at bad times in some apps resulting in
450 			 * bogus values here. So for linear just allow anything to avoid breaking
451 			 * broken userspace.
452 			 */
453 		} else {
454 			dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n",
455 				 __func__, i, array_mode,
456 				 (uintmax_t)track->cb_color_bo_offset[i], tmp,
457 				 radeon_bo_size(track->cb_color_bo[i]),
458 				 pitch, height, r600_fmt_get_nblocksx(format, pitch),
459 				 r600_fmt_get_nblocksy(format, height),
460 				 r600_fmt_get_blocksize(format));
461 			return -EINVAL;
462 		}
463 	}
464 	/* limit max tile */
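	/* (height * pitch) >> 6 expresses the surface area in 64-pixel (8x8) tiles to bound slice_tile_max */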
465 	tmp = (height * pitch) >> 6;
466 	if (tmp < slice_tile_max)
467 		slice_tile_max = tmp;
468 	tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) |
469 		S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
470 	ib[track->cb_color_size_idx[i]] = tmp;
471 
472 	/* FMASK/CMASK */
473 	switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
474 	case V_0280A0_TILE_DISABLE:
475 		break;
476 	case V_0280A0_FRAG_ENABLE:
477 		if (track->nsamples > 1) {
478 			uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]);
479 			/* the tile size is 8x8, but the size is in units of bits;
480 			 * for bytes, just multiply by 8. */
481 			uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1);
482 
483 			if (bytes + track->cb_color_frag_offset[i] >
484 			    radeon_bo_size(track->cb_color_frag_bo[i])) {
485 				dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
486 					 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
487 					 __func__, tile_max, bytes,
488 					 (uintmax_t)track->cb_color_frag_offset[i],
489 					 radeon_bo_size(track->cb_color_frag_bo[i]));
490 				return -EINVAL;
491 			}
492 		}
493 		/* fall through */
494 	case V_0280A0_CLEAR_ENABLE:
495 	{
496 		uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]);
497 		/* One block = 128x128 pixels, one 8x8 tile has 4 bits, so
498 		 * (128*128) / (8*8) / 2 = 128 bytes per block. */
499 		uint32_t bytes = (block_max + 1) * 128;
500 
501 		if (bytes + track->cb_color_tile_offset[i] >
502 		    radeon_bo_size(track->cb_color_tile_bo[i])) {
503 			dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
504 				 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n",
505 				 __func__, block_max, bytes,
506 				 (uintmax_t)track->cb_color_tile_offset[i],
507 				 radeon_bo_size(track->cb_color_tile_bo[i]));
508 			return -EINVAL;
509 		}
510 		break;
511 	}
512 	default:
513 		dev_warn(p->dev, "%s invalid tile mode\n", __func__);
514 		return -EINVAL;
515 	}
516 	return 0;
517 }
518 
519 static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
520 {
521 	struct r600_cs_track *track = p->track;
522 	u32 nviews, bpe, ntiles, size, slice_tile_max, tmp;
523 	u32 height_align, pitch_align, depth_align;
524 	u32 pitch = 8192;
525 	u32 height = 8192;
526 	u64 base_offset, base_align;
527 	struct array_mode_checker array_check;
528 	int array_mode;
529 	volatile u32 *ib = p->ib.ptr;
530 
531 
532 	if (track->db_bo == NULL) {
533 		dev_warn(p->dev, "z/stencil with no depth buffer\n");
534 		return -EINVAL;
535 	}
536 	switch (G_028010_FORMAT(track->db_depth_info)) {
537 	case V_028010_DEPTH_16:
538 		bpe = 2;
539 		break;
540 	case V_028010_DEPTH_X8_24:
541 	case V_028010_DEPTH_8_24:
542 	case V_028010_DEPTH_X8_24_FLOAT:
543 	case V_028010_DEPTH_8_24_FLOAT:
544 	case V_028010_DEPTH_32_FLOAT:
545 		bpe = 4;
546 		break;
547 	case V_028010_DEPTH_X24_8_32_FLOAT:
548 		bpe = 8;
549 		break;
550 	default:
551 		dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
552 		return -EINVAL;
553 	}
554 	if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
555 		if (!track->db_depth_size_idx) {
556 			dev_warn(p->dev, "z/stencil buffer size not set\n");
557 			return -EINVAL;
558 		}
559 		tmp = radeon_bo_size(track->db_bo) - track->db_offset;
560 		tmp = (tmp / bpe) >> 6;
561 		if (!tmp) {
562 			dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
563 					track->db_depth_size, bpe, track->db_offset,
564 					radeon_bo_size(track->db_bo));
565 			return -EINVAL;
566 		}
567 		ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
568 	} else {
569 		size = radeon_bo_size(track->db_bo);
570 		/* pitch in pixels */
571 		pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8;
572 		slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
573 		slice_tile_max *= 64;
574 		height = slice_tile_max / pitch;
575 		if (height > 8192)
576 			height = 8192;
577 		base_offset = track->db_bo_mc + track->db_offset;
578 		array_mode = G_028010_ARRAY_MODE(track->db_depth_info);
579 		array_check.array_mode = array_mode;
580 		array_check.group_size = track->group_size;
581 		array_check.nbanks = track->nbanks;
582 		array_check.npipes = track->npipes;
583 		array_check.nsamples = track->nsamples;
584 		array_check.blocksize = bpe;
585 		if (r600_get_array_mode_alignment(&array_check,
586 					&pitch_align, &height_align, &depth_align, &base_align)) {
587 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
588 					G_028010_ARRAY_MODE(track->db_depth_info),
589 					track->db_depth_info);
590 			return -EINVAL;
591 		}
592 		switch (array_mode) {
593 		case V_028010_ARRAY_1D_TILED_THIN1:
594 			/* don't break userspace */
595 			height &= ~0x7;
596 			break;
597 		case V_028010_ARRAY_2D_TILED_THIN1:
598 			break;
599 		default:
600 			dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
601 					G_028010_ARRAY_MODE(track->db_depth_info),
602 					track->db_depth_info);
603 			return -EINVAL;
604 		}
605 
606 		if (!IS_ALIGNED(pitch, pitch_align)) {
607 			dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
608 					__func__, __LINE__, pitch, pitch_align, array_mode);
609 			return -EINVAL;
610 		}
611 		if (!IS_ALIGNED(height, height_align)) {
612 			dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
613 					__func__, __LINE__, height, height_align, array_mode);
614 			return -EINVAL;
615 		}
616 		if (!IS_ALIGNED(base_offset, base_align)) {
617 			dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__,
618 					(uintmax_t)base_offset, (uintmax_t)base_align, array_mode);
619 			return -EINVAL;
620 		}
621 
622 		ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
623 		nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
624 		tmp = ntiles * bpe * 64 * nviews * track->nsamples;
625 		if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
626 			dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
627 					array_mode,
628 					track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
629 					radeon_bo_size(track->db_bo));
630 			return -EINVAL;
631 		}
632 	}
633 
634 	/* hyperz */
635 	if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
636 		unsigned long size;
637 		unsigned nbx, nby;
638 
639 		if (track->htile_bo == NULL) {
640 			dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
641 				 __func__, __LINE__, track->db_depth_info);
642 			return -EINVAL;
643 		}
644 		if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
645 			dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
646 				 __func__, __LINE__, track->db_depth_size);
647 			return -EINVAL;
648 		}
649 
650 		nbx = pitch;
651 		nby = height;
652 		if (G_028D24_LINEAR(track->htile_surface)) {
653 			/* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */
654 			nbx = round_up(nbx, 16 * 8);
655 			/* nby is npipes htiles aligned == npipes * 8 pixel aligned */
656 			nby = round_up(nby, track->npipes * 8);
657 		} else {
658 			/* always assume 8x8 htile */
659 			/* alignment is htile alignment * 8; htile alignment varies with the
660 			 * number of pipes, the tile width, and nby
661 			 */
662 			switch (track->npipes) {
663 			case 8:
664 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
665 				nbx = round_up(nbx, 64 * 8);
666 				nby = round_up(nby, 64 * 8);
667 				break;
668 			case 4:
669 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
670 				nbx = round_up(nbx, 64 * 8);
671 				nby = round_up(nby, 32 * 8);
672 				break;
673 			case 2:
674 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
675 				nbx = round_up(nbx, 32 * 8);
676 				nby = round_up(nby, 32 * 8);
677 				break;
678 			case 1:
679 				/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
680 				nbx = round_up(nbx, 32 * 8);
681 				nby = round_up(nby, 16 * 8);
682 				break;
683 			default:
684 				dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
685 					 __func__, __LINE__, track->npipes);
686 				return -EINVAL;
687 			}
688 		}
689 		/* compute number of htiles */
690 		nbx = nbx >> 3;
691 		nby = nby >> 3;
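		/* each 8x8 htile takes 4 bytes in the htile buffer */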
692 		/* size must be aligned on npipes * 2K boundary */
693 		size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
694 		size += track->htile_offset;
695 
696 		if (size > radeon_bo_size(track->htile_bo)) {
697 			dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
698 				 __func__, __LINE__, radeon_bo_size(track->htile_bo),
699 				 size, nbx, nby);
700 			return -EINVAL;
701 		}
702 	}
703 
704 	track->db_dirty = false;
705 	return 0;
706 }
707 
708 static int r600_cs_track_check(struct radeon_cs_parser *p)
709 {
710 	struct r600_cs_track *track = p->track;
711 	u32 tmp;
712 	int r, i;
713 
714 	/* on legacy kernels we don't perform advanced checks */
715 	if (p->rdev == NULL)
716 		return 0;
717 
718 	/* check streamout */
719 	if (track->streamout_dirty && track->vgt_strmout_en) {
720 		for (i = 0; i < 4; i++) {
721 			if (track->vgt_strmout_buffer_en & (1 << i)) {
722 				if (track->vgt_strmout_bo[i]) {
723 					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
724 						(u64)track->vgt_strmout_size[i];
725 					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
726 						DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n",
727 							  i, (uintmax_t)offset,
728 							  radeon_bo_size(track->vgt_strmout_bo[i]));
729 						return -EINVAL;
730 					}
731 				} else {
732 					dev_warn(p->dev, "No buffer for streamout %d\n", i);
733 					return -EINVAL;
734 				}
735 			}
736 		}
737 		track->streamout_dirty = false;
738 	}
739 
740 	if (track->sx_misc_kill_all_prims)
741 		return 0;
742 
743 	/* check that we have a cb for each enabled target; we don't check
744 	 * shader_mask because it seems mesa isn't always setting it :(
745 	 */
746 	if (track->cb_dirty) {
747 		tmp = track->cb_target_mask;
748 
749 		/* We must check both colorbuffers for RESOLVE. */
750 		if (track->is_resolve) {
751 			tmp |= 0xff;
752 		}
753 
754 		for (i = 0; i < 8; i++) {
755 			if ((tmp >> (i * 4)) & 0xF) {
756 				/* at least one component is enabled */
757 				if (track->cb_color_bo[i] == NULL) {
758 					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
759 						__func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
760 					return -EINVAL;
761 				}
762 				/* perform rewrite of CB_COLOR[0-7]_SIZE */
763 				r = r600_cs_track_validate_cb(p, i);
764 				if (r)
765 					return r;
766 			}
767 		}
768 		track->cb_dirty = false;
769 	}
770 
771 	/* Check depth buffer */
772 	if (track->db_dirty &&
773 	    G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID &&
774 	    (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
775 	     G_028800_Z_ENABLE(track->db_depth_control))) {
776 		r = r600_cs_track_validate_db(p);
777 		if (r)
778 			return r;
779 	}
780 
781 	return 0;
782 }
783 
784 /**
785  * r600_cs_packet_parse_vline() - parse userspace VLINE packet
786  * @p:		parser structure holding parsing context.
787  *
788  * This is an R600-specific function for parsing VLINE packets.
789  * The real work is done by the r600_cs_common_vline_parse() function.
790  * Here we just set up the ASIC-specific register table and call
791  * the common implementation function.
792  */
793 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
794 {
795 	static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
796 					      AVIVO_D2MODE_VLINE_START_END};
797 	static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
798 					   AVIVO_D2MODE_VLINE_STATUS};
799 
800 	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
801 }
802 
803 /**
804  * r600_cs_common_vline_parse() - common vline parser
805  * @p:			parser structure holding parsing context.
806  * @vline_start_end:    table of vline_start_end registers
807  * @vline_status:       table of vline_status registers
808  *
809  * Userspace sends a special sequence for VLINE waits.
810  * PACKET0 - VLINE_START_END + value
811  * PACKET3 - WAIT_REG_MEM poll vline status reg
812  * RELOC (P3) - crtc_id in reloc.
813  *
814  * This function parses this and relocates the VLINE START END
815  * and WAIT_REG_MEM packets to the correct crtc.
816  * It also detects a switched-off crtc and nulls out the
817  * wait in that case. This function is common to all ASICs that
818  * are R600 and newer. The parsing algorithm is the same; only
819  * the registers used differ.
820  *
821  * The caller is the ASIC-specific function, which passes the parser
822  * context and the ASIC-specific register table.
823  */
824 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
825 			       uint32_t *vline_start_end,
826 			       uint32_t *vline_status)
827 {
828 	struct drm_mode_object *obj;
829 	struct drm_crtc *crtc;
830 	struct radeon_crtc *radeon_crtc;
831 	struct radeon_cs_packet p3reloc, wait_reg_mem;
832 	int crtc_id;
833 	int r;
834 	uint32_t header, h_idx, reg, wait_reg_mem_info;
835 	volatile uint32_t *ib;
836 
837 	ib = p->ib.ptr;
838 
839 	/* parse the WAIT_REG_MEM */
840 	r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
841 	if (r)
842 		return r;
843 
844 	/* check it's a WAIT_REG_MEM */
845 	if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
846 	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
847 		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
848 		return -EINVAL;
849 	}
850 
851 	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
852 	/* bit 4 is reg (0) or mem (1) */
853 	if (wait_reg_mem_info & 0x10) {
854 		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
855 		return -EINVAL;
856 	}
857 	/* bit 8 is me (0) or pfp (1) */
858 	if (wait_reg_mem_info & 0x100) {
859 		DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
860 		return -EINVAL;
861 	}
862 	/* waiting for value to be equal */
863 	if ((wait_reg_mem_info & 0x7) != 0x3) {
864 		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
865 		return -EINVAL;
866 	}
867 	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
868 		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
869 		return -EINVAL;
870 	}
871 
872 	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
873 		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
874 		return -EINVAL;
875 	}
876 
877 	/* jump over the NOP */
878 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
879 	if (r)
880 		return r;
881 
882 	h_idx = p->idx - 2;
883 	p->idx += wait_reg_mem.count + 2;
884 	p->idx += p3reloc.count + 2;
885 
886 	header = radeon_get_ib_value(p, h_idx);
887 	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
888 	reg = R600_CP_PACKET0_GET_REG(header);
889 
890 	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
891 	if (!obj) {
892 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
893 		return -EINVAL;
894 	}
895 	crtc = obj_to_crtc(obj);
896 	radeon_crtc = to_radeon_crtc(crtc);
897 	crtc_id = radeon_crtc->crtc_id;
898 
899 	if (!crtc->enabled) {
900 		/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
901 		ib[h_idx + 2] = PACKET2(0);
902 		ib[h_idx + 3] = PACKET2(0);
903 		ib[h_idx + 4] = PACKET2(0);
904 		ib[h_idx + 5] = PACKET2(0);
905 		ib[h_idx + 6] = PACKET2(0);
906 		ib[h_idx + 7] = PACKET2(0);
907 		ib[h_idx + 8] = PACKET2(0);
908 	} else if (reg == vline_start_end[0]) {
909 		header &= ~R600_CP_PACKET0_REG_MASK;
910 		header |= vline_start_end[crtc_id] >> 2;
911 		ib[h_idx] = header;
912 		ib[h_idx + 4] = vline_status[crtc_id] >> 2;
913 	} else {
914 		DRM_ERROR("unknown crtc reloc\n");
915 		return -EINVAL;
916 	}
917 	return 0;
918 }
919 
920 static int r600_packet0_check(struct radeon_cs_parser *p,
921 				struct radeon_cs_packet *pkt,
922 				unsigned idx, unsigned reg)
923 {
924 	int r;
925 
926 	switch (reg) {
927 	case AVIVO_D1MODE_VLINE_START_END:
928 		r = r600_cs_packet_parse_vline(p);
929 		if (r) {
930 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
931 					idx, reg);
932 			return r;
933 		}
934 		break;
935 	default:
936 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
937 		       reg, idx);
938 		return -EINVAL;
939 	}
940 	return 0;
941 }
942 
943 static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
944 				struct radeon_cs_packet *pkt)
945 {
946 	unsigned reg, i;
947 	unsigned idx;
948 	int r;
949 
950 	idx = pkt->idx + 1;
951 	reg = pkt->reg;
952 	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
953 		r = r600_packet0_check(p, pkt, idx, reg);
954 		if (r) {
955 			return r;
956 		}
957 	}
958 	return 0;
959 }
960 
961 /**
962  * r600_cs_check_reg() - check if register is authorized or not
963  * @p: parser structure holding parsing context
964  * @reg: register we are testing
965  * @idx: index into the cs buffer
966  *
967  * This function will test against r600_reg_safe_bm and return 0
968  * if the register is safe. If the register is not flagged as safe, this
969  * function will test it against a list of registers needing special handling.
970  */
971 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
972 {
973 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
974 	struct radeon_cs_reloc *reloc;
975 	u32 m, i, tmp, *ib;
976 	int r;
977 
978 	i = (reg >> 7);
979 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
980 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
981 		return -EINVAL;
982 	}
983 	m = 1 << ((reg >> 2) & 31);
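	/* each bitmap word covers 32 dword registers (128 bytes); a clear bit means the register is safe as-is */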
984 	if (!(r600_reg_safe_bm[i] & m))
985 		return 0;
986 	ib = p->ib.ptr;
987 	switch (reg) {
988 	/* force the following regs to 0 in an attempt to disable the out buffer;
989 	 * we would need to better understand how it works in order to perform a
990 	 * security check on it (Jerome)
991 	 */
992 	case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
993 	case R_008C44_SQ_ESGS_RING_SIZE:
994 	case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
995 	case R_008C54_SQ_ESTMP_RING_SIZE:
996 	case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
997 	case R_008C74_SQ_FBUF_RING_SIZE:
998 	case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
999 	case R_008C5C_SQ_GSTMP_RING_SIZE:
1000 	case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
1001 	case R_008C4C_SQ_GSVS_RING_SIZE:
1002 	case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
1003 	case R_008C6C_SQ_PSTMP_RING_SIZE:
1004 	case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
1005 	case R_008C7C_SQ_REDUC_RING_SIZE:
1006 	case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
1007 	case R_008C64_SQ_VSTMP_RING_SIZE:
1008 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
1009 		/* get value to populate the IB, don't remove */
1010 		tmp = radeon_get_ib_value(p, idx);
1011 		ib[idx] = 0;
1012 		break;
1013 	case SQ_CONFIG:
1014 		track->sq_config = radeon_get_ib_value(p, idx);
1015 		break;
1016 	case R_028800_DB_DEPTH_CONTROL:
1017 		track->db_depth_control = radeon_get_ib_value(p, idx);
1018 		track->db_dirty = true;
1019 		break;
1020 	case R_028010_DB_DEPTH_INFO:
1021 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1022 		    radeon_cs_packet_next_is_pkt3_nop(p)) {
1023 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1024 			if (r) {
1025 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
1026 					 "0x%04X\n", reg);
1027 				return -EINVAL;
1028 			}
1029 			track->db_depth_info = radeon_get_ib_value(p, idx);
1030 			ib[idx] &= C_028010_ARRAY_MODE;
1031 			track->db_depth_info &= C_028010_ARRAY_MODE;
1032 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1033 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1034 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
1035 			} else {
1036 				ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1037 				track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
1038 			}
1039 		} else {
1040 			track->db_depth_info = radeon_get_ib_value(p, idx);
1041 		}
1042 		track->db_dirty = true;
1043 		break;
1044 	case R_028004_DB_DEPTH_VIEW:
1045 		track->db_depth_view = radeon_get_ib_value(p, idx);
1046 		track->db_dirty = true;
1047 		break;
1048 	case R_028000_DB_DEPTH_SIZE:
1049 		track->db_depth_size = radeon_get_ib_value(p, idx);
1050 		track->db_depth_size_idx = idx;
1051 		track->db_dirty = true;
1052 		break;
1053 	case R_028AB0_VGT_STRMOUT_EN:
1054 		track->vgt_strmout_en = radeon_get_ib_value(p, idx);
1055 		track->streamout_dirty = true;
1056 		break;
1057 	case R_028B20_VGT_STRMOUT_BUFFER_EN:
1058 		track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
1059 		track->streamout_dirty = true;
1060 		break;
1061 	case VGT_STRMOUT_BUFFER_BASE_0:
1062 	case VGT_STRMOUT_BUFFER_BASE_1:
1063 	case VGT_STRMOUT_BUFFER_BASE_2:
1064 	case VGT_STRMOUT_BUFFER_BASE_3:
1065 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1066 		if (r) {
1067 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1068 					"0x%04X\n", reg);
1069 			return -EINVAL;
1070 		}
1071 		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
1072 		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1073 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1074 		track->vgt_strmout_bo[tmp] = reloc->robj;
1075 		track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset;
1076 		track->streamout_dirty = true;
1077 		break;
1078 	case VGT_STRMOUT_BUFFER_SIZE_0:
1079 	case VGT_STRMOUT_BUFFER_SIZE_1:
1080 	case VGT_STRMOUT_BUFFER_SIZE_2:
1081 	case VGT_STRMOUT_BUFFER_SIZE_3:
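		/* the per-buffer register sets are 16 bytes apart, so this yields the buffer index */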
1082 		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
1083 		/* size in register is DWs, convert to bytes */
1084 		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
1085 		track->streamout_dirty = true;
1086 		break;
1087 	case CP_COHER_BASE:
1088 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1089 		if (r) {
1090 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1091 					"0x%04X\n", reg);
1092 			return -EINVAL;
1093 		}
1094 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1095 		break;
1096 	case R_028238_CB_TARGET_MASK:
1097 		track->cb_target_mask = radeon_get_ib_value(p, idx);
1098 		track->cb_dirty = true;
1099 		break;
1100 	case R_02823C_CB_SHADER_MASK:
1101 		track->cb_shader_mask = radeon_get_ib_value(p, idx);
1102 		break;
1103 	case R_028C04_PA_SC_AA_CONFIG:
1104 		tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
1105 		track->log_nsamples = tmp;
1106 		track->nsamples = 1 << tmp;
1107 		track->cb_dirty = true;
1108 		break;
1109 	case R_028808_CB_COLOR_CONTROL:
1110 		tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx));
1111 		track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX;
1112 		track->cb_dirty = true;
1113 		break;
1114 	case R_0280A0_CB_COLOR0_INFO:
1115 	case R_0280A4_CB_COLOR1_INFO:
1116 	case R_0280A8_CB_COLOR2_INFO:
1117 	case R_0280AC_CB_COLOR3_INFO:
1118 	case R_0280B0_CB_COLOR4_INFO:
1119 	case R_0280B4_CB_COLOR5_INFO:
1120 	case R_0280B8_CB_COLOR6_INFO:
1121 	case R_0280BC_CB_COLOR7_INFO:
1122 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1123 		     radeon_cs_packet_next_is_pkt3_nop(p)) {
1124 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1125 			if (r) {
1126 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1127 				return -EINVAL;
1128 			}
1129 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1130 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1131 			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1132 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1133 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
1134 			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1135 				ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1136 				track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
1137 			}
1138 		} else {
1139 			tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
1140 			track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
1141 		}
1142 		track->cb_dirty = true;
1143 		break;
1144 	case R_028080_CB_COLOR0_VIEW:
1145 	case R_028084_CB_COLOR1_VIEW:
1146 	case R_028088_CB_COLOR2_VIEW:
1147 	case R_02808C_CB_COLOR3_VIEW:
1148 	case R_028090_CB_COLOR4_VIEW:
1149 	case R_028094_CB_COLOR5_VIEW:
1150 	case R_028098_CB_COLOR6_VIEW:
1151 	case R_02809C_CB_COLOR7_VIEW:
1152 		tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4;
1153 		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
1154 		track->cb_dirty = true;
1155 		break;
1156 	case R_028060_CB_COLOR0_SIZE:
1157 	case R_028064_CB_COLOR1_SIZE:
1158 	case R_028068_CB_COLOR2_SIZE:
1159 	case R_02806C_CB_COLOR3_SIZE:
1160 	case R_028070_CB_COLOR4_SIZE:
1161 	case R_028074_CB_COLOR5_SIZE:
1162 	case R_028078_CB_COLOR6_SIZE:
1163 	case R_02807C_CB_COLOR7_SIZE:
1164 		tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
1165 		track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
1166 		track->cb_color_size_idx[tmp] = idx;
1167 		track->cb_dirty = true;
1168 		break;
1169 		/* These registers were added late; there is userspace
1170 		 * which does provide a relocation for them but sets a
1171 		 * 0 offset. In order to avoid breaking old userspace
1172 		 * we detect this and set the address to point to the last
1173 		 * CB_COLOR0_BASE; note that if userspace doesn't set
1174 		 * CB_COLOR0_BASE before this register we will report an
1175 		 * error. Old userspace always set CB_COLOR0_BASE
1176 		 * before any of this.
1177 		 */
1178 	case R_0280E0_CB_COLOR0_FRAG:
1179 	case R_0280E4_CB_COLOR1_FRAG:
1180 	case R_0280E8_CB_COLOR2_FRAG:
1181 	case R_0280EC_CB_COLOR3_FRAG:
1182 	case R_0280F0_CB_COLOR4_FRAG:
1183 	case R_0280F4_CB_COLOR5_FRAG:
1184 	case R_0280F8_CB_COLOR6_FRAG:
1185 	case R_0280FC_CB_COLOR7_FRAG:
1186 		tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1187 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1188 			if (!track->cb_color_base_last[tmp]) {
1189 				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
1190 				return -EINVAL;
1191 			}
1192 			track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
1193 			track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1194 			ib[idx] = track->cb_color_base_last[tmp];
1195 		} else {
1196 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1197 			if (r) {
1198 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1199 				return -EINVAL;
1200 			}
1201 			track->cb_color_frag_bo[tmp] = reloc->robj;
1202 			track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8;
1203 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1204 		}
1205 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1206 			track->cb_dirty = true;
1207 		}
1208 		break;
1209 	case R_0280C0_CB_COLOR0_TILE:
1210 	case R_0280C4_CB_COLOR1_TILE:
1211 	case R_0280C8_CB_COLOR2_TILE:
1212 	case R_0280CC_CB_COLOR3_TILE:
1213 	case R_0280D0_CB_COLOR4_TILE:
1214 	case R_0280D4_CB_COLOR5_TILE:
1215 	case R_0280D8_CB_COLOR6_TILE:
1216 	case R_0280DC_CB_COLOR7_TILE:
1217 		tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1218 		if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1219 			if (!track->cb_color_base_last[tmp]) {
1220 				dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
1221 				return -EINVAL;
1222 			}
1223 			track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
1224 			track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1225 			ib[idx] = track->cb_color_base_last[tmp];
1226 		} else {
1227 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1228 			if (r) {
1229 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1230 				return -EINVAL;
1231 			}
1232 			track->cb_color_tile_bo[tmp] = reloc->robj;
1233 			track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8;
1234 			ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1235 		}
1236 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1237 			track->cb_dirty = true;
1238 		}
1239 		break;
1240 	case R_028100_CB_COLOR0_MASK:
1241 	case R_028104_CB_COLOR1_MASK:
1242 	case R_028108_CB_COLOR2_MASK:
1243 	case R_02810C_CB_COLOR3_MASK:
1244 	case R_028110_CB_COLOR4_MASK:
1245 	case R_028114_CB_COLOR5_MASK:
1246 	case R_028118_CB_COLOR6_MASK:
1247 	case R_02811C_CB_COLOR7_MASK:
1248 		tmp = (reg - R_028100_CB_COLOR0_MASK) / 4;
1249 		track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx);
1250 		if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) {
1251 			track->cb_dirty = true;
1252 		}
1253 		break;
1254 	case CB_COLOR0_BASE:
1255 	case CB_COLOR1_BASE:
1256 	case CB_COLOR2_BASE:
1257 	case CB_COLOR3_BASE:
1258 	case CB_COLOR4_BASE:
1259 	case CB_COLOR5_BASE:
1260 	case CB_COLOR6_BASE:
1261 	case CB_COLOR7_BASE:
1262 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1263 		if (r) {
1264 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1265 					"0x%04X\n", reg);
1266 			return -EINVAL;
1267 		}
1268 		tmp = (reg - CB_COLOR0_BASE) / 4;
1269 		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
1270 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1271 		track->cb_color_base_last[tmp] = ib[idx];
1272 		track->cb_color_bo[tmp] = reloc->robj;
1273 		track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset;
1274 		track->cb_dirty = true;
1275 		break;
1276 	case DB_DEPTH_BASE:
1277 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1278 		if (r) {
1279 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1280 					"0x%04X\n", reg);
1281 			return -EINVAL;
1282 		}
1283 		track->db_offset = radeon_get_ib_value(p, idx) << 8;
1284 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1285 		track->db_bo = reloc->robj;
1286 		track->db_bo_mc = reloc->lobj.gpu_offset;
1287 		track->db_dirty = true;
1288 		break;
1289 	case DB_HTILE_DATA_BASE:
1290 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1291 		if (r) {
1292 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1293 					"0x%04X\n", reg);
1294 			return -EINVAL;
1295 		}
1296 		track->htile_offset = radeon_get_ib_value(p, idx) << 8;
1297 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1298 		track->htile_bo = reloc->robj;
1299 		track->db_dirty = true;
1300 		break;
1301 	case DB_HTILE_SURFACE:
1302 		track->htile_surface = radeon_get_ib_value(p, idx);
1303 		/* force 8x8 htile width and height */
1304 		ib[idx] |= 3;
1305 		track->db_dirty = true;
1306 		break;
1307 	case SQ_PGM_START_FS:
1308 	case SQ_PGM_START_ES:
1309 	case SQ_PGM_START_VS:
1310 	case SQ_PGM_START_GS:
1311 	case SQ_PGM_START_PS:
1312 	case SQ_ALU_CONST_CACHE_GS_0:
1313 	case SQ_ALU_CONST_CACHE_GS_1:
1314 	case SQ_ALU_CONST_CACHE_GS_2:
1315 	case SQ_ALU_CONST_CACHE_GS_3:
1316 	case SQ_ALU_CONST_CACHE_GS_4:
1317 	case SQ_ALU_CONST_CACHE_GS_5:
1318 	case SQ_ALU_CONST_CACHE_GS_6:
1319 	case SQ_ALU_CONST_CACHE_GS_7:
1320 	case SQ_ALU_CONST_CACHE_GS_8:
1321 	case SQ_ALU_CONST_CACHE_GS_9:
1322 	case SQ_ALU_CONST_CACHE_GS_10:
1323 	case SQ_ALU_CONST_CACHE_GS_11:
1324 	case SQ_ALU_CONST_CACHE_GS_12:
1325 	case SQ_ALU_CONST_CACHE_GS_13:
1326 	case SQ_ALU_CONST_CACHE_GS_14:
1327 	case SQ_ALU_CONST_CACHE_GS_15:
1328 	case SQ_ALU_CONST_CACHE_PS_0:
1329 	case SQ_ALU_CONST_CACHE_PS_1:
1330 	case SQ_ALU_CONST_CACHE_PS_2:
1331 	case SQ_ALU_CONST_CACHE_PS_3:
1332 	case SQ_ALU_CONST_CACHE_PS_4:
1333 	case SQ_ALU_CONST_CACHE_PS_5:
1334 	case SQ_ALU_CONST_CACHE_PS_6:
1335 	case SQ_ALU_CONST_CACHE_PS_7:
1336 	case SQ_ALU_CONST_CACHE_PS_8:
1337 	case SQ_ALU_CONST_CACHE_PS_9:
1338 	case SQ_ALU_CONST_CACHE_PS_10:
1339 	case SQ_ALU_CONST_CACHE_PS_11:
1340 	case SQ_ALU_CONST_CACHE_PS_12:
1341 	case SQ_ALU_CONST_CACHE_PS_13:
1342 	case SQ_ALU_CONST_CACHE_PS_14:
1343 	case SQ_ALU_CONST_CACHE_PS_15:
1344 	case SQ_ALU_CONST_CACHE_VS_0:
1345 	case SQ_ALU_CONST_CACHE_VS_1:
1346 	case SQ_ALU_CONST_CACHE_VS_2:
1347 	case SQ_ALU_CONST_CACHE_VS_3:
1348 	case SQ_ALU_CONST_CACHE_VS_4:
1349 	case SQ_ALU_CONST_CACHE_VS_5:
1350 	case SQ_ALU_CONST_CACHE_VS_6:
1351 	case SQ_ALU_CONST_CACHE_VS_7:
1352 	case SQ_ALU_CONST_CACHE_VS_8:
1353 	case SQ_ALU_CONST_CACHE_VS_9:
1354 	case SQ_ALU_CONST_CACHE_VS_10:
1355 	case SQ_ALU_CONST_CACHE_VS_11:
1356 	case SQ_ALU_CONST_CACHE_VS_12:
1357 	case SQ_ALU_CONST_CACHE_VS_13:
1358 	case SQ_ALU_CONST_CACHE_VS_14:
1359 	case SQ_ALU_CONST_CACHE_VS_15:
1360 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1361 		if (r) {
1362 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
1363 					"0x%04X\n", reg);
1364 			return -EINVAL;
1365 		}
1366 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1367 		break;
1368 	case SX_MEMORY_EXPORT_BASE:
1369 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1370 		if (r) {
1371 			dev_warn(p->dev, "bad SET_CONFIG_REG "
1372 					"0x%04X\n", reg);
1373 			return -EINVAL;
1374 		}
1375 		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1376 		break;
1377 	case SX_MISC:
1378 		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
1379 		break;
1380 	default:
1381 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1382 		return -EINVAL;
1383 	}
1384 	return 0;
1385 }
1386 
1387 unsigned r600_mip_minify(unsigned size, unsigned level)
1388 {
1389 	unsigned val;
1390 
1391 	val = max(1U, size >> level);
1392 	if (level > 0)
1393 		val = roundup_pow_of_two(val);
1394 	return val;
1395 }
1396 
1397 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
1398 			      unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format,
1399 			      unsigned block_align, unsigned height_align, unsigned base_align,
1400 			      unsigned *l0_size, unsigned *mipmap_size)
1401 {
1402 	unsigned offset, i, level;
1403 	unsigned width, height, depth, size;
1404 	unsigned blocksize;
1405 	unsigned nbx, nby;
1406 	unsigned nlevels = llevel - blevel + 1;
1407 
1408 	*l0_size = -1;
1409 	blocksize = r600_fmt_get_blocksize(format);
1410 
1411 	w0 = r600_mip_minify(w0, 0);
1412 	h0 = r600_mip_minify(h0, 0);
1413 	d0 = r600_mip_minify(d0, 0);
1414 	for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
1415 		width = r600_mip_minify(w0, i);
1416 		nbx = r600_fmt_get_nblocksx(format, width);
1417 
1418 		nbx = round_up(nbx, block_align);
1419 
1420 		height = r600_mip_minify(h0, i);
1421 		nby = r600_fmt_get_nblocksy(format, height);
1422 		nby = round_up(nby, height_align);
1423 
1424 		depth = r600_mip_minify(d0, i);
1425 
1426 		size = nbx * nby * blocksize * nsamples;
1427 		if (nfaces)
1428 			size *= nfaces;
1429 		else
1430 			size *= depth;
1431 
1432 		if (i == 0)
1433 			*l0_size = size;
1434 
1435 		if (i == 0 || i == 1)
1436 			offset = round_up(offset, base_align);
1437 
1438 		offset += size;
1439 	}
1440 	*mipmap_size = offset;
1441 	if (llevel == 0)
1442 		*mipmap_size = *l0_size;
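	/* level 0 lives in the texture bo, so drop it from the mipmap size when the base level is included */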
1443 	if (!blevel)
1444 		*mipmap_size -= *l0_size;
1445 }
1446 
1447 /**
1448  * r600_check_texture_resource() - check if a texture resource is valid
1449  * @p: parser structure holding parsing context
1450  * @idx: index into the cs buffer
1451  * @texture: texture's bo structure
1452  * @mipmap: mipmap's bo structure
1453  *
1454  * This function will check that the resource has valid fields and that
1455  * the texture and mipmap bo objects are big enough to cover this resource.
1456  */
1457 static int r600_check_texture_resource(struct radeon_cs_parser *p,  u32 idx,
1458 					      struct radeon_bo *texture,
1459 					      struct radeon_bo *mipmap,
1460 					      u64 base_offset,
1461 					      u64 mip_offset,
1462 					      u32 tiling_flags)
1463 {
1464 	struct r600_cs_track *track = p->track;
1465 	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
1466 	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
1467 	u32 height_align, pitch, pitch_align, depth_align;
1468 	u32 barray, larray;
1469 	u64 base_align;
1470 	struct array_mode_checker array_check;
1471 	u32 format;
1472 	bool is_array;
1473 
1474 	/* on legacy kernels we don't perform advanced checks */
1475 	if (p->rdev == NULL)
1476 		return 0;
1477 
1478 	/* convert to bytes */
1479 	base_offset <<= 8;
1480 	mip_offset <<= 8;
1481 
1482 	word0 = radeon_get_ib_value(p, idx + 0);
1483 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1484 		if (tiling_flags & RADEON_TILING_MACRO)
1485 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1486 		else if (tiling_flags & RADEON_TILING_MICRO)
1487 			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1488 	}
1489 	word1 = radeon_get_ib_value(p, idx + 1);
1490 	word2 = radeon_get_ib_value(p, idx + 2) << 8;
1491 	word3 = radeon_get_ib_value(p, idx + 3) << 8;
1492 	word4 = radeon_get_ib_value(p, idx + 4);
1493 	word5 = radeon_get_ib_value(p, idx + 5);
1494 	dim = G_038000_DIM(word0);
1495 	w0 = G_038000_TEX_WIDTH(word0) + 1;
1496 	pitch = (G_038000_PITCH(word0) + 1) * 8;
1497 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
1498 	d0 = G_038004_TEX_DEPTH(word1);
1499 	format = G_038004_DATA_FORMAT(word1);
1500 	blevel = G_038010_BASE_LEVEL(word4);
1501 	llevel = G_038014_LAST_LEVEL(word5);
1502 	/* pitch in texels */
1503 	array_check.array_mode = G_038000_TILE_MODE(word0);
1504 	array_check.group_size = track->group_size;
1505 	array_check.nbanks = track->nbanks;
1506 	array_check.npipes = track->npipes;
1507 	array_check.nsamples = 1;
1508 	array_check.blocksize = r600_fmt_get_blocksize(format);
1509 	nfaces = 1;
1510 	is_array = false;
1511 	switch (dim) {
1512 	case V_038000_SQ_TEX_DIM_1D:
1513 	case V_038000_SQ_TEX_DIM_2D:
1514 	case V_038000_SQ_TEX_DIM_3D:
1515 		break;
1516 	case V_038000_SQ_TEX_DIM_CUBEMAP:
1517 		if (p->family >= CHIP_RV770)
1518 			nfaces = 8;
1519 		else
1520 			nfaces = 6;
1521 		break;
1522 	case V_038000_SQ_TEX_DIM_1D_ARRAY:
1523 	case V_038000_SQ_TEX_DIM_2D_ARRAY:
1524 		is_array = true;
1525 		break;
1526 	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
1527 		is_array = true;
1528 		/* fall through */
1529 	case V_038000_SQ_TEX_DIM_2D_MSAA:
1530 		array_check.nsamples = 1 << llevel;
1531 		llevel = 0;
1532 		break;
1533 	default:
1534 		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
1535 		return -EINVAL;
1536 	}
1537 	if (!r600_fmt_is_valid_texture(format, p->family)) {
1538 		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1539 			 __func__, __LINE__, format);
1540 		return -EINVAL;
1541 	}
1542 
1543 	if (r600_get_array_mode_alignment(&array_check,
1544 					  &pitch_align, &height_align, &depth_align, &base_align)) {
1545 		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
1546 			 __func__, __LINE__, G_038000_TILE_MODE(word0));
1547 		return -EINVAL;
1548 	}
1549 
1550 	/* XXX check height as well... */
1551 
1552 	if (!IS_ALIGNED(pitch, pitch_align)) {
1553 		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1554 			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1555 		return -EINVAL;
1556 	}
1557 	if (!IS_ALIGNED(base_offset, base_align)) {
1558 		dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n",
1559 			 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
1560 		return -EINVAL;
1561 	}
1562 	if (!IS_ALIGNED(mip_offset, base_align)) {
1563 		dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n",
1564 			 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
1565 		return -EINVAL;
1566 	}
1567 
1568 	if (blevel > llevel) {
1569 		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
1570 			 blevel, llevel);
1571 	}
1572 	if (is_array) {
1573 		barray = G_038014_BASE_ARRAY(word5);
1574 		larray = G_038014_LAST_ARRAY(word5);
1575 
1576 		nfaces = larray - barray + 1;
1577 	}
1578 	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
1579 			  pitch_align, height_align, base_align,
1580 			  &l0_size, &mipmap_size);
1581 	/* using get ib will give us the offset into the texture bo */
1582 	if ((l0_size + word2) > radeon_bo_size(texture)) {
1583 		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
1584 			 w0, h0, pitch_align, height_align,
1585 			 array_check.array_mode, format, word2,
1586 			 l0_size, radeon_bo_size(texture));
1587 		dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align);
1588 		return -EINVAL;
1589 	}
1590 	/* using get ib will give us the offset into the mipmap bo */
1591 	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
1592 		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
1593 		  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
1594 	}
1595 	return 0;
1596 }
1597 
1598 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1599 {
1600 	u32 m, i;
1601 
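	/* registers are dword aligned; each u32 of r600_reg_safe_bm covers 32 of them
	 * (128 bytes of register space).  A clear bit means the register may be
	 * accessed directly by packets such as COPY_DW. */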
1602 	i = (reg >> 7);
1603 	if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
1604 		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1605 		return false;
1606 	}
1607 	m = 1 << ((reg >> 2) & 31);
1608 	if (!(r600_reg_safe_bm[i] & m))
1609 		return true;
1610 	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
1611 	return false;
1612 }
1613 
1614 static int r600_packet3_check(struct radeon_cs_parser *p,
1615 				struct radeon_cs_packet *pkt)
1616 {
1617 	struct radeon_cs_reloc *reloc;
1618 	struct r600_cs_track *track;
1619 	volatile u32 *ib;
1620 	unsigned idx;
1621 	unsigned i;
1622 	unsigned start_reg, end_reg, reg;
1623 	int r;
1624 	u32 idx_value;
1625 
1626 	track = (struct r600_cs_track *)p->track;
1627 	ib = p->ib.ptr;
1628 	idx = pkt->idx + 1;
1629 	idx_value = radeon_get_ib_value(p, idx);
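	/* idx points at the first dword after the PACKET3 header; idx_value caches it */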
1630 
1631 	switch (pkt->opcode) {
1632 	case PACKET3_SET_PREDICATION:
1633 	{
1634 		int pred_op;
1635 		int tmp;
1636 		uint64_t offset;
1637 
1638 		if (pkt->count != 1) {
1639 			DRM_ERROR("bad SET PREDICATION\n");
1640 			return -EINVAL;
1641 		}
1642 
1643 		tmp = radeon_get_ib_value(p, idx + 1);
1644 		pred_op = (tmp >> 16) & 0x7;
1645 
1646 		/* for the clear predicate operation */
1647 		if (pred_op == 0)
1648 			return 0;
1649 
1650 		if (pred_op > 2) {
1651 			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
1652 			return -EINVAL;
1653 		}
1654 
1655 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1656 		if (r) {
1657 			DRM_ERROR("bad SET PREDICATION\n");
1658 			return -EINVAL;
1659 		}
1660 
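		/* rebase the predication buffer: the low dword keeps its 16-byte aligned
		 * offset, the top 8 bits of the 40-bit address go back into dword 1 */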
1661 		offset = reloc->lobj.gpu_offset +
1662 		         (idx_value & 0xfffffff0) +
1663 		         ((u64)(tmp & 0xff) << 32);
1664 
1665 		ib[idx + 0] = offset;
1666 		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1667 	}
1668 	break;
1669 
1670 	case PACKET3_START_3D_CMDBUF:
1671 		if (p->family >= CHIP_RV770 || pkt->count) {
1672 			DRM_ERROR("bad START_3D\n");
1673 			return -EINVAL;
1674 		}
1675 		break;
1676 	case PACKET3_CONTEXT_CONTROL:
1677 		if (pkt->count != 1) {
1678 			DRM_ERROR("bad CONTEXT_CONTROL\n");
1679 			return -EINVAL;
1680 		}
1681 		break;
1682 	case PACKET3_INDEX_TYPE:
1683 	case PACKET3_NUM_INSTANCES:
1684 		if (pkt->count) {
1685 			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
1686 			return -EINVAL;
1687 		}
1688 		break;
1689 	case PACKET3_DRAW_INDEX:
1690 	{
1691 		uint64_t offset;
1692 		if (pkt->count != 3) {
1693 			DRM_ERROR("bad DRAW_INDEX\n");
1694 			return -EINVAL;
1695 		}
1696 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1697 		if (r) {
1698 			DRM_ERROR("bad DRAW_INDEX\n");
1699 			return -EINVAL;
1700 		}
1701 
1702 		offset = reloc->lobj.gpu_offset +
1703 		         idx_value +
1704 		         ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1705 
1706 		ib[idx+0] = offset;
1707 		ib[idx+1] = upper_32_bits(offset) & 0xff;
1708 
1709 		r = r600_cs_track_check(p);
1710 		if (r) {
1711 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1712 			return r;
1713 		}
1714 		break;
1715 	}
1716 	case PACKET3_DRAW_INDEX_AUTO:
1717 		if (pkt->count != 1) {
1718 			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1719 			return -EINVAL;
1720 		}
1721 		r = r600_cs_track_check(p);
1722 		if (r) {
1723 			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
1724 			return r;
1725 		}
1726 		break;
1727 	case PACKET3_DRAW_INDEX_IMMD_BE:
1728 	case PACKET3_DRAW_INDEX_IMMD:
1729 		if (pkt->count < 2) {
1730 			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1731 			return -EINVAL;
1732 		}
1733 		r = r600_cs_track_check(p);
1734 		if (r) {
1735 			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
1736 			return r;
1737 		}
1738 		break;
1739 	case PACKET3_WAIT_REG_MEM:
1740 		if (pkt->count != 5) {
1741 			DRM_ERROR("bad WAIT_REG_MEM\n");
1742 			return -EINVAL;
1743 		}
1744 		/* bit 4 is reg (0) or mem (1) */
1745 		if (idx_value & 0x10) {
1746 			uint64_t offset;
1747 
1748 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1749 			if (r) {
1750 				DRM_ERROR("bad WAIT_REG_MEM\n");
1751 				return -EINVAL;
1752 			}
1753 
1754 			offset = reloc->lobj.gpu_offset +
1755 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
1756 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1757 
1758 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1759 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1760 		} else if (idx_value & 0x100) {
1761 			DRM_ERROR("cannot use PFP on REG wait\n");
1762 			return -EINVAL;
1763 		}
1764 		break;
1765 	case PACKET3_CP_DMA:
1766 	{
1767 		u32 command, size;
1768 		u64 offset, tmp;
1769 		if (pkt->count != 4) {
1770 			DRM_ERROR("bad CP DMA\n");
1771 			return -EINVAL;
1772 		}
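		/* dword 4 is the CP DMA command word; its low 21 bits give the byte count */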
1773 		command = radeon_get_ib_value(p, idx+4);
1774 		size = command & 0x1fffff;
1775 		if (command & PACKET3_CP_DMA_CMD_SAS) {
1776 			/* src address space is register */
1777 			DRM_ERROR("CP DMA SAS not supported\n");
1778 			return -EINVAL;
1779 		} else {
1780 			if (command & PACKET3_CP_DMA_CMD_SAIC) {
1781 				DRM_ERROR("CP DMA SAIC only supported for registers\n");
1782 				return -EINVAL;
1783 			}
1784 			/* src address space is memory */
1785 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1786 			if (r) {
1787 				DRM_ERROR("bad CP DMA SRC\n");
1788 				return -EINVAL;
1789 			}
1790 
1791 			tmp = radeon_get_ib_value(p, idx) +
1792 				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1793 
1794 			offset = reloc->lobj.gpu_offset + tmp;
1795 
1796 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1797 				dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
1798 					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
1799 				return -EINVAL;
1800 			}
1801 
1802 			ib[idx] = offset;
1803 			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1804 		}
1805 		if (command & PACKET3_CP_DMA_CMD_DAS) {
1806 			/* dst address space is register */
1807 			DRM_ERROR("CP DMA DAS not supported\n");
1808 			return -EINVAL;
1809 		} else {
1810 			/* dst address space is memory */
1811 			if (command & PACKET3_CP_DMA_CMD_DAIC) {
1812 				DRM_ERROR("CP DMA DAIC only supported for registers\n");
1813 				return -EINVAL;
1814 			}
1815 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1816 			if (r) {
1817 				DRM_ERROR("bad CP DMA DST\n");
1818 				return -EINVAL;
1819 			}
1820 
1821 			tmp = radeon_get_ib_value(p, idx+2) +
1822 				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1823 
1824 			offset = reloc->lobj.gpu_offset + tmp;
1825 
1826 			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1827 				dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
1828 					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
1829 				return -EINVAL;
1830 			}
1831 
1832 			ib[idx+2] = offset;
1833 			ib[idx+3] = upper_32_bits(offset) & 0xff;
1834 		}
1835 		break;
1836 	}
1837 	case PACKET3_SURFACE_SYNC:
1838 		if (pkt->count != 3) {
1839 			DRM_ERROR("bad SURFACE_SYNC\n");
1840 			return -EINVAL;
1841 		}
1842 		/* 0xffffffff/0x0 is flush all cache flag */
1843 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1844 		    radeon_get_ib_value(p, idx + 2) != 0) {
1845 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1846 			if (r) {
1847 				DRM_ERROR("bad SURFACE_SYNC\n");
1848 				return -EINVAL;
1849 			}
1850 			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1851 		}
1852 		break;
1853 	case PACKET3_EVENT_WRITE:
1854 		if (pkt->count != 2 && pkt->count != 0) {
1855 			DRM_ERROR("bad EVENT_WRITE\n");
1856 			return -EINVAL;
1857 		}
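		/* a non-zero count means the event writes to memory and its address needs relocating */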
1858 		if (pkt->count) {
1859 			uint64_t offset;
1860 
1861 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1862 			if (r) {
1863 				DRM_ERROR("bad EVENT_WRITE\n");
1864 				return -EINVAL;
1865 			}
1866 			offset = reloc->lobj.gpu_offset +
1867 			         (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
1868 			         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1869 
1870 			ib[idx+1] = offset & 0xfffffff8;
1871 			ib[idx+2] = upper_32_bits(offset) & 0xff;
1872 		}
1873 		break;
1874 	case PACKET3_EVENT_WRITE_EOP:
1875 	{
1876 		uint64_t offset;
1877 
1878 		if (pkt->count != 4) {
1879 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
1880 			return -EINVAL;
1881 		}
1882 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1883 		if (r) {
1884 			DRM_ERROR("bad EVENT_WRITE\n");
1885 			return -EINVAL;
1886 		}
1887 
1888 		offset = reloc->lobj.gpu_offset +
1889 		         (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
1890 		         ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);
1891 
1892 		ib[idx+1] = offset & 0xfffffffc;
1893 		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1894 		break;
1895 	}
1896 	case PACKET3_SET_CONFIG_REG:
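		/* dword 1 is the register offset (in dwords) from the config-reg block
		 * base; every register in the range is vetted by r600_cs_check_reg() */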
1897 		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
1898 		end_reg = 4 * pkt->count + start_reg - 4;
1899 		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
1900 		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
1901 		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
1902 			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
1903 			return -EINVAL;
1904 		}
1905 		for (i = 0; i < pkt->count; i++) {
1906 			reg = start_reg + (4 * i);
1907 			r = r600_cs_check_reg(p, reg, idx+1+i);
1908 			if (r)
1909 				return r;
1910 		}
1911 		break;
1912 	case PACKET3_SET_CONTEXT_REG:
1913 		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
1914 		end_reg = 4 * pkt->count + start_reg - 4;
1915 		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
1916 		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
1917 		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
1918 			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
1919 			return -EINVAL;
1920 		}
1921 		for (i = 0; i < pkt->count; i++) {
1922 			reg = start_reg + (4 * i);
1923 			r = r600_cs_check_reg(p, reg, idx+1+i);
1924 			if (r)
1925 				return r;
1926 		}
1927 		break;
1928 	case PACKET3_SET_RESOURCE:
1929 		if (pkt->count % 7) {
1930 			DRM_ERROR("bad SET_RESOURCE\n");
1931 			return -EINVAL;
1932 		}
1933 		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
1934 		end_reg = 4 * pkt->count + start_reg - 4;
1935 		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
1936 		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
1937 		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
1938 			DRM_ERROR("bad SET_RESOURCE\n");
1939 			return -EINVAL;
1940 		}
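		/* each resource descriptor is 7 dwords; patch the texture or
		 * vertex-buffer base addresses with their relocation entries */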
1941 		for (i = 0; i < (pkt->count / 7); i++) {
1942 			struct radeon_bo *texture, *mipmap;
1943 			u32 size, offset, base_offset, mip_offset;
1944 
1945 			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
1946 			case SQ_TEX_VTX_VALID_TEXTURE:
1947 				/* tex base */
1948 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1949 				if (r) {
1950 					DRM_ERROR("bad SET_RESOURCE\n");
1951 					return -EINVAL;
1952 				}
1953 				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1954 				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
1955 					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1956 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1957 					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1958 						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1959 				}
1960 				texture = reloc->robj;
1961 				/* tex mip base */
1962 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1963 				if (r) {
1964 					DRM_ERROR("bad SET_RESOURCE\n");
1965 					return -EINVAL;
1966 				}
1967 				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1968 				mipmap = reloc->robj;
1969 				r = r600_check_texture_resource(p,  idx+(i*7)+1,
1970 								texture, mipmap,
1971 								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
1972 								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
1973 								reloc->lobj.tiling_flags);
1974 				if (r)
1975 					return r;
1976 				ib[idx+1+(i*7)+2] += base_offset;
1977 				ib[idx+1+(i*7)+3] += mip_offset;
1978 				break;
1979 			case SQ_TEX_VTX_VALID_BUFFER:
1980 			{
1981 				uint64_t offset64;
1982 				/* vtx base */
1983 				r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1984 				if (r) {
1985 					DRM_ERROR("bad SET_RESOURCE\n");
1986 					return -EINVAL;
1987 				}
1988 				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
1989 				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
1990 				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
1991 					/* force size to size of the buffer */
1992 					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
1993 						 size + offset, radeon_bo_size(reloc->robj));
1994 					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
1995 				}
1996 
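				/* resource descriptors are 7 dwords apart (see the reads above) */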
1997 				offset64 = reloc->lobj.gpu_offset + offset;
1998 				ib[idx+1+(i*7)+0] = offset64;
1999 				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
2000 						    (upper_32_bits(offset64) & 0xff);
2001 				break;
2002 			}
2003 			case SQ_TEX_VTX_INVALID_TEXTURE:
2004 			case SQ_TEX_VTX_INVALID_BUFFER:
2005 			default:
2006 				DRM_ERROR("bad SET_RESOURCE\n");
2007 				return -EINVAL;
2008 			}
2009 		}
2010 		break;
2011 	case PACKET3_SET_ALU_CONST:
2012 		if (track->sq_config & DX9_CONSTS) {
2013 			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
2014 			end_reg = 4 * pkt->count + start_reg - 4;
2015 			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
2016 			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
2017 			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
2018 				DRM_ERROR("bad SET_ALU_CONST\n");
2019 				return -EINVAL;
2020 			}
2021 		}
2022 		break;
2023 	case PACKET3_SET_BOOL_CONST:
2024 		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
2025 		end_reg = 4 * pkt->count + start_reg - 4;
2026 		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
2027 		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
2028 		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
2029 			DRM_ERROR("bad SET_BOOL_CONST\n");
2030 			return -EINVAL;
2031 		}
2032 		break;
2033 	case PACKET3_SET_LOOP_CONST:
2034 		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
2035 		end_reg = 4 * pkt->count + start_reg - 4;
2036 		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
2037 		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
2038 		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
2039 			DRM_ERROR("bad SET_LOOP_CONST\n");
2040 			return -EINVAL;
2041 		}
2042 		break;
2043 	case PACKET3_SET_CTL_CONST:
2044 		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
2045 		end_reg = 4 * pkt->count + start_reg - 4;
2046 		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
2047 		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
2048 		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
2049 			DRM_ERROR("bad SET_CTL_CONST\n");
2050 			return -EINVAL;
2051 		}
2052 		break;
2053 	case PACKET3_SET_SAMPLER:
2054 		if (pkt->count % 3) {
2055 			DRM_ERROR("bad SET_SAMPLER\n");
2056 			return -EINVAL;
2057 		}
2058 		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
2059 		end_reg = 4 * pkt->count + start_reg - 4;
2060 		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
2061 		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
2062 		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
2063 			DRM_ERROR("bad SET_SAMPLER\n");
2064 			return -EINVAL;
2065 		}
2066 		break;
2067 	case PACKET3_STRMOUT_BASE_UPDATE:
2068 		/* RS780 and RS880 also need this */
2069 		if (p->family < CHIP_RS780) {
2070 			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2071 			return -EINVAL;
2072 		}
2073 		if (pkt->count != 1) {
2074 			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2075 			return -EINVAL;
2076 		}
2077 		if (idx_value > 3) {
2078 			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2079 			return -EINVAL;
2080 		}
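		/* dword 1 holds the buffer base as address >> 8; it must match the
		 * streamout buffer and offset tracked for this index */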
2081 		{
2082 			u64 offset;
2083 
2084 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2085 			if (r) {
2086 				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2087 				return -EINVAL;
2088 			}
2089 
2090 			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2091 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2092 				return -EINVAL;
2093 			}
2094 
2095 			offset = radeon_get_ib_value(p, idx+1) << 8;
2096 			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2097 				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n",
2098 					  (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]);
2099 				return -EINVAL;
2100 			}
2101 
2102 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2103 				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n",
2104 					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2105 				return -EINVAL;
2106 			}
2107 			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2108 		}
2109 		break;
2110 	case PACKET3_SURFACE_BASE_UPDATE:
2111 		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2112 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2113 			return -EINVAL;
2114 		}
2115 		if (pkt->count) {
2116 			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
2117 			return -EINVAL;
2118 		}
2119 		break;
2120 	case PACKET3_STRMOUT_BUFFER_UPDATE:
2121 		if (pkt->count != 4) {
2122 			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
2123 			return -EINVAL;
2124 		}
2125 		/* Updating memory at DST_ADDRESS. */
2126 		if (idx_value & 0x1) {
2127 			u64 offset;
2128 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2129 			if (r) {
2130 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2131 				return -EINVAL;
2132 			}
2133 			offset = radeon_get_ib_value(p, idx+1);
2134 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2135 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2136 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
2137 					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2138 				return -EINVAL;
2139 			}
2140 			offset += reloc->lobj.gpu_offset;
2141 			ib[idx+1] = offset;
2142 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2143 		}
2144 		/* Reading data from SRC_ADDRESS. */
2145 		if (((idx_value >> 1) & 0x3) == 2) {
2146 			u64 offset;
2147 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2148 			if (r) {
2149 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2150 				return -EINVAL;
2151 			}
2152 			offset = radeon_get_ib_value(p, idx+3);
2153 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2154 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2155 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
2156 					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2157 				return -EINVAL;
2158 			}
2159 			offset += reloc->lobj.gpu_offset;
2160 			ib[idx+3] = offset;
2161 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2162 		}
2163 		break;
2164 	case PACKET3_MEM_WRITE:
2165 	{
2166 		u64 offset;
2167 
2168 		if (pkt->count != 3) {
2169 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2170 			return -EINVAL;
2171 		}
2172 		r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2173 		if (r) {
2174 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2175 			return -EINVAL;
2176 		}
2177 		offset = radeon_get_ib_value(p, idx+0);
2178 		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2179 		if (offset & 0x7) {
2180 			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2181 			return -EINVAL;
2182 		}
2183 		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2184 			DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
2185 				  (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
2186 			return -EINVAL;
2187 		}
2188 		offset += reloc->lobj.gpu_offset;
2189 		ib[idx+0] = offset;
2190 		ib[idx+1] = upper_32_bits(offset) & 0xff;
2191 		break;
2192 	}
2193 	case PACKET3_COPY_DW:
2194 		if (pkt->count != 4) {
2195 			DRM_ERROR("bad COPY_DW (invalid count)\n");
2196 			return -EINVAL;
2197 		}
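		/* bit 0 selects a memory source, bit 1 a memory destination; register
		 * operands are filtered through r600_is_safe_reg() */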
2198 		if (idx_value & 0x1) {
2199 			u64 offset;
2200 			/* SRC is memory. */
2201 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2202 			if (r) {
2203 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2204 				return -EINVAL;
2205 			}
2206 			offset = radeon_get_ib_value(p, idx+1);
2207 			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2208 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2209 				DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
2210 					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2211 				return -EINVAL;
2212 			}
2213 			offset += reloc->lobj.gpu_offset;
2214 			ib[idx+1] = offset;
2215 			ib[idx+2] = upper_32_bits(offset) & 0xff;
2216 		} else {
2217 			/* SRC is a reg. */
2218 			reg = radeon_get_ib_value(p, idx+1) << 2;
2219 			if (!r600_is_safe_reg(p, reg, idx+1))
2220 				return -EINVAL;
2221 		}
2222 		if (idx_value & 0x2) {
2223 			u64 offset;
2224 			/* DST is memory. */
2225 			r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2226 			if (r) {
2227 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2228 				return -EINVAL;
2229 			}
2230 			offset = radeon_get_ib_value(p, idx+3);
2231 			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2232 			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2233 				DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
2234 					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
2235 				return -EINVAL;
2236 			}
2237 			offset += reloc->lobj.gpu_offset;
2238 			ib[idx+3] = offset;
2239 			ib[idx+4] = upper_32_bits(offset) & 0xff;
2240 		} else {
2241 			/* DST is a reg. */
2242 			reg = radeon_get_ib_value(p, idx+3) << 2;
2243 			if (!r600_is_safe_reg(p, reg, idx+3))
2244 				return -EINVAL;
2245 		}
2246 		break;
2247 	case PACKET3_NOP:
2248 		break;
2249 	default:
2250 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
2251 		return -EINVAL;
2252 	}
2253 	return 0;
2254 }
2255 
2256 int r600_cs_parse(struct radeon_cs_parser *p)
2257 {
2258 	struct radeon_cs_packet pkt;
2259 	struct r600_cs_track *track;
2260 	int r;
2261 
2262 	if (p->track == NULL) {
2263 		/* initialize tracker, we are in kms */
2264 		track = kzalloc(sizeof(*track), GFP_KERNEL);
2265 		if (track == NULL)
2266 			return -ENOMEM;
2267 		r600_cs_track_init(track);
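		/* pick up the tiling config for this family: pre-RV770 parts use the
		 * r600 settings, RV770..RV740 the rv770 settings */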
2268 		if (p->rdev->family < CHIP_RV770) {
2269 			track->npipes = p->rdev->config.r600.tiling_npipes;
2270 			track->nbanks = p->rdev->config.r600.tiling_nbanks;
2271 			track->group_size = p->rdev->config.r600.tiling_group_size;
2272 		} else if (p->rdev->family <= CHIP_RV740) {
2273 			track->npipes = p->rdev->config.rv770.tiling_npipes;
2274 			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
2275 			track->group_size = p->rdev->config.rv770.tiling_group_size;
2276 		}
2277 		p->track = track;
2278 	}
2279 	do {
2280 		r = radeon_cs_packet_parse(p, &pkt, p->idx);
2281 		if (r) {
2282 			kfree(p->track);
2283 			p->track = NULL;
2284 			return r;
2285 		}
2286 		p->idx += pkt.count + 2;
2287 		switch (pkt.type) {
2288 		case RADEON_PACKET_TYPE0:
2289 			r = r600_cs_parse_packet0(p, &pkt);
2290 			break;
2291 		case RADEON_PACKET_TYPE2:
2292 			break;
2293 		case RADEON_PACKET_TYPE3:
2294 			r = r600_packet3_check(p, &pkt);
2295 			break;
2296 		default:
2297 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
2298 			kfree(p->track);
2299 			p->track = NULL;
2300 			return -EINVAL;
2301 		}
2302 		if (r) {
2303 			kfree(p->track);
2304 			p->track = NULL;
2305 			return r;
2306 		}
2307 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2308 #if 0
2309 	for (r = 0; r < p->ib.length_dw; r++) {
2310 		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
2311 		mdelay(1);
2312 	}
2313 #endif
2314 	kfree(p->track);
2315 	p->track = NULL;
2316 	return 0;
2317 }
2318 
2319 #ifdef CONFIG_DRM_RADEON_UMS
2320 
2321 /**
2322  * r600_cs_parser_fini() - clean parser state
2323  * @parser:	parser structure holding parsing context.
2324  * @error:	error number
2325  *
2326  * If error is set, then unvalidate the buffers, otherwise just free the
2327  * memory used by the parsing context.
2328  **/
2329 static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2330 {
2331 	unsigned i;
2332 
2333 	kfree(parser->relocs);
2334 	for (i = 0; i < parser->nchunks; i++) {
2335 		kfree(parser->chunks[i].kdata);
2336 		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2337 			kfree(parser->chunks[i].kpage[0]);
2338 			kfree(parser->chunks[i].kpage[1]);
2339 		}
2340 	}
2341 	kfree(parser->chunks);
2342 	kfree(parser->chunks_array);
2343 	kfree(parser->track);
2344 }
2345 
2346 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2347 {
2348 	if (p->chunk_relocs_idx == -1) {
2349 		return 0;
2350 	}
2351 	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2352 	if (p->relocs == NULL) {
2353 		return -ENOMEM;
2354 	}
2355 	return 0;
2356 }
2357 
2358 int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2359 			unsigned family, u32 *ib, int *l)
2360 {
2361 	struct radeon_cs_parser parser;
2362 	struct radeon_cs_chunk *ib_chunk;
2363 	struct r600_cs_track *track;
2364 	int r;
2365 
2366 	/* initialize tracker */
2367 	track = kzalloc(sizeof(*track), GFP_KERNEL);
2368 	if (track == NULL)
2369 		return -ENOMEM;
2370 	r600_cs_track_init(track);
2371 	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
2372 	/* initialize parser */
2373 	memset(&parser, 0, sizeof(struct radeon_cs_parser));
2374 	parser.filp = filp;
2375 	parser.dev = dev->dev;
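	/* legacy/UMS path: no struct radeon_device, so checks guarded by p->rdev are skipped */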
2376 	parser.rdev = NULL;
2377 	parser.family = family;
2378 	parser.track = track;
2379 	parser.ib.ptr = ib;
2380 	r = radeon_cs_parser_init(&parser, data);
2381 	if (r) {
2382 		DRM_ERROR("Failed to initialize parser !\n");
2383 		r600_cs_parser_fini(&parser, r);
2384 		return r;
2385 	}
2386 	r = r600_cs_parser_relocs_legacy(&parser);
2387 	if (r) {
2388 		DRM_ERROR("Failed to parse relocation !\n");
2389 		r600_cs_parser_fini(&parser, r);
2390 		return r;
2391 	}
2392 	/* Copy the packet into the IB, the parser will read from the
2393 	 * input memory (cached) and write to the IB (which can be
2394 	 * uncached). */
2395 	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
2396 	parser.ib.length_dw = ib_chunk->length_dw;
2397 	*l = parser.ib.length_dw;
2398 	r = r600_cs_parse(&parser);
2399 	if (r) {
2400 		DRM_ERROR("Invalid command stream !\n");
2401 		r600_cs_parser_fini(&parser, r);
2402 		return r;
2403 	}
2404 	r = radeon_cs_finish_pages(&parser);
2405 	if (r) {
2406 		DRM_ERROR("Invalid command stream !\n");
2407 		r600_cs_parser_fini(&parser, r);
2408 		return r;
2409 	}
2410 	r600_cs_parser_fini(&parser, r);
2411 	return r;
2412 }
2413 
2414 void r600_cs_legacy_init(void)
2415 {
2416 	r600_nomm = 1;
2417 }
2418 
2419 #endif
2420 
2421 /*
2422  *  DMA
2423  */
2424 /**
2425  * r600_dma_cs_next_reloc() - parse next reloc
2426  * @p:		parser structure holding parsing context.
2427  * @cs_reloc:		reloc information
2428  *
2429  * Returns the next reloc from the relocation chunk; the caller uses its
2430  * GPU offset to patch the addresses in the DMA IB.
2431  **/
2432 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2433 			   struct radeon_cs_reloc **cs_reloc)
2434 {
2435 	struct radeon_cs_chunk *relocs_chunk;
2436 	unsigned idx;
2437 
2438 	*cs_reloc = NULL;
2439 	if (p->chunk_relocs_idx == -1) {
2440 		DRM_ERROR("No relocation chunk !\n");
2441 		return -EINVAL;
2442 	}
2443 	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2444 	idx = p->dma_reloc_idx;
2445 	if (idx >= p->nrelocs) {
2446 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2447 			  idx, p->nrelocs);
2448 		return -EINVAL;
2449 	}
2450 	*cs_reloc = p->relocs_ptr[idx];
2451 	p->dma_reloc_idx++;
2452 	return 0;
2453 }
2454 
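/* DMA packet header fields: bits 31:28 opcode, bit 23 tiled flag, bits 15:0 dword count */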
2455 #define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2456 #define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2457 #define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2458 
2459 /**
2460  * r600_dma_cs_parse() - parse the DMA IB
2461  * @p:		parser structure holding parsing context.
2462  *
2463  * Parses the DMA IB from the CS ioctl and updates
2464  * the GPU addresses based on the reloc information and
2465  * checks for errors. (R6xx-R7xx)
2466  * Returns 0 for success and an error on failure.
2467  **/
2468 int r600_dma_cs_parse(struct radeon_cs_parser *p)
2469 {
2470 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2471 	struct radeon_cs_reloc *src_reloc, *dst_reloc;
2472 	u32 header, cmd, count, tiled;
2473 	volatile u32 *ib = p->ib.ptr;
2474 	u32 idx, idx_value;
2475 	u64 src_offset, dst_offset;
2476 	int r;
2477 
2478 	do {
2479 		if (p->idx >= ib_chunk->length_dw) {
2480 			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2481 				  p->idx, ib_chunk->length_dw);
2482 			return -EINVAL;
2483 		}
2484 		idx = p->idx;
2485 		header = radeon_get_ib_value(p, idx);
2486 		cmd = GET_DMA_CMD(header);
2487 		count = GET_DMA_COUNT(header);
2488 		tiled = GET_DMA_T(header);
2489 
2490 		switch (cmd) {
2491 		case DMA_PACKET_WRITE:
2492 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2493 			if (r) {
2494 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2495 				return -EINVAL;
2496 			}
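			/* tiled writes put the 256-byte aligned base (addr >> 8) in dword 1;
			 * linear writes carry a full 40-bit address in dwords 1-2 */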
2497 			if (tiled) {
2498 				dst_offset = radeon_get_ib_value(p, idx+1);
2499 				dst_offset <<= 8;
2500 
2501 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2502 				p->idx += count + 5;
2503 			} else {
2504 				dst_offset = radeon_get_ib_value(p, idx+1);
2505 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
2506 
2507 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2508 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2509 				p->idx += count + 3;
2510 			}
2511 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2512 				dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
2513 					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2514 				return -EINVAL;
2515 			}
2516 			break;
2517 		case DMA_PACKET_COPY:
2518 			r = r600_dma_cs_next_reloc(p, &src_reloc);
2519 			if (r) {
2520 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2521 				return -EINVAL;
2522 			}
2523 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2524 			if (r) {
2525 				DRM_ERROR("bad DMA_PACKET_COPY\n");
2526 				return -EINVAL;
2527 			}
2528 			if (tiled) {
2529 				idx_value = radeon_get_ib_value(p, idx + 2);
2530 				/* detile bit */
2531 				if (idx_value & (1 << 31)) {
2532 					/* tiled src, linear dst */
2533 					src_offset = radeon_get_ib_value(p, idx+1);
2534 					src_offset <<= 8;
2535 					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2536 
2537 					dst_offset = radeon_get_ib_value(p, idx+5);
2538 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2539 					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2540 					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2541 				} else {
2542 					/* linear src, tiled dst */
2543 					src_offset = radeon_get_ib_value(p, idx+5);
2544 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
2545 					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2546 					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2547 
2548 					dst_offset = radeon_get_ib_value(p, idx+1);
2549 					dst_offset <<= 8;
2550 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2551 				}
2552 				p->idx += 7;
2553 			} else {
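				/* linear copies: 7xx packets keep separate high-address dwords,
				 * 6xx packs both high bytes into dword 3 */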
2554 				if (p->family >= CHIP_RV770) {
2555 					src_offset = radeon_get_ib_value(p, idx+2);
2556 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
2557 					dst_offset = radeon_get_ib_value(p, idx+1);
2558 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2559 
2560 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2561 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2562 					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2563 					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2564 					p->idx += 5;
2565 				} else {
2566 					src_offset = radeon_get_ib_value(p, idx+2);
2567 					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
2568 					dst_offset = radeon_get_ib_value(p, idx+1);
2569 					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;
2570 
2571 					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2572 					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2573 					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2574 					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2575 					p->idx += 4;
2576 				}
2577 			}
2578 			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2579 				dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n",
2580 					 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2581 				return -EINVAL;
2582 			}
2583 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2584 				dev_warn(p->dev, "DMA write dst buffer too small (%ju %lu)\n",
2585 					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2586 				return -EINVAL;
2587 			}
2588 			break;
2589 		case DMA_PACKET_CONSTANT_FILL:
2590 			if (p->family < CHIP_RV770) {
2591 				DRM_ERROR("Constant Fill is 7xx only !\n");
2592 				return -EINVAL;
2593 			}
2594 			r = r600_dma_cs_next_reloc(p, &dst_reloc);
2595 			if (r) {
2596 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
2597 				return -EINVAL;
2598 			}
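			/* the upper 8 address bits live in bits 23:16 of dword 3 */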
2599 			dst_offset = radeon_get_ib_value(p, idx+1);
2600 			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
2601 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2602 				dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
2603 					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2604 				return -EINVAL;
2605 			}
2606 			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2607 			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2608 			p->idx += 4;
2609 			break;
2610 		case DMA_PACKET_NOP:
2611 			p->idx += 1;
2612 			break;
2613 		default:
2614 			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2615 			return -EINVAL;
2616 		}
2617 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2618 #if 0
2619 	for (r = 0; r < p->ib.length_dw; r++) {
2620 		DRM_INFO("%05d  0x%08X\n", r, p->ib.ptr[r]);
2621 		mdelay(1);
2622 	}
2623 #endif
2624 	return 0;
2625 }
2626