xref: /dragonfly/sys/dev/drm/radeon/r200.c (revision 279dd846)
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <uapi_drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"

#include "r100d.h"
#include "r200_reg_safe.h"

#include "r100_track.h"

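/*
 * Size in dwords of one vertex for a given R200_SE_VTX_FMT_0 value: a base
 * of two dwords (presumably the mandatory X/Y pair), plus one dword per
 * enabled scalar field, the blend-weight count, three dwords per normal,
 * and the per-color sizes packed two bits apiece starting at bit 11.
 */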
static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
{
	int vtx_size, i;
	vtx_size = 2;

	if (vtx_fmt_0 & R200_VTX_Z0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W0)
		vtx_size++;
	/* blend weight */
	if (vtx_fmt_0 & (0x7 << R200_VTX_WEIGHT_COUNT_SHIFT))
		vtx_size += (vtx_fmt_0 >> R200_VTX_WEIGHT_COUNT_SHIFT) & 0x7;
	if (vtx_fmt_0 & R200_VTX_PV_MATRIX_SEL)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N0)
		vtx_size += 3;
	if (vtx_fmt_0 & R200_VTX_POINT_SIZE)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_DISCRETE_FOG)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_0)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_SHININESS_1)
		vtx_size++;
	for (i = 0; i < 8; i++) {
		int color_size = (vtx_fmt_0 >> (11 + 2*i)) & 0x3;
		switch (color_size) {
		case 0: break;
		case 1: vtx_size++; break;
		case 2: vtx_size += 3; break;
		case 3: vtx_size += 4; break;
		}
	}
	if (vtx_fmt_0 & R200_VTX_XY1)
		vtx_size += 2;
	if (vtx_fmt_0 & R200_VTX_Z1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_W1)
		vtx_size++;
	if (vtx_fmt_0 & R200_VTX_N1)
		vtx_size += 3;
	return vtx_size;
}

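/*
 * r200_copy_dma - copy num_gpu_pages GPU pages from src_offset to dst_offset
 * using the GUI DMA engine on the GFX ring.  Transfers are split into chunks
 * of at most 0x1FFFFF bytes (apparently a 21-bit size field), bracketed by
 * WAIT_UNTIL packets so the 2D engine is idle and clean before the DMA
 * starts and the DMA is idle before the optional fence is emitted.
 */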
int r200_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_gpu_pages,
		  struct radeon_fence **fence)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
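		/*
		 * 0x720 has no symbolic name in this file; the three dwords
		 * that follow look like the GUI DMA source address,
		 * destination address and byte count, with bits 30 and 31
		 * presumably acting as control flags for the DMA engine.
		 */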
		radeon_ring_write(ring, PACKET0(0x720, 2));
		radeon_ring_write(ring, src_offset);
		radeon_ring_write(ring, dst_offset);
		radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
	}
	radeon_ring_unlock_commit(rdev, ring, false);
	return r;
}


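/*
 * Additional per-vertex dwords described by R200_SE_VTX_FMT_1: six 3-bit
 * fields, one per texture unit, each giving the number of texture
 * coordinates for that unit (values above 4 are ignored).
 */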
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
	int vtx_size, i, tex_size;
	vtx_size = 0;
	for (i = 0; i < 6; i++) {
		tex_size = (vtx_fmt_1 >> (i * 3)) & 0x7;
		if (tex_size > 4)
			continue;
		vtx_size += tex_size;
	}
	return vtx_size;
}

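/*
 * r200_packet0_check - validate one register write from a PACKET0 in a user
 * command stream.  Registers that carry buffer addresses are patched with
 * the relocated GPU offset (and, unless RADEON_CS_KEEP_TILING_FLAGS is set,
 * with the tiling bits of the backing object); everything else updates the
 * r100 CS tracker so buffer sizes can be checked later.  Registers not
 * handled here are rejected as forbidden with -EINVAL.
 */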
int r200_packet0_check(struct radeon_cs_parser *p,
		       struct radeon_cs_packet *pkt,
		       unsigned idx, unsigned reg)
{
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp;
	int r;
	int i;
	int face;
	u32 tile_flags = 0;
	u32 idx_value;

	ib = p->ib.ptr;
	track = (struct r100_cs_track *)p->track;
	idx_value = radeon_get_ib_value(p, idx);
	switch (reg) {
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		break;
		/* FIXME: only allow PACKET3 blit? easier to check for out of
		 * range access */
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case RADEON_RB3D_DEPTHOFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = idx_value;
		track->zb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_RB3D_COLOROFFSET:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[0].robj = reloc->robj;
		track->cb[0].offset = idx_value;
		track->cb_dirty = true;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case R200_PP_TXOFFSET_0:
	case R200_PP_TXOFFSET_1:
	case R200_PP_TXOFFSET_2:
	case R200_PP_TXOFFSET_3:
	case R200_PP_TXOFFSET_4:
	case R200_PP_TXOFFSET_5:
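		/*
		 * The six PP_TXOFFSET registers sit 24 bytes apart, so the
		 * offset from PP_TXOFFSET_0 divided by 24 recovers the
		 * texture unit index.
		 */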
		i = (reg - R200_PP_TXOFFSET_0) / 24;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= R200_TXO_MACRO_TILE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= R200_TXO_MICRO_TILE;

			tmp = idx_value & ~(0x7 << 2);
			tmp |= tile_flags;
			ib[idx] = tmp + ((u32)reloc->gpu_offset);
		} else
			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case R200_PP_CUBIC_OFFSET_F1_0:
	case R200_PP_CUBIC_OFFSET_F2_0:
	case R200_PP_CUBIC_OFFSET_F3_0:
	case R200_PP_CUBIC_OFFSET_F4_0:
	case R200_PP_CUBIC_OFFSET_F5_0:
	case R200_PP_CUBIC_OFFSET_F1_1:
	case R200_PP_CUBIC_OFFSET_F2_1:
	case R200_PP_CUBIC_OFFSET_F3_1:
	case R200_PP_CUBIC_OFFSET_F4_1:
	case R200_PP_CUBIC_OFFSET_F5_1:
	case R200_PP_CUBIC_OFFSET_F1_2:
	case R200_PP_CUBIC_OFFSET_F2_2:
	case R200_PP_CUBIC_OFFSET_F3_2:
	case R200_PP_CUBIC_OFFSET_F4_2:
	case R200_PP_CUBIC_OFFSET_F5_2:
	case R200_PP_CUBIC_OFFSET_F1_3:
	case R200_PP_CUBIC_OFFSET_F2_3:
	case R200_PP_CUBIC_OFFSET_F3_3:
	case R200_PP_CUBIC_OFFSET_F4_3:
	case R200_PP_CUBIC_OFFSET_F5_3:
	case R200_PP_CUBIC_OFFSET_F1_4:
	case R200_PP_CUBIC_OFFSET_F2_4:
	case R200_PP_CUBIC_OFFSET_F3_4:
	case R200_PP_CUBIC_OFFSET_F4_4:
	case R200_PP_CUBIC_OFFSET_F5_4:
	case R200_PP_CUBIC_OFFSET_F1_5:
	case R200_PP_CUBIC_OFFSET_F2_5:
	case R200_PP_CUBIC_OFFSET_F3_5:
	case R200_PP_CUBIC_OFFSET_F4_5:
	case R200_PP_CUBIC_OFFSET_F5_5:
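		/*
		 * Five cubic-face offset registers per texture unit follow
		 * that unit's PP_TXOFFSET at 4-byte strides, so face comes
		 * out in the range 1..5 and indexes cube_info[face - 1];
		 * the base PP_TXOFFSET presumably supplies the first face.
		 */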
		i = (reg - R200_PP_TXOFFSET_0) / 24;
		face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		track->textures[i].cube_info[face - 1].offset = idx_value;
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		track->textures[i].cube_info[face - 1].robj = reloc->robj;
		track->tex_dirty = true;
		break;
	case RADEON_RE_WIDTH_HEIGHT:
		track->maxy = ((idx_value >> 16) & 0x7FF);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_COLORPITCH:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}

		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->tiling_flags & RADEON_TILING_MACRO)
				tile_flags |= RADEON_COLOR_TILE_ENABLE;
			if (reloc->tiling_flags & RADEON_TILING_MICRO)
				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;

			tmp = idx_value & ~(0x7 << 16);
			tmp |= tile_flags;
			ib[idx] = tmp;
		} else
			ib[idx] = idx_value;

		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
		track->cb_dirty = true;
		break;
	case RADEON_RB3D_DEPTHPITCH:
		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_CNTL:
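		/*
		 * The 5-bit color format field selects the color buffer
		 * format; the tracker only needs bytes-per-pixel, so the
		 * cases below just group formats by size (1, 2 or 4 bytes).
		 */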
		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
		case 7:
		case 8:
		case 9:
		case 11:
		case 12:
			track->cb[0].cpp = 1;
			break;
		case 3:
		case 4:
		case 15:
			track->cb[0].cpp = 2;
			break;
		case 6:
			track->cb[0].cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
			return -EINVAL;
		}
		if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
			DRM_ERROR("No support for depth xy offset in kms\n");
			return -EINVAL;
		}

		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
		track->cb_dirty = true;
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZSTENCILCNTL:
		switch (idx_value & 0xf) {
		case 0:
			track->zb.cpp = 2;
			break;
		case 2:
		case 3:
		case 4:
		case 5:
		case 9:
		case 11:
			track->zb.cpp = 4;
			break;
		default:
			break;
		}
		track->zb_dirty = true;
		break;
	case RADEON_RB3D_ZPASS_ADDR:
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			radeon_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
		break;
	case RADEON_PP_CNTL:
		{
			uint32_t temp = idx_value >> 4;
			for (i = 0; i < track->num_texture; i++)
				track->textures[i].enabled = !!(temp & (1 << i));
			track->tex_dirty = true;
		}
		break;
	case RADEON_SE_VF_CNTL:
		track->vap_vf_cntl = idx_value;
		break;
	case 0x210c:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = idx_value & 0x00FFFFFFUL;
		break;
	case R200_SE_VTX_FMT_0:
		track->vtx_size = r200_get_vtx_size_0(idx_value);
		break;
	case R200_SE_VTX_FMT_1:
		track->vtx_size += r200_get_vtx_size_1(idx_value);
		break;
	case R200_PP_TXSIZE_0:
	case R200_PP_TXSIZE_1:
	case R200_PP_TXSIZE_2:
	case R200_PP_TXSIZE_3:
	case R200_PP_TXSIZE_4:
	case R200_PP_TXSIZE_5:
		i = (reg - R200_PP_TXSIZE_0) / 32;
		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
		track->tex_dirty = true;
		break;
	case R200_PP_TXPITCH_0:
	case R200_PP_TXPITCH_1:
	case R200_PP_TXPITCH_2:
	case R200_PP_TXPITCH_3:
	case R200_PP_TXPITCH_4:
	case R200_PP_TXPITCH_5:
		i = (reg - R200_PP_TXPITCH_0) / 32;
		track->textures[i].pitch = idx_value + 32;
		track->tex_dirty = true;
		break;
	case R200_PP_TXFILTER_0:
	case R200_PP_TXFILTER_1:
	case R200_PP_TXFILTER_2:
	case R200_PP_TXFILTER_3:
	case R200_PP_TXFILTER_4:
	case R200_PP_TXFILTER_5:
		i = (reg - R200_PP_TXFILTER_0) / 32;
		track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
						 >> R200_MAX_MIP_LEVEL_SHIFT);
		tmp = (idx_value >> 23) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_w = false;
		tmp = (idx_value >> 27) & 0x7;
		if (tmp == 2 || tmp == 6)
			track->textures[i].roundup_h = false;
		track->tex_dirty = true;
		break;
	case R200_PP_TXMULTI_CTL_0:
	case R200_PP_TXMULTI_CTL_1:
	case R200_PP_TXMULTI_CTL_2:
	case R200_PP_TXMULTI_CTL_3:
	case R200_PP_TXMULTI_CTL_4:
	case R200_PP_TXMULTI_CTL_5:
		i = (reg - R200_PP_TXMULTI_CTL_0) / 32;
		break;
	case R200_PP_TXFORMAT_X_0:
	case R200_PP_TXFORMAT_X_1:
	case R200_PP_TXFORMAT_X_2:
	case R200_PP_TXFORMAT_X_3:
	case R200_PP_TXFORMAT_X_4:
	case R200_PP_TXFORMAT_X_5:
		i = (reg - R200_PP_TXFORMAT_X_0) / 32;
		track->textures[i].txdepth = idx_value & 0x7;
		tmp = (idx_value >> 16) & 0x3;
		/* 2D, 3D, CUBE */
		switch (tmp) {
		case 0:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			/* 1D/2D */
			track->textures[i].tex_coord_type = 0;
			break;
		case 1:
			/* CUBE */
			track->textures[i].tex_coord_type = 2;
			break;
		case 2:
			/* 3D */
			track->textures[i].tex_coord_type = 1;
			break;
		}
		track->tex_dirty = true;
		break;
	case R200_PP_TXFORMAT_0:
	case R200_PP_TXFORMAT_1:
	case R200_PP_TXFORMAT_2:
	case R200_PP_TXFORMAT_3:
	case R200_PP_TXFORMAT_4:
	case R200_PP_TXFORMAT_5:
		i = (reg - R200_PP_TXFORMAT_0) / 32;
		if (idx_value & R200_TXFORMAT_NON_POWER2) {
			track->textures[i].use_pitch = 1;
		} else {
			track->textures[i].use_pitch = 0;
			track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
			track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
		}
		if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE)
			track->textures[i].lookup_disable = true;
		switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
		case R200_TXFORMAT_I8:
		case R200_TXFORMAT_RGB332:
		case R200_TXFORMAT_Y8:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_AI88:
		case R200_TXFORMAT_ARGB1555:
		case R200_TXFORMAT_RGB565:
		case R200_TXFORMAT_ARGB4444:
		case R200_TXFORMAT_VYUY422:
		case R200_TXFORMAT_YVYU422:
		case R200_TXFORMAT_LDVDU655:
		case R200_TXFORMAT_DVDU88:
		case R200_TXFORMAT_AVYU4444:
			track->textures[i].cpp = 2;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_ARGB8888:
		case R200_TXFORMAT_RGBA8888:
		case R200_TXFORMAT_ABGR8888:
		case R200_TXFORMAT_BGR111110:
		case R200_TXFORMAT_LDVDU8888:
			track->textures[i].cpp = 4;
			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
			break;
		case R200_TXFORMAT_DXT1:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
			break;
		case R200_TXFORMAT_DXT23:
		case R200_TXFORMAT_DXT45:
			track->textures[i].cpp = 1;
			track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
			break;
		}
		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
		track->tex_dirty = true;
		break;
	case R200_PP_CUBIC_FACES_0:
	case R200_PP_CUBIC_FACES_1:
	case R200_PP_CUBIC_FACES_2:
	case R200_PP_CUBIC_FACES_3:
	case R200_PP_CUBIC_FACES_4:
	case R200_PP_CUBIC_FACES_5:
		tmp = idx_value;
		i = (reg - R200_PP_CUBIC_FACES_0) / 32;
		for (face = 0; face < 4; face++) {
			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
		}
		track->tex_dirty = true;
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

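/*
 * Install the r200-specific safe-register bitmap (generated into
 * r200_reg_safe.h), which the command-stream checker consults to decide
 * which PACKET0 register writes need the detailed handling in
 * r200_packet0_check().
 */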
void r200_set_safe_registers(struct radeon_device *rdev)
{
	rdev->config.r100.reg_safe_bm = r200_reg_safe_bm;
	rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm);
}
549