1 /* $OpenBSD: r600_cs.c,v 1.9 2018/04/20 16:09:37 deraadt Exp $ */ 2 /* 3 * Copyright 2008 Advanced Micro Devices, Inc. 4 * Copyright 2008 Red Hat Inc. 5 * Copyright 2009 Jerome Glisse. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice shall be included in 15 * all copies or substantial portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 23 * OTHER DEALINGS IN THE SOFTWARE. 24 * 25 * Authors: Dave Airlie 26 * Alex Deucher 27 * Jerome Glisse 28 */ 29 #include <dev/pci/drm/drmP.h> 30 #include "radeon.h" 31 #include "r600d.h" 32 #include "r600_reg_safe.h" 33 34 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 35 struct radeon_cs_reloc **cs_reloc); 36 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 37 struct radeon_cs_reloc **cs_reloc); 38 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 39 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 40 extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size); 41 42 43 struct r600_cs_track { 44 /* configuration we miror so that we use same code btw kms/ums */ 45 u32 group_size; 46 u32 nbanks; 47 u32 npipes; 48 /* value we track */ 49 u32 sq_config; 50 u32 log_nsamples; 51 u32 nsamples; 52 u32 cb_color_base_last[8]; 53 struct radeon_bo *cb_color_bo[8]; 54 u64 cb_color_bo_mc[8]; 55 u64 cb_color_bo_offset[8]; 56 struct radeon_bo *cb_color_frag_bo[8]; 57 u64 cb_color_frag_offset[8]; 58 struct radeon_bo *cb_color_tile_bo[8]; 59 u64 cb_color_tile_offset[8]; 60 u32 cb_color_mask[8]; 61 u32 cb_color_info[8]; 62 u32 cb_color_view[8]; 63 u32 cb_color_size_idx[8]; /* unused */ 64 u32 cb_target_mask; 65 u32 cb_shader_mask; /* unused */ 66 bool is_resolve; 67 u32 cb_color_size[8]; 68 u32 vgt_strmout_en; 69 u32 vgt_strmout_buffer_en; 70 struct radeon_bo *vgt_strmout_bo[4]; 71 u64 vgt_strmout_bo_mc[4]; /* unused */ 72 u32 vgt_strmout_bo_offset[4]; 73 u32 vgt_strmout_size[4]; 74 u32 db_depth_control; 75 u32 db_depth_info; 76 u32 db_depth_size_idx; 77 u32 db_depth_view; 78 u32 db_depth_size; 79 u32 db_offset; 80 struct radeon_bo *db_bo; 81 u64 db_bo_mc; 82 bool sx_misc_kill_all_prims; 83 bool cb_dirty; 84 bool db_dirty; 85 bool streamout_dirty; 86 struct radeon_bo *htile_bo; 87 u64 htile_offset; 88 u32 htile_surface; 89 }; 90 91 #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } 92 #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } 93 #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 } 94 #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } 95 #define FMT_48_BIT(fmt) [fmt] = { 
1, 1, 8, 0, CHIP_R600 } 96 #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } 97 #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } 98 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } 99 100 struct gpu_formats { 101 unsigned blockwidth; 102 unsigned blockheight; 103 unsigned blocksize; 104 unsigned valid_color; 105 enum radeon_family min_family; 106 }; 107 108 static const struct gpu_formats color_formats_table[] = { 109 /* 8 bit */ 110 FMT_8_BIT(V_038004_COLOR_8, 1), 111 FMT_8_BIT(V_038004_COLOR_4_4, 1), 112 FMT_8_BIT(V_038004_COLOR_3_3_2, 1), 113 FMT_8_BIT(V_038004_FMT_1, 0), 114 115 /* 16-bit */ 116 FMT_16_BIT(V_038004_COLOR_16, 1), 117 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), 118 FMT_16_BIT(V_038004_COLOR_8_8, 1), 119 FMT_16_BIT(V_038004_COLOR_5_6_5, 1), 120 FMT_16_BIT(V_038004_COLOR_6_5_5, 1), 121 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), 122 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), 123 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), 124 125 /* 24-bit */ 126 FMT_24_BIT(V_038004_FMT_8_8_8), 127 128 /* 32-bit */ 129 FMT_32_BIT(V_038004_COLOR_32, 1), 130 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), 131 FMT_32_BIT(V_038004_COLOR_16_16, 1), 132 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), 133 FMT_32_BIT(V_038004_COLOR_8_24, 1), 134 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), 135 FMT_32_BIT(V_038004_COLOR_24_8, 1), 136 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), 137 FMT_32_BIT(V_038004_COLOR_10_11_11, 1), 138 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), 139 FMT_32_BIT(V_038004_COLOR_11_11_10, 1), 140 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), 141 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), 142 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), 143 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), 144 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), 145 FMT_32_BIT(V_038004_FMT_32_AS_8, 0), 146 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), 147 148 /* 48-bit */ 149 FMT_48_BIT(V_038004_FMT_16_16_16), 150 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), 151 152 /* 64-bit */ 153 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), 154 FMT_64_BIT(V_038004_COLOR_32_32, 1), 155 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), 156 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), 157 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), 158 159 FMT_96_BIT(V_038004_FMT_32_32_32), 160 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), 161 162 /* 128-bit */ 163 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), 164 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), 165 166 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, 167 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, 168 169 /* block compressed formats */ 170 [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, 171 [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, 172 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 173 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 174 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 175 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 176 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 177 178 /* The other Evergreen formats */ 179 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, 180 }; 181 182 bool r600_fmt_is_valid_color(u32 format) 183 { 184 if (format >= ARRAY_SIZE(color_formats_table)) 185 return false; 186 187 if (color_formats_table[format].valid_color) 188 return true; 189 190 return false; 191 } 192 193 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family) 194 { 195 if (format >= ARRAY_SIZE(color_formats_table)) 196 return false; 197 198 if (family < color_formats_table[format].min_family) 199 return false; 200 201 if (color_formats_table[format].blockwidth > 0) 202 return true; 
203 204 return false; 205 } 206 207 int r600_fmt_get_blocksize(u32 format) 208 { 209 if (format >= ARRAY_SIZE(color_formats_table)) 210 return 0; 211 212 return color_formats_table[format].blocksize; 213 } 214 215 int r600_fmt_get_nblocksx(u32 format, u32 w) 216 { 217 unsigned bw; 218 219 if (format >= ARRAY_SIZE(color_formats_table)) 220 return 0; 221 222 bw = color_formats_table[format].blockwidth; 223 if (bw == 0) 224 return 0; 225 226 return (w + bw - 1) / bw; 227 } 228 229 int r600_fmt_get_nblocksy(u32 format, u32 h) 230 { 231 unsigned bh; 232 233 if (format >= ARRAY_SIZE(color_formats_table)) 234 return 0; 235 236 bh = color_formats_table[format].blockheight; 237 if (bh == 0) 238 return 0; 239 240 return (h + bh - 1) / bh; 241 } 242 243 struct array_mode_checker { 244 int array_mode; 245 u32 group_size; 246 u32 nbanks; 247 u32 npipes; 248 u32 nsamples; 249 u32 blocksize; 250 }; 251 252 /* returns alignment in pixels for pitch/height/depth and bytes for base */ 253 static int r600_get_array_mode_alignment(struct array_mode_checker *values, 254 u32 *pitch_align, 255 u32 *height_align, 256 u32 *depth_align, 257 u64 *base_align) 258 { 259 u32 tile_width = 8; 260 u32 tile_height = 8; 261 u32 macro_tile_width = values->nbanks; 262 u32 macro_tile_height = values->npipes; 263 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; 264 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; 265 266 switch (values->array_mode) { 267 case ARRAY_LINEAR_GENERAL: 268 /* technically tile_width/_height for pitch/height */ 269 *pitch_align = 1; /* tile_width */ 270 *height_align = 1; /* tile_height */ 271 *depth_align = 1; 272 *base_align = 1; 273 break; 274 case ARRAY_LINEAR_ALIGNED: 275 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); 276 *height_align = 1; 277 *depth_align = 1; 278 *base_align = values->group_size; 279 break; 280 case ARRAY_1D_TILED_THIN1: 281 *pitch_align = max((u32)tile_width, 282 (u32)(values->group_size / 283 (tile_height * values->blocksize * values->nsamples))); 284 *height_align = tile_height; 285 *depth_align = 1; 286 *base_align = values->group_size; 287 break; 288 case ARRAY_2D_TILED_THIN1: 289 *pitch_align = max((u32)macro_tile_width * tile_width, 290 (u32)((values->group_size * values->nbanks) / 291 (values->blocksize * values->nsamples * tile_width))); 292 *height_align = macro_tile_height * tile_height; 293 *depth_align = 1; 294 *base_align = max(macro_tile_bytes, 295 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); 296 break; 297 default: 298 return -EINVAL; 299 } 300 301 return 0; 302 } 303 304 static void r600_cs_track_init(struct r600_cs_track *track) 305 { 306 int i; 307 308 /* assume DX9 mode */ 309 track->sq_config = DX9_CONSTS; 310 for (i = 0; i < 8; i++) { 311 track->cb_color_base_last[i] = 0; 312 track->cb_color_size[i] = 0; 313 track->cb_color_size_idx[i] = 0; 314 track->cb_color_info[i] = 0; 315 track->cb_color_view[i] = 0xFFFFFFFF; 316 track->cb_color_bo[i] = NULL; 317 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 318 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 319 track->cb_color_frag_bo[i] = NULL; 320 track->cb_color_frag_offset[i] = 0xFFFFFFFF; 321 track->cb_color_tile_bo[i] = NULL; 322 track->cb_color_tile_offset[i] = 0xFFFFFFFF; 323 track->cb_color_mask[i] = 0xFFFFFFFF; 324 } 325 track->is_resolve = false; 326 track->nsamples = 16; 327 track->log_nsamples = 4; 328 track->cb_target_mask = 0xFFFFFFFF; 329 track->cb_shader_mask = 0xFFFFFFFF; 330 track->cb_dirty = 
true; 331 track->db_bo = NULL; 332 track->db_bo_mc = 0xFFFFFFFF; 333 /* assume the biggest format and that htile is enabled */ 334 track->db_depth_info = 7 | (1 << 25); 335 track->db_depth_view = 0xFFFFC000; 336 track->db_depth_size = 0xFFFFFFFF; 337 track->db_depth_size_idx = 0; 338 track->db_depth_control = 0xFFFFFFFF; 339 track->db_dirty = true; 340 track->htile_bo = NULL; 341 track->htile_offset = 0xFFFFFFFF; 342 track->htile_surface = 0; 343 344 for (i = 0; i < 4; i++) { 345 track->vgt_strmout_size[i] = 0; 346 track->vgt_strmout_bo[i] = NULL; 347 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; 348 track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF; 349 } 350 track->streamout_dirty = true; 351 track->sx_misc_kill_all_prims = false; 352 } 353 354 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 355 { 356 struct r600_cs_track *track = p->track; 357 u32 slice_tile_max, size, tmp; 358 u32 height, height_align, pitch, pitch_align, depth_align; 359 u64 base_offset, base_align; 360 struct array_mode_checker array_check; 361 volatile u32 *ib = p->ib.ptr; 362 unsigned array_mode; 363 u32 format; 364 /* When resolve is used, the second colorbuffer has always 1 sample. */ 365 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; 366 367 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 368 format = G_0280A0_FORMAT(track->cb_color_info[i]); 369 if (!r600_fmt_is_valid_color(format)) { 370 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 371 __func__, __LINE__, format, 372 i, track->cb_color_info[i]); 373 return -EINVAL; 374 } 375 /* pitch in pixels */ 376 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 377 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 378 slice_tile_max *= 64; 379 height = slice_tile_max / pitch; 380 if (height > 8192) 381 height = 8192; 382 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 383 384 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 385 array_check.array_mode = array_mode; 386 array_check.group_size = track->group_size; 387 array_check.nbanks = track->nbanks; 388 array_check.npipes = track->npipes; 389 array_check.nsamples = nsamples; 390 array_check.blocksize = r600_fmt_get_blocksize(format); 391 if (r600_get_array_mode_alignment(&array_check, 392 &pitch_align, &height_align, &depth_align, &base_align)) { 393 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 394 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 395 track->cb_color_info[i]); 396 return -EINVAL; 397 } 398 switch (array_mode) { 399 case V_0280A0_ARRAY_LINEAR_GENERAL: 400 break; 401 case V_0280A0_ARRAY_LINEAR_ALIGNED: 402 break; 403 case V_0280A0_ARRAY_1D_TILED_THIN1: 404 /* avoid breaking userspace */ 405 if (height > 7) 406 height &= ~0x7; 407 break; 408 case V_0280A0_ARRAY_2D_TILED_THIN1: 409 break; 410 default: 411 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 412 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 413 track->cb_color_info[i]); 414 return -EINVAL; 415 } 416 417 if (!IS_ALIGNED(pitch, pitch_align)) { 418 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 419 __func__, __LINE__, pitch, pitch_align, array_mode); 420 return -EINVAL; 421 } 422 if (!IS_ALIGNED(height, height_align)) { 423 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 424 __func__, __LINE__, height, height_align, array_mode); 425 return -EINVAL; 426 } 427 if (!IS_ALIGNED(base_offset, base_align)) { 428 dev_warn(p->dev, 
"%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i, 429 base_offset, base_align, array_mode); 430 return -EINVAL; 431 } 432 433 /* check offset */ 434 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * 435 r600_fmt_get_blocksize(format) * nsamples; 436 switch (array_mode) { 437 default: 438 case V_0280A0_ARRAY_LINEAR_GENERAL: 439 case V_0280A0_ARRAY_LINEAR_ALIGNED: 440 tmp += track->cb_color_view[i] & 0xFF; 441 break; 442 case V_0280A0_ARRAY_1D_TILED_THIN1: 443 case V_0280A0_ARRAY_2D_TILED_THIN1: 444 tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; 445 break; 446 } 447 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 448 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 449 /* the initial DDX does bad things with the CB size occasionally */ 450 /* it rounds up height too far for slice tile max but the BO is smaller */ 451 /* r600c,g also seem to flush at bad times in some apps resulting in 452 * bogus values here. So for linear just allow anything to avoid breaking 453 * broken userspace. 454 */ 455 } else { 456 dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", 457 __func__, i, array_mode, 458 track->cb_color_bo_offset[i], tmp, 459 radeon_bo_size(track->cb_color_bo[i]), 460 pitch, height, r600_fmt_get_nblocksx(format, pitch), 461 r600_fmt_get_nblocksy(format, height), 462 r600_fmt_get_blocksize(format)); 463 return -EINVAL; 464 } 465 } 466 /* limit max tile */ 467 tmp = (height * pitch) >> 6; 468 if (tmp < slice_tile_max) 469 slice_tile_max = tmp; 470 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 471 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 472 ib[track->cb_color_size_idx[i]] = tmp; 473 474 /* FMASK/CMASK */ 475 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 476 case V_0280A0_TILE_DISABLE: 477 break; 478 case V_0280A0_FRAG_ENABLE: 479 if (track->nsamples > 1) { 480 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); 481 /* the tile size is 8x8, but the size is in units of bits. 482 * for bytes, do just * 8. */ 483 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); 484 485 if (bytes + track->cb_color_frag_offset[i] > 486 radeon_bo_size(track->cb_color_frag_bo[i])) { 487 dev_warn(p->dev, "%s FMASK_TILE_MAX too large " 488 "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 489 __func__, tile_max, bytes, 490 track->cb_color_frag_offset[i], 491 radeon_bo_size(track->cb_color_frag_bo[i])); 492 return -EINVAL; 493 } 494 } 495 /* fall through */ 496 case V_0280A0_CLEAR_ENABLE: 497 { 498 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); 499 /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. 500 * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ 501 uint32_t bytes = (block_max + 1) * 128; 502 503 if (bytes + track->cb_color_tile_offset[i] > 504 radeon_bo_size(track->cb_color_tile_bo[i])) { 505 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " 506 "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", 507 __func__, block_max, bytes, 508 track->cb_color_tile_offset[i], 509 radeon_bo_size(track->cb_color_tile_bo[i])); 510 return -EINVAL; 511 } 512 break; 513 } 514 default: 515 dev_warn(p->dev, "%s invalid tile mode\n", __func__); 516 return -EINVAL; 517 } 518 return 0; 519 } 520 521 static int r600_cs_track_validate_db(struct radeon_cs_parser *p) 522 { 523 struct r600_cs_track *track = p->track; 524 u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; 525 u32 height_align, pitch_align, depth_align; 526 u32 pitch = 8192; 527 u32 height = 8192; 528 u64 base_offset, base_align; 529 struct array_mode_checker array_check; 530 int array_mode; 531 volatile u32 *ib = p->ib.ptr; 532 533 534 if (track->db_bo == NULL) { 535 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 536 return -EINVAL; 537 } 538 switch (G_028010_FORMAT(track->db_depth_info)) { 539 case V_028010_DEPTH_16: 540 bpe = 2; 541 break; 542 case V_028010_DEPTH_X8_24: 543 case V_028010_DEPTH_8_24: 544 case V_028010_DEPTH_X8_24_FLOAT: 545 case V_028010_DEPTH_8_24_FLOAT: 546 case V_028010_DEPTH_32_FLOAT: 547 bpe = 4; 548 break; 549 case V_028010_DEPTH_X24_8_32_FLOAT: 550 bpe = 8; 551 break; 552 default: 553 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 554 return -EINVAL; 555 } 556 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 557 if (!track->db_depth_size_idx) { 558 dev_warn(p->dev, "z/stencil buffer size not set\n"); 559 return -EINVAL; 560 } 561 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 562 tmp = (tmp / bpe) >> 6; 563 if (!tmp) { 564 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 565 track->db_depth_size, bpe, track->db_offset, 566 radeon_bo_size(track->db_bo)); 567 return -EINVAL; 568 } 569 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 570 } else { 571 size = radeon_bo_size(track->db_bo); 572 /* pitch in pixels */ 573 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 574 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 575 slice_tile_max *= 64; 576 height = slice_tile_max / pitch; 577 if (height > 8192) 578 height = 8192; 579 base_offset = track->db_bo_mc + track->db_offset; 580 array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 581 array_check.array_mode = array_mode; 582 array_check.group_size = track->group_size; 583 array_check.nbanks = track->nbanks; 584 array_check.npipes = track->npipes; 585 array_check.nsamples = track->nsamples; 586 array_check.blocksize = bpe; 587 if (r600_get_array_mode_alignment(&array_check, 588 &pitch_align, &height_align, &depth_align, &base_align)) { 589 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 590 G_028010_ARRAY_MODE(track->db_depth_info), 591 track->db_depth_info); 592 return -EINVAL; 593 } 594 switch (array_mode) { 595 case V_028010_ARRAY_1D_TILED_THIN1: 596 /* don't break userspace */ 597 height &= ~0x7; 598 break; 599 case V_028010_ARRAY_2D_TILED_THIN1: 600 break; 601 default: 602 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 603 G_028010_ARRAY_MODE(track->db_depth_info), 604 track->db_depth_info); 605 return -EINVAL; 606 } 607 608 if (!IS_ALIGNED(pitch, pitch_align)) { 609 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, 
%d) invalid\n", 610 __func__, __LINE__, pitch, pitch_align, array_mode); 611 return -EINVAL; 612 } 613 if (!IS_ALIGNED(height, height_align)) { 614 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 615 __func__, __LINE__, height, height_align, array_mode); 616 return -EINVAL; 617 } 618 if (!IS_ALIGNED(base_offset, base_align)) { 619 dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__, 620 base_offset, base_align, array_mode); 621 return -EINVAL; 622 } 623 624 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 625 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 626 tmp = ntiles * bpe * 64 * nviews * track->nsamples; 627 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 628 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 629 array_mode, 630 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 631 radeon_bo_size(track->db_bo)); 632 return -EINVAL; 633 } 634 } 635 636 /* hyperz */ 637 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { 638 unsigned long size; 639 unsigned nbx, nby; 640 641 if (track->htile_bo == NULL) { 642 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 643 __func__, __LINE__, track->db_depth_info); 644 return -EINVAL; 645 } 646 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 647 dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 648 __func__, __LINE__, track->db_depth_size); 649 return -EINVAL; 650 } 651 652 nbx = pitch; 653 nby = height; 654 if (G_028D24_LINEAR(track->htile_surface)) { 655 /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ 656 nbx = round_up(nbx, 16 * 8); 657 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 658 nby = round_up(nby, track->npipes * 8); 659 } else { 660 /* always assume 8x8 htile */ 661 /* align is htile align * 8, htile align vary according to 662 * number of pipe and tile width and nby 663 */ 664 switch (track->npipes) { 665 case 8: 666 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 667 nbx = round_up(nbx, 64 * 8); 668 nby = round_up(nby, 64 * 8); 669 break; 670 case 4: 671 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 672 nbx = round_up(nbx, 64 * 8); 673 nby = round_up(nby, 32 * 8); 674 break; 675 case 2: 676 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 677 nbx = round_up(nbx, 32 * 8); 678 nby = round_up(nby, 32 * 8); 679 break; 680 case 1: 681 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 682 nbx = round_up(nbx, 32 * 8); 683 nby = round_up(nby, 16 * 8); 684 break; 685 default: 686 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 687 __func__, __LINE__, track->npipes); 688 return -EINVAL; 689 } 690 } 691 /* compute number of htile */ 692 nbx = nbx >> 3; 693 nby = nby >> 3; 694 /* size must be aligned on npipes * 2K boundary */ 695 size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); 696 size += track->htile_offset; 697 698 if (size > radeon_bo_size(track->htile_bo)) { 699 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", 700 __func__, __LINE__, radeon_bo_size(track->htile_bo), 701 size, nbx, nby); 702 return -EINVAL; 703 } 704 } 705 706 track->db_dirty = false; 707 return 0; 708 } 709 710 static int r600_cs_track_check(struct radeon_cs_parser *p) 711 { 712 struct r600_cs_track *track = p->track; 713 u32 tmp; 714 int r, i; 715 716 /* on legacy kernel we don't perform advanced check */ 717 if (p->rdev == NULL) 718 return 0; 719 720 /* check streamout */ 721 if (track->streamout_dirty && track->vgt_strmout_en) { 722 for (i = 0; i < 4; 
i++) { 723 if (track->vgt_strmout_buffer_en & (1 << i)) { 724 if (track->vgt_strmout_bo[i]) { 725 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 726 (u64)track->vgt_strmout_size[i]; 727 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 728 DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", 729 i, offset, 730 radeon_bo_size(track->vgt_strmout_bo[i])); 731 return -EINVAL; 732 } 733 } else { 734 dev_warn(p->dev, "No buffer for streamout %d\n", i); 735 return -EINVAL; 736 } 737 } 738 } 739 track->streamout_dirty = false; 740 } 741 742 if (track->sx_misc_kill_all_prims) 743 return 0; 744 745 /* check that we have a cb for each enabled target, we don't check 746 * shader_mask because it seems mesa isn't always setting it :( 747 */ 748 if (track->cb_dirty) { 749 tmp = track->cb_target_mask; 750 751 /* We must check both colorbuffers for RESOLVE. */ 752 if (track->is_resolve) { 753 tmp |= 0xff; 754 } 755 756 for (i = 0; i < 8; i++) { 757 u32 format = G_0280A0_FORMAT(track->cb_color_info[i]); 758 759 if (format != V_0280A0_COLOR_INVALID && 760 (tmp >> (i * 4)) & 0xF) { 761 /* at least one component is enabled */ 762 if (track->cb_color_bo[i] == NULL) { 763 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 764 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 765 return -EINVAL; 766 } 767 /* perform rewrite of CB_COLOR[0-7]_SIZE */ 768 r = r600_cs_track_validate_cb(p, i); 769 if (r) 770 return r; 771 } 772 } 773 track->cb_dirty = false; 774 } 775 776 /* Check depth buffer */ 777 if (track->db_dirty && 778 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && 779 (G_028800_STENCIL_ENABLE(track->db_depth_control) || 780 G_028800_Z_ENABLE(track->db_depth_control))) { 781 r = r600_cs_track_validate_db(p); 782 if (r) 783 return r; 784 } 785 786 return 0; 787 } 788 789 /** 790 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 791 * @parser: parser structure holding parsing context. 792 * @pkt: where to store packet informations 793 * 794 * Assume that chunk_ib_index is properly set. Will return -EINVAL 795 * if packet is bigger than remaining ib size. or if packets is unknown. 796 **/ 797 static int r600_cs_packet_parse(struct radeon_cs_parser *p, 798 struct radeon_cs_packet *pkt, 799 unsigned idx) 800 { 801 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 802 uint32_t header; 803 804 if (idx >= ib_chunk->length_dw) { 805 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 806 idx, ib_chunk->length_dw); 807 return -EINVAL; 808 } 809 header = radeon_get_ib_value(p, idx); 810 pkt->idx = idx; 811 pkt->type = CP_PACKET_GET_TYPE(header); 812 pkt->count = CP_PACKET_GET_COUNT(header); 813 pkt->one_reg_wr = 0; 814 switch (pkt->type) { 815 case PACKET_TYPE0: 816 pkt->reg = CP_PACKET0_GET_REG(header); 817 break; 818 case PACKET_TYPE3: 819 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 820 break; 821 case PACKET_TYPE2: 822 pkt->count = -1; 823 break; 824 default: 825 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 826 return -EINVAL; 827 } 828 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 829 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 830 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 831 return -EINVAL; 832 } 833 return 0; 834 } 835 836 /** 837 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 838 * @parser: parser structure holding parsing context. 
839 * @data: pointer to relocation data 840 * @offset_start: starting offset 841 * @offset_mask: offset mask (to align start offset on) 842 * @reloc: reloc informations 843 * 844 * Check next packet is relocation packet3, do bo validation and compute 845 * GPU offset using the provided start. 846 **/ 847 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 848 struct radeon_cs_reloc **cs_reloc) 849 { 850 struct radeon_cs_chunk *relocs_chunk; 851 struct radeon_cs_packet p3reloc; 852 unsigned idx; 853 int r; 854 855 if (p->chunk_relocs_idx == -1) { 856 DRM_ERROR("No relocation chunk !\n"); 857 return -EINVAL; 858 } 859 *cs_reloc = NULL; 860 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 861 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 862 if (r) { 863 return r; 864 } 865 p->idx += p3reloc.count + 2; 866 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 867 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 868 p3reloc.idx); 869 return -EINVAL; 870 } 871 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 872 if (idx >= relocs_chunk->length_dw) { 873 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 874 idx, relocs_chunk->length_dw); 875 return -EINVAL; 876 } 877 /* FIXME: we assume reloc size is 4 dwords */ 878 *cs_reloc = p->relocs_ptr[(idx / 4)]; 879 return 0; 880 } 881 882 /** 883 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3 884 * @parser: parser structure holding parsing context. 885 * @data: pointer to relocation data 886 * @offset_start: starting offset 887 * @offset_mask: offset mask (to align start offset on) 888 * @reloc: reloc informations 889 * 890 * Check next packet is relocation packet3, do bo validation and compute 891 * GPU offset using the provided start. 892 **/ 893 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 894 struct radeon_cs_reloc **cs_reloc) 895 { 896 struct radeon_cs_chunk *relocs_chunk; 897 struct radeon_cs_packet p3reloc; 898 unsigned idx; 899 int r; 900 901 if (p->chunk_relocs_idx == -1) { 902 DRM_ERROR("No relocation chunk !\n"); 903 return -EINVAL; 904 } 905 *cs_reloc = NULL; 906 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 907 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 908 if (r) { 909 return r; 910 } 911 p->idx += p3reloc.count + 2; 912 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 913 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 914 p3reloc.idx); 915 return -EINVAL; 916 } 917 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 918 if (idx >= relocs_chunk->length_dw) { 919 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 920 idx, relocs_chunk->length_dw); 921 return -EINVAL; 922 } 923 *cs_reloc = p->relocs; 924 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 925 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 926 return 0; 927 } 928 929 /** 930 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 931 * @parser: parser structure holding parsing context. 932 * 933 * Check next packet is relocation packet3, do bo validation and compute 934 * GPU offset using the provided start. 
935 **/ 936 static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 937 { 938 struct radeon_cs_packet p3reloc; 939 int r; 940 941 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 942 if (r) { 943 return 0; 944 } 945 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 946 return 0; 947 } 948 return 1; 949 } 950 951 /** 952 * r600_cs_packet_next_vline() - parse userspace VLINE packet 953 * @parser: parser structure holding parsing context. 954 * 955 * Userspace sends a special sequence for VLINE waits. 956 * PACKET0 - VLINE_START_END + value 957 * PACKET3 - WAIT_REG_MEM poll vline status reg 958 * RELOC (P3) - crtc_id in reloc. 959 * 960 * This function parses this and relocates the VLINE START END 961 * and WAIT_REG_MEM packets to the correct crtc. 962 * It also detects a switched off crtc and nulls out the 963 * wait in that case. 964 */ 965 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 966 { 967 struct drm_mode_object *obj; 968 struct drm_crtc *crtc; 969 struct radeon_crtc *radeon_crtc; 970 struct radeon_cs_packet p3reloc, wait_reg_mem; 971 int crtc_id; 972 int r; 973 uint32_t header, h_idx, reg, wait_reg_mem_info; 974 volatile uint32_t *ib; 975 976 ib = p->ib.ptr; 977 978 /* parse the WAIT_REG_MEM */ 979 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 980 if (r) 981 return r; 982 983 /* check its a WAIT_REG_MEM */ 984 if (wait_reg_mem.type != PACKET_TYPE3 || 985 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 986 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 987 return -EINVAL; 988 } 989 990 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 991 /* bit 4 is reg (0) or mem (1) */ 992 if (wait_reg_mem_info & 0x10) { 993 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 994 return -EINVAL; 995 } 996 /* waiting for value to be equal */ 997 if ((wait_reg_mem_info & 0x7) != 0x3) { 998 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 999 return -EINVAL; 1000 } 1001 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 1002 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 1003 return -EINVAL; 1004 } 1005 1006 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 1007 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 1008 return -EINVAL; 1009 } 1010 1011 /* jump over the NOP */ 1012 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 1013 if (r) 1014 return r; 1015 1016 h_idx = p->idx - 2; 1017 p->idx += wait_reg_mem.count + 2; 1018 p->idx += p3reloc.count + 2; 1019 1020 header = radeon_get_ib_value(p, h_idx); 1021 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 1022 reg = CP_PACKET0_GET_REG(header); 1023 1024 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1025 if (!obj) { 1026 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1027 return -EINVAL; 1028 } 1029 crtc = obj_to_crtc(obj); 1030 radeon_crtc = to_radeon_crtc(crtc); 1031 crtc_id = radeon_crtc->crtc_id; 1032 1033 if (!crtc->enabled) { 1034 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 1035 ib[h_idx + 2] = PACKET2(0); 1036 ib[h_idx + 3] = PACKET2(0); 1037 ib[h_idx + 4] = PACKET2(0); 1038 ib[h_idx + 5] = PACKET2(0); 1039 ib[h_idx + 6] = PACKET2(0); 1040 ib[h_idx + 7] = PACKET2(0); 1041 ib[h_idx + 8] = PACKET2(0); 1042 } else if (crtc_id == 1) { 1043 switch (reg) { 1044 case AVIVO_D1MODE_VLINE_START_END: 1045 header &= ~R600_CP_PACKET0_REG_MASK; 1046 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1047 break; 1048 default: 1049 
DRM_ERROR("unknown crtc reloc\n"); 1050 return -EINVAL; 1051 } 1052 ib[h_idx] = header; 1053 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 1054 } 1055 1056 return 0; 1057 } 1058 1059 static int r600_packet0_check(struct radeon_cs_parser *p, 1060 struct radeon_cs_packet *pkt, 1061 unsigned idx, unsigned reg) 1062 { 1063 int r; 1064 1065 switch (reg) { 1066 case AVIVO_D1MODE_VLINE_START_END: 1067 r = r600_cs_packet_parse_vline(p); 1068 if (r) { 1069 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1070 idx, reg); 1071 return r; 1072 } 1073 break; 1074 default: 1075 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1076 reg, idx); 1077 return -EINVAL; 1078 } 1079 return 0; 1080 } 1081 1082 static int r600_cs_parse_packet0(struct radeon_cs_parser *p, 1083 struct radeon_cs_packet *pkt) 1084 { 1085 unsigned reg, i; 1086 unsigned idx; 1087 int r; 1088 1089 idx = pkt->idx + 1; 1090 reg = pkt->reg; 1091 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 1092 r = r600_packet0_check(p, pkt, idx, reg); 1093 if (r) { 1094 return r; 1095 } 1096 } 1097 return 0; 1098 } 1099 1100 /** 1101 * r600_cs_check_reg() - check if register is authorized or not 1102 * @parser: parser structure holding parsing context 1103 * @reg: register we are testing 1104 * @idx: index into the cs buffer 1105 * 1106 * This function will test against r600_reg_safe_bm and return 0 1107 * if register is safe. If register is not flag as safe this function 1108 * will test it against a list of register needind special handling. 1109 */ 1110 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1111 { 1112 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 1113 struct radeon_cs_reloc *reloc; 1114 u32 m, i, tmp, *ib; 1115 int r; 1116 1117 i = (reg >> 7); 1118 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 1119 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1120 return -EINVAL; 1121 } 1122 m = 1 << ((reg >> 2) & 31); 1123 if (!(r600_reg_safe_bm[i] & m)) 1124 return 0; 1125 ib = p->ib.ptr; 1126 switch (reg) { 1127 /* force following reg to 0 in an attempt to disable out buffer 1128 * which will need us to better understand how it works to perform 1129 * security check on it (Jerome) 1130 */ 1131 case R_0288A8_SQ_ESGS_RING_ITEMSIZE: 1132 case R_008C44_SQ_ESGS_RING_SIZE: 1133 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: 1134 case R_008C54_SQ_ESTMP_RING_SIZE: 1135 case R_0288C0_SQ_FBUF_RING_ITEMSIZE: 1136 case R_008C74_SQ_FBUF_RING_SIZE: 1137 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: 1138 case R_008C5C_SQ_GSTMP_RING_SIZE: 1139 case R_0288AC_SQ_GSVS_RING_ITEMSIZE: 1140 case R_008C4C_SQ_GSVS_RING_SIZE: 1141 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: 1142 case R_008C6C_SQ_PSTMP_RING_SIZE: 1143 case R_0288C4_SQ_REDUC_RING_ITEMSIZE: 1144 case R_008C7C_SQ_REDUC_RING_SIZE: 1145 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: 1146 case R_008C64_SQ_VSTMP_RING_SIZE: 1147 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 1148 /* get value to populate the IB don't remove */ 1149 /*tmp =radeon_get_ib_value(p, idx); 1150 ib[idx] = 0;*/ 1151 break; 1152 case SQ_ESGS_RING_BASE: 1153 case SQ_GSVS_RING_BASE: 1154 case SQ_ESTMP_RING_BASE: 1155 case SQ_GSTMP_RING_BASE: 1156 case SQ_PSTMP_RING_BASE: 1157 case SQ_VSTMP_RING_BASE: 1158 r = r600_cs_packet_next_reloc(p, &reloc); 1159 if (r) { 1160 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1161 "0x%04X\n", reg); 1162 return -EINVAL; 1163 } 1164 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1165 break; 1166 case SQ_CONFIG: 1167 track->sq_config = radeon_get_ib_value(p, idx); 1168 break; 1169 
case R_028800_DB_DEPTH_CONTROL: 1170 track->db_depth_control = radeon_get_ib_value(p, idx); 1171 track->db_dirty = true; 1172 break; 1173 case R_028010_DB_DEPTH_INFO: 1174 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1175 r600_cs_packet_next_is_pkt3_nop(p)) { 1176 r = r600_cs_packet_next_reloc(p, &reloc); 1177 if (r) { 1178 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1179 "0x%04X\n", reg); 1180 return -EINVAL; 1181 } 1182 track->db_depth_info = radeon_get_ib_value(p, idx); 1183 ib[idx] &= C_028010_ARRAY_MODE; 1184 track->db_depth_info &= C_028010_ARRAY_MODE; 1185 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1186 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1187 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1188 } else { 1189 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1190 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1191 } 1192 } else { 1193 track->db_depth_info = radeon_get_ib_value(p, idx); 1194 } 1195 track->db_dirty = true; 1196 break; 1197 case R_028004_DB_DEPTH_VIEW: 1198 track->db_depth_view = radeon_get_ib_value(p, idx); 1199 track->db_dirty = true; 1200 break; 1201 case R_028000_DB_DEPTH_SIZE: 1202 track->db_depth_size = radeon_get_ib_value(p, idx); 1203 track->db_depth_size_idx = idx; 1204 track->db_dirty = true; 1205 break; 1206 case R_028AB0_VGT_STRMOUT_EN: 1207 track->vgt_strmout_en = radeon_get_ib_value(p, idx); 1208 track->streamout_dirty = true; 1209 break; 1210 case R_028B20_VGT_STRMOUT_BUFFER_EN: 1211 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); 1212 track->streamout_dirty = true; 1213 break; 1214 case VGT_STRMOUT_BUFFER_BASE_0: 1215 case VGT_STRMOUT_BUFFER_BASE_1: 1216 case VGT_STRMOUT_BUFFER_BASE_2: 1217 case VGT_STRMOUT_BUFFER_BASE_3: 1218 r = r600_cs_packet_next_reloc(p, &reloc); 1219 if (r) { 1220 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1221 "0x%04X\n", reg); 1222 return -EINVAL; 1223 } 1224 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1225 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1226 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1227 track->vgt_strmout_bo[tmp] = reloc->robj; 1228 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; 1229 track->streamout_dirty = true; 1230 break; 1231 case VGT_STRMOUT_BUFFER_SIZE_0: 1232 case VGT_STRMOUT_BUFFER_SIZE_1: 1233 case VGT_STRMOUT_BUFFER_SIZE_2: 1234 case VGT_STRMOUT_BUFFER_SIZE_3: 1235 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; 1236 /* size in register is DWs, convert to bytes */ 1237 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; 1238 track->streamout_dirty = true; 1239 break; 1240 case CP_COHER_BASE: 1241 r = r600_cs_packet_next_reloc(p, &reloc); 1242 if (r) { 1243 dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1244 "0x%04X\n", reg); 1245 return -EINVAL; 1246 } 1247 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1248 break; 1249 case R_028238_CB_TARGET_MASK: 1250 track->cb_target_mask = radeon_get_ib_value(p, idx); 1251 track->cb_dirty = true; 1252 break; 1253 case R_02823C_CB_SHADER_MASK: 1254 track->cb_shader_mask = radeon_get_ib_value(p, idx); 1255 break; 1256 case R_028C04_PA_SC_AA_CONFIG: 1257 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1258 track->log_nsamples = tmp; 1259 track->nsamples = 1 << tmp; 1260 track->cb_dirty = true; 1261 break; 1262 case R_028808_CB_COLOR_CONTROL: 1263 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); 1264 track->is_resolve = tmp == 
V_028808_SPECIAL_RESOLVE_BOX; 1265 track->cb_dirty = true; 1266 break; 1267 case R_0280A0_CB_COLOR0_INFO: 1268 case R_0280A4_CB_COLOR1_INFO: 1269 case R_0280A8_CB_COLOR2_INFO: 1270 case R_0280AC_CB_COLOR3_INFO: 1271 case R_0280B0_CB_COLOR4_INFO: 1272 case R_0280B4_CB_COLOR5_INFO: 1273 case R_0280B8_CB_COLOR6_INFO: 1274 case R_0280BC_CB_COLOR7_INFO: 1275 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1276 r600_cs_packet_next_is_pkt3_nop(p)) { 1277 r = r600_cs_packet_next_reloc(p, &reloc); 1278 if (r) { 1279 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1280 return -EINVAL; 1281 } 1282 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1283 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1284 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1285 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1286 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1287 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1288 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1289 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1290 } 1291 } else { 1292 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1293 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1294 } 1295 track->cb_dirty = true; 1296 break; 1297 case R_028080_CB_COLOR0_VIEW: 1298 case R_028084_CB_COLOR1_VIEW: 1299 case R_028088_CB_COLOR2_VIEW: 1300 case R_02808C_CB_COLOR3_VIEW: 1301 case R_028090_CB_COLOR4_VIEW: 1302 case R_028094_CB_COLOR5_VIEW: 1303 case R_028098_CB_COLOR6_VIEW: 1304 case R_02809C_CB_COLOR7_VIEW: 1305 tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; 1306 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); 1307 track->cb_dirty = true; 1308 break; 1309 case R_028060_CB_COLOR0_SIZE: 1310 case R_028064_CB_COLOR1_SIZE: 1311 case R_028068_CB_COLOR2_SIZE: 1312 case R_02806C_CB_COLOR3_SIZE: 1313 case R_028070_CB_COLOR4_SIZE: 1314 case R_028074_CB_COLOR5_SIZE: 1315 case R_028078_CB_COLOR6_SIZE: 1316 case R_02807C_CB_COLOR7_SIZE: 1317 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; 1318 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); 1319 track->cb_color_size_idx[tmp] = idx; 1320 track->cb_dirty = true; 1321 break; 1322 /* This register were added late, there is userspace 1323 * which does provide relocation for those but set 1324 * 0 offset. In order to avoid breaking old userspace 1325 * we detect this and set address to point to last 1326 * CB_COLOR0_BASE, note that if userspace doesn't set 1327 * CB_COLOR0_BASE before this register we will report 1328 * error. Old userspace always set CB_COLOR0_BASE 1329 * before any of this. 1330 */ 1331 case R_0280E0_CB_COLOR0_FRAG: 1332 case R_0280E4_CB_COLOR1_FRAG: 1333 case R_0280E8_CB_COLOR2_FRAG: 1334 case R_0280EC_CB_COLOR3_FRAG: 1335 case R_0280F0_CB_COLOR4_FRAG: 1336 case R_0280F4_CB_COLOR5_FRAG: 1337 case R_0280F8_CB_COLOR6_FRAG: 1338 case R_0280FC_CB_COLOR7_FRAG: 1339 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1340 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1341 if (!track->cb_color_base_last[tmp]) { 1342 dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1343 return -EINVAL; 1344 } 1345 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1346 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; 1347 ib[idx] = track->cb_color_base_last[tmp]; 1348 } else { 1349 r = r600_cs_packet_next_reloc(p, &reloc); 1350 if (r) { 1351 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1352 return -EINVAL; 1353 } 1354 track->cb_color_frag_bo[tmp] = reloc->robj; 1355 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; 1356 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1357 } 1358 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1359 track->cb_dirty = true; 1360 } 1361 break; 1362 case R_0280C0_CB_COLOR0_TILE: 1363 case R_0280C4_CB_COLOR1_TILE: 1364 case R_0280C8_CB_COLOR2_TILE: 1365 case R_0280CC_CB_COLOR3_TILE: 1366 case R_0280D0_CB_COLOR4_TILE: 1367 case R_0280D4_CB_COLOR5_TILE: 1368 case R_0280D8_CB_COLOR6_TILE: 1369 case R_0280DC_CB_COLOR7_TILE: 1370 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1371 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1372 if (!track->cb_color_base_last[tmp]) { 1373 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1374 return -EINVAL; 1375 } 1376 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1377 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; 1378 ib[idx] = track->cb_color_base_last[tmp]; 1379 } else { 1380 r = r600_cs_packet_next_reloc(p, &reloc); 1381 if (r) { 1382 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1383 return -EINVAL; 1384 } 1385 track->cb_color_tile_bo[tmp] = reloc->robj; 1386 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; 1387 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1388 } 1389 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1390 track->cb_dirty = true; 1391 } 1392 break; 1393 case R_028100_CB_COLOR0_MASK: 1394 case R_028104_CB_COLOR1_MASK: 1395 case R_028108_CB_COLOR2_MASK: 1396 case R_02810C_CB_COLOR3_MASK: 1397 case R_028110_CB_COLOR4_MASK: 1398 case R_028114_CB_COLOR5_MASK: 1399 case R_028118_CB_COLOR6_MASK: 1400 case R_02811C_CB_COLOR7_MASK: 1401 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; 1402 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); 1403 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1404 track->cb_dirty = true; 1405 } 1406 break; 1407 case CB_COLOR0_BASE: 1408 case CB_COLOR1_BASE: 1409 case CB_COLOR2_BASE: 1410 case CB_COLOR3_BASE: 1411 case CB_COLOR4_BASE: 1412 case CB_COLOR5_BASE: 1413 case CB_COLOR6_BASE: 1414 case CB_COLOR7_BASE: 1415 r = r600_cs_packet_next_reloc(p, &reloc); 1416 if (r) { 1417 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1418 "0x%04X\n", reg); 1419 return -EINVAL; 1420 } 1421 tmp = (reg - CB_COLOR0_BASE) / 4; 1422 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1423 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1424 track->cb_color_base_last[tmp] = ib[idx]; 1425 track->cb_color_bo[tmp] = reloc->robj; 1426 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1427 track->cb_dirty = true; 1428 break; 1429 case DB_DEPTH_BASE: 1430 r = r600_cs_packet_next_reloc(p, &reloc); 1431 if (r) { 1432 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1433 "0x%04X\n", reg); 1434 return -EINVAL; 1435 } 1436 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1437 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1438 track->db_bo = reloc->robj; 1439 track->db_bo_mc = reloc->lobj.gpu_offset; 1440 track->db_dirty = 
true; 1441 break; 1442 case DB_HTILE_DATA_BASE: 1443 r = r600_cs_packet_next_reloc(p, &reloc); 1444 if (r) { 1445 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1446 "0x%04X\n", reg); 1447 return -EINVAL; 1448 } 1449 track->htile_offset = radeon_get_ib_value(p, idx) << 8; 1450 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1451 track->htile_bo = reloc->robj; 1452 track->db_dirty = true; 1453 break; 1454 case DB_HTILE_SURFACE: 1455 track->htile_surface = radeon_get_ib_value(p, idx); 1456 /* force 8x8 htile width and height */ 1457 ib[idx] |= 3; 1458 track->db_dirty = true; 1459 break; 1460 case SQ_PGM_START_FS: 1461 case SQ_PGM_START_ES: 1462 case SQ_PGM_START_VS: 1463 case SQ_PGM_START_GS: 1464 case SQ_PGM_START_PS: 1465 case SQ_ALU_CONST_CACHE_GS_0: 1466 case SQ_ALU_CONST_CACHE_GS_1: 1467 case SQ_ALU_CONST_CACHE_GS_2: 1468 case SQ_ALU_CONST_CACHE_GS_3: 1469 case SQ_ALU_CONST_CACHE_GS_4: 1470 case SQ_ALU_CONST_CACHE_GS_5: 1471 case SQ_ALU_CONST_CACHE_GS_6: 1472 case SQ_ALU_CONST_CACHE_GS_7: 1473 case SQ_ALU_CONST_CACHE_GS_8: 1474 case SQ_ALU_CONST_CACHE_GS_9: 1475 case SQ_ALU_CONST_CACHE_GS_10: 1476 case SQ_ALU_CONST_CACHE_GS_11: 1477 case SQ_ALU_CONST_CACHE_GS_12: 1478 case SQ_ALU_CONST_CACHE_GS_13: 1479 case SQ_ALU_CONST_CACHE_GS_14: 1480 case SQ_ALU_CONST_CACHE_GS_15: 1481 case SQ_ALU_CONST_CACHE_PS_0: 1482 case SQ_ALU_CONST_CACHE_PS_1: 1483 case SQ_ALU_CONST_CACHE_PS_2: 1484 case SQ_ALU_CONST_CACHE_PS_3: 1485 case SQ_ALU_CONST_CACHE_PS_4: 1486 case SQ_ALU_CONST_CACHE_PS_5: 1487 case SQ_ALU_CONST_CACHE_PS_6: 1488 case SQ_ALU_CONST_CACHE_PS_7: 1489 case SQ_ALU_CONST_CACHE_PS_8: 1490 case SQ_ALU_CONST_CACHE_PS_9: 1491 case SQ_ALU_CONST_CACHE_PS_10: 1492 case SQ_ALU_CONST_CACHE_PS_11: 1493 case SQ_ALU_CONST_CACHE_PS_12: 1494 case SQ_ALU_CONST_CACHE_PS_13: 1495 case SQ_ALU_CONST_CACHE_PS_14: 1496 case SQ_ALU_CONST_CACHE_PS_15: 1497 case SQ_ALU_CONST_CACHE_VS_0: 1498 case SQ_ALU_CONST_CACHE_VS_1: 1499 case SQ_ALU_CONST_CACHE_VS_2: 1500 case SQ_ALU_CONST_CACHE_VS_3: 1501 case SQ_ALU_CONST_CACHE_VS_4: 1502 case SQ_ALU_CONST_CACHE_VS_5: 1503 case SQ_ALU_CONST_CACHE_VS_6: 1504 case SQ_ALU_CONST_CACHE_VS_7: 1505 case SQ_ALU_CONST_CACHE_VS_8: 1506 case SQ_ALU_CONST_CACHE_VS_9: 1507 case SQ_ALU_CONST_CACHE_VS_10: 1508 case SQ_ALU_CONST_CACHE_VS_11: 1509 case SQ_ALU_CONST_CACHE_VS_12: 1510 case SQ_ALU_CONST_CACHE_VS_13: 1511 case SQ_ALU_CONST_CACHE_VS_14: 1512 case SQ_ALU_CONST_CACHE_VS_15: 1513 r = r600_cs_packet_next_reloc(p, &reloc); 1514 if (r) { 1515 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1516 "0x%04X\n", reg); 1517 return -EINVAL; 1518 } 1519 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1520 break; 1521 case SX_MEMORY_EXPORT_BASE: 1522 r = r600_cs_packet_next_reloc(p, &reloc); 1523 if (r) { 1524 dev_warn(p->dev, "bad SET_CONFIG_REG " 1525 "0x%04X\n", reg); 1526 return -EINVAL; 1527 } 1528 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1529 break; 1530 case SX_MISC: 1531 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1532 break; 1533 default: 1534 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1535 return -EINVAL; 1536 } 1537 return 0; 1538 } 1539 1540 unsigned r600_mip_minify(unsigned size, unsigned level) 1541 { 1542 unsigned val; 1543 1544 val = max(1U, size >> level); 1545 if (level > 0) 1546 val = roundup_pow_of_two(val); 1547 return val; 1548 } 1549 1550 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1551 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned 
format, 1552 unsigned block_align, unsigned height_align, unsigned base_align, 1553 unsigned *l0_size, unsigned *mipmap_size) 1554 { 1555 unsigned offset, i, level; 1556 unsigned width, height, depth, size; 1557 unsigned blocksize; 1558 unsigned nbx, nby; 1559 unsigned nlevels = llevel - blevel + 1; 1560 1561 *l0_size = -1; 1562 blocksize = r600_fmt_get_blocksize(format); 1563 1564 w0 = r600_mip_minify(w0, 0); 1565 h0 = r600_mip_minify(h0, 0); 1566 d0 = r600_mip_minify(d0, 0); 1567 for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { 1568 width = r600_mip_minify(w0, i); 1569 nbx = r600_fmt_get_nblocksx(format, width); 1570 1571 nbx = round_up(nbx, block_align); 1572 1573 height = r600_mip_minify(h0, i); 1574 nby = r600_fmt_get_nblocksy(format, height); 1575 nby = round_up(nby, height_align); 1576 1577 depth = r600_mip_minify(d0, i); 1578 1579 size = nbx * nby * blocksize * nsamples; 1580 if (nfaces) 1581 size *= nfaces; 1582 else 1583 size *= depth; 1584 1585 if (i == 0) 1586 *l0_size = size; 1587 1588 if (i == 0 || i == 1) 1589 offset = round_up(offset, base_align); 1590 1591 offset += size; 1592 } 1593 *mipmap_size = offset; 1594 if (llevel == 0) 1595 *mipmap_size = *l0_size; 1596 if (!blevel) 1597 *mipmap_size -= *l0_size; 1598 } 1599 1600 /** 1601 * r600_check_texture_resource() - check if register is authorized or not 1602 * @p: parser structure holding parsing context 1603 * @idx: index into the cs buffer 1604 * @texture: texture's bo structure 1605 * @mipmap: mipmap's bo structure 1606 * 1607 * This function will check that the resource has valid field and that 1608 * the texture and mipmap bo object are big enough to cover this resource. 1609 */ 1610 static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, 1611 struct radeon_bo *texture, 1612 struct radeon_bo *mipmap, 1613 u64 base_offset, 1614 u64 mip_offset, 1615 u32 tiling_flags) 1616 { 1617 struct r600_cs_track *track = p->track; 1618 u32 dim, nfaces, llevel, blevel, w0, h0, d0; 1619 u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; 1620 u32 height_align, pitch, pitch_align, depth_align; 1621 u32 barray, larray; 1622 u64 base_align; 1623 struct array_mode_checker array_check; 1624 u32 format; 1625 bool is_array; 1626 1627 /* on legacy kernel we don't perform advanced check */ 1628 if (p->rdev == NULL) 1629 return 0; 1630 1631 /* convert to bytes */ 1632 base_offset <<= 8; 1633 mip_offset <<= 8; 1634 1635 word0 = radeon_get_ib_value(p, idx + 0); 1636 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1637 if (tiling_flags & RADEON_TILING_MACRO) 1638 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1639 else if (tiling_flags & RADEON_TILING_MICRO) 1640 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1641 } 1642 word1 = radeon_get_ib_value(p, idx + 1); 1643 word2 = radeon_get_ib_value(p, idx + 2) << 8; 1644 word3 = radeon_get_ib_value(p, idx + 3) << 8; 1645 word4 = radeon_get_ib_value(p, idx + 4); 1646 word5 = radeon_get_ib_value(p, idx + 5); 1647 dim = G_038000_DIM(word0); 1648 w0 = G_038000_TEX_WIDTH(word0) + 1; 1649 pitch = (G_038000_PITCH(word0) + 1) * 8; 1650 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1651 d0 = G_038004_TEX_DEPTH(word1); 1652 format = G_038004_DATA_FORMAT(word1); 1653 blevel = G_038010_BASE_LEVEL(word4); 1654 llevel = G_038014_LAST_LEVEL(word5); 1655 /* pitch in texels */ 1656 array_check.array_mode = G_038000_TILE_MODE(word0); 1657 array_check.group_size = track->group_size; 1658 array_check.nbanks = track->nbanks; 1659 array_check.npipes 
= track->npipes; 1660 array_check.nsamples = 1; 1661 array_check.blocksize = r600_fmt_get_blocksize(format); 1662 nfaces = 1; 1663 is_array = false; 1664 switch (dim) { 1665 case V_038000_SQ_TEX_DIM_1D: 1666 case V_038000_SQ_TEX_DIM_2D: 1667 case V_038000_SQ_TEX_DIM_3D: 1668 break; 1669 case V_038000_SQ_TEX_DIM_CUBEMAP: 1670 if (p->family >= CHIP_RV770) 1671 nfaces = 8; 1672 else 1673 nfaces = 6; 1674 break; 1675 case V_038000_SQ_TEX_DIM_1D_ARRAY: 1676 case V_038000_SQ_TEX_DIM_2D_ARRAY: 1677 is_array = true; 1678 break; 1679 case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: 1680 is_array = true; 1681 /* fall through */ 1682 case V_038000_SQ_TEX_DIM_2D_MSAA: 1683 array_check.nsamples = 1 << llevel; 1684 llevel = 0; 1685 break; 1686 default: 1687 dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); 1688 return -EINVAL; 1689 } 1690 if (!r600_fmt_is_valid_texture(format, p->family)) { 1691 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1692 __func__, __LINE__, format); 1693 return -EINVAL; 1694 } 1695 1696 if (r600_get_array_mode_alignment(&array_check, 1697 &pitch_align, &height_align, &depth_align, &base_align)) { 1698 dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", 1699 __func__, __LINE__, G_038000_TILE_MODE(word0)); 1700 return -EINVAL; 1701 } 1702 1703 /* XXX check height as well... */ 1704 1705 if (!IS_ALIGNED(pitch, pitch_align)) { 1706 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n", 1707 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0)); 1708 return -EINVAL; 1709 } 1710 if (!IS_ALIGNED(base_offset, base_align)) { 1711 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n", 1712 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0)); 1713 return -EINVAL; 1714 } 1715 if (!IS_ALIGNED(mip_offset, base_align)) { 1716 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n", 1717 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0)); 1718 return -EINVAL; 1719 } 1720 1721 if (blevel > llevel) { 1722 dev_warn(p->dev, "texture blevel %d > llevel %d\n", 1723 blevel, llevel); 1724 } 1725 if (is_array) { 1726 barray = G_038014_BASE_ARRAY(word5); 1727 larray = G_038014_LAST_ARRAY(word5); 1728 1729 nfaces = larray - barray + 1; 1730 } 1731 r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, 1732 pitch_align, height_align, base_align, 1733 &l0_size, &mipmap_size); 1734 /* using get ib will give us the offset into the texture bo */ 1735 if ((l0_size + word2) > radeon_bo_size(texture)) { 1736 dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n", 1737 w0, h0, pitch_align, height_align, 1738 array_check.array_mode, format, word2, 1739 l0_size, radeon_bo_size(texture)); 1740 dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align); 1741 return -EINVAL; 1742 } 1743 /* using get ib will give us the offset into the mipmap bo */ 1744 if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { 1745 /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", 1746 w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ 1747 } 1748 return 0; 1749 } 1750 1751 static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1752 { 1753 u32 m, i; 1754 1755 i = (reg >> 7); 1756 if (i >= ARRAY_SIZE(r600_reg_safe_bm)) { 1757 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1758 return false; 1759 } 1760 m = 1 << ((reg >> 2) & 
static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
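
	/*
	 * Illustrative note: the relocation cases in this switch all split a
	 * 40-bit GPU address the same way: a low dword masked to the packet's
	 * required alignment (0xfffffff0, 0xfffffff8 or 0xfffffffc) plus a
	 * high byte from upper_32_bits(offset) & 0xff.  For a made-up address
	 * of 0x1_2345_6780 the low dword would be 0x23456780 and the high
	 * byte 0x01.
	 */
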
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			DRM_ERROR("CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			DRM_ERROR("CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
					 tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
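
	/*
	 * Illustrative note on the SET_*_REG cases below: the first data dword
	 * of the packet is a register index in dwords, so for a hypothetical
	 * idx_value of 0x80 the first register written by SET_CONFIG_REG would
	 * be (0x80 << 2) + PACKET3_SET_CONFIG_REG_OFFSET; start_reg and
	 * end_reg are then checked so the whole run of pkt->count registers
	 * stays inside that packet's register window.
	 */
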
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*7)+0] = offset64;
				ib[idx+1+(i*7)+2] = (ib[idx+1+(i*7)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
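
	/*
	 * Illustrative note: each SET_RESOURCE slot above is 7 dwords on r6xx,
	 * which is why the packet length must be a multiple of 7 and why the
	 * per-resource words are addressed as idx + 1 + (i * 7) + n; dword 6
	 * of a slot carries the SQ_VTX_CONSTANT_TYPE field that selects the
	 * texture or vertex-buffer interpretation.
	 */
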
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
					  offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
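
	/*
	 * Illustrative note: in the COPY_DW case above, bit 0 of the first
	 * data dword selects a memory source (1) or a register source (0),
	 * and bit 1 does the same for the destination; register operands are
	 * vetted with r600_is_safe_reg() while memory operands get a reloc
	 * and a bounds check.
	 */
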
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

/* the functions below are only needed for the legacy UMS path */
#if 0
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set then unvalidate buffer, otherwise just free memory
 * used by parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
			kfree(parser->chunks[i].kpage[0]);
			kfree(parser->chunks[i].kpage[1]);
		}
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
#ifdef notyet
	parser.dev = dev;
#endif
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
#endif

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}

/*
 * DMA
 */
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p: parser structure holding parsing context.
 * @cs_reloc: reloc information
 *
 * Return the next reloc, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[idx];
	p->dma_reloc_idx++;
	return 0;
}

#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
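
/*
 * Illustrative sketch only: how a DMA packet header decomposes with the
 * macros above.  The function name and the header value are made up for
 * the example.
 */
#if 0
static void r600_dma_header_example(void)
{
	u32 header = 0x40800010;		/* made-up header dword */
	u32 cmd = GET_DMA_CMD(header);		/* 0x4: DMA_PACKET_WRITE */
	u32 count = GET_DMA_COUNT(header);	/* 0x10 dwords of payload */
	u32 tiled = GET_DMA_T(header);		/* 1: tiled addressing */
}
#endif
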
/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p: parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
					 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
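
/*
 * Summary (derived from the parser above, for orientation only): an r6xx/r7xx
 * DMA IB is a stream of packets whose first dword encodes the command in bits
 * 31:28, the tiling bit in bit 23 and the dword count in bits 15:0; WRITE and
 * COPY packets then carry destination/source addresses as a low dword plus a
 * high byte (or an address shifted right by 8 for tiled accesses), which is
 * exactly what the relocation fix-ups above rewrite.
 */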