1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * Copyright 2008 Red Hat Inc. 4 * Copyright 2009 Jerome Glisse. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Dave Airlie 25 * Alex Deucher 26 * Jerome Glisse 27 * 28 * $FreeBSD: head/sys/dev/drm2/radeon/r600_cs.c 254885 2013-08-25 19:37:15Z dumbbell $ 29 */ 30 31 #include <drm/drmP.h> 32 #include "radeon.h" 33 #include "radeon_asic.h" 34 #include "r600d.h" 35 #include "r600_reg_safe.h" 36 #include "r600_cp.h" 37 #include "r600_cs.h" 38 39 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 40 struct radeon_cs_reloc **cs_reloc); 41 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 42 struct radeon_cs_reloc **cs_reloc); 43 typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); 44 static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; 45 46 47 struct r600_cs_track { 48 /* configuration we miror so that we use same code btw kms/ums */ 49 u32 group_size; 50 u32 nbanks; 51 u32 npipes; 52 /* value we track */ 53 u32 sq_config; 54 u32 log_nsamples; 55 u32 nsamples; 56 u32 cb_color_base_last[8]; 57 struct radeon_bo *cb_color_bo[8]; 58 u64 cb_color_bo_mc[8]; 59 u64 cb_color_bo_offset[8]; 60 struct radeon_bo *cb_color_frag_bo[8]; 61 u64 cb_color_frag_offset[8]; 62 struct radeon_bo *cb_color_tile_bo[8]; 63 u64 cb_color_tile_offset[8]; 64 u32 cb_color_mask[8]; 65 u32 cb_color_info[8]; 66 u32 cb_color_view[8]; 67 u32 cb_color_size_idx[8]; /* unused */ 68 u32 cb_target_mask; 69 u32 cb_shader_mask; /* unused */ 70 bool is_resolve; 71 u32 cb_color_size[8]; 72 u32 vgt_strmout_en; 73 u32 vgt_strmout_buffer_en; 74 struct radeon_bo *vgt_strmout_bo[4]; 75 u64 vgt_strmout_bo_mc[4]; /* unused */ 76 u32 vgt_strmout_bo_offset[4]; 77 u32 vgt_strmout_size[4]; 78 u32 db_depth_control; 79 u32 db_depth_info; 80 u32 db_depth_size_idx; 81 u32 db_depth_view; 82 u32 db_depth_size; 83 u32 db_offset; 84 struct radeon_bo *db_bo; 85 u64 db_bo_mc; 86 bool sx_misc_kill_all_prims; 87 bool cb_dirty; 88 bool db_dirty; 89 bool streamout_dirty; 90 struct radeon_bo *htile_bo; 91 u64 htile_offset; 92 u32 htile_surface; 93 }; 94 95 #define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 } 96 #define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 } 97 #define FMT_24_BIT(fmt) [fmt] = { 1, 1, 4, 0, CHIP_R600 } 98 #define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 } 99 #define FMT_48_BIT(fmt) [fmt] = { 1, 1, 8, 0, 
CHIP_R600 } 100 #define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 } 101 #define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 } 102 #define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 } 103 104 struct gpu_formats { 105 unsigned blockwidth; 106 unsigned blockheight; 107 unsigned blocksize; 108 unsigned valid_color; 109 enum radeon_family min_family; 110 }; 111 112 static const struct gpu_formats color_formats_table[] = { 113 /* 8 bit */ 114 FMT_8_BIT(V_038004_COLOR_8, 1), 115 FMT_8_BIT(V_038004_COLOR_4_4, 1), 116 FMT_8_BIT(V_038004_COLOR_3_3_2, 1), 117 FMT_8_BIT(V_038004_FMT_1, 0), 118 119 /* 16-bit */ 120 FMT_16_BIT(V_038004_COLOR_16, 1), 121 FMT_16_BIT(V_038004_COLOR_16_FLOAT, 1), 122 FMT_16_BIT(V_038004_COLOR_8_8, 1), 123 FMT_16_BIT(V_038004_COLOR_5_6_5, 1), 124 FMT_16_BIT(V_038004_COLOR_6_5_5, 1), 125 FMT_16_BIT(V_038004_COLOR_1_5_5_5, 1), 126 FMT_16_BIT(V_038004_COLOR_4_4_4_4, 1), 127 FMT_16_BIT(V_038004_COLOR_5_5_5_1, 1), 128 129 /* 24-bit */ 130 FMT_24_BIT(V_038004_FMT_8_8_8), 131 132 /* 32-bit */ 133 FMT_32_BIT(V_038004_COLOR_32, 1), 134 FMT_32_BIT(V_038004_COLOR_32_FLOAT, 1), 135 FMT_32_BIT(V_038004_COLOR_16_16, 1), 136 FMT_32_BIT(V_038004_COLOR_16_16_FLOAT, 1), 137 FMT_32_BIT(V_038004_COLOR_8_24, 1), 138 FMT_32_BIT(V_038004_COLOR_8_24_FLOAT, 1), 139 FMT_32_BIT(V_038004_COLOR_24_8, 1), 140 FMT_32_BIT(V_038004_COLOR_24_8_FLOAT, 1), 141 FMT_32_BIT(V_038004_COLOR_10_11_11, 1), 142 FMT_32_BIT(V_038004_COLOR_10_11_11_FLOAT, 1), 143 FMT_32_BIT(V_038004_COLOR_11_11_10, 1), 144 FMT_32_BIT(V_038004_COLOR_11_11_10_FLOAT, 1), 145 FMT_32_BIT(V_038004_COLOR_2_10_10_10, 1), 146 FMT_32_BIT(V_038004_COLOR_8_8_8_8, 1), 147 FMT_32_BIT(V_038004_COLOR_10_10_10_2, 1), 148 FMT_32_BIT(V_038004_FMT_5_9_9_9_SHAREDEXP, 0), 149 FMT_32_BIT(V_038004_FMT_32_AS_8, 0), 150 FMT_32_BIT(V_038004_FMT_32_AS_8_8, 0), 151 152 /* 48-bit */ 153 FMT_48_BIT(V_038004_FMT_16_16_16), 154 FMT_48_BIT(V_038004_FMT_16_16_16_FLOAT), 155 156 /* 64-bit */ 157 FMT_64_BIT(V_038004_COLOR_X24_8_32_FLOAT, 1), 158 FMT_64_BIT(V_038004_COLOR_32_32, 1), 159 FMT_64_BIT(V_038004_COLOR_32_32_FLOAT, 1), 160 FMT_64_BIT(V_038004_COLOR_16_16_16_16, 1), 161 FMT_64_BIT(V_038004_COLOR_16_16_16_16_FLOAT, 1), 162 163 FMT_96_BIT(V_038004_FMT_32_32_32), 164 FMT_96_BIT(V_038004_FMT_32_32_32_FLOAT), 165 166 /* 128-bit */ 167 FMT_128_BIT(V_038004_COLOR_32_32_32_32, 1), 168 FMT_128_BIT(V_038004_COLOR_32_32_32_32_FLOAT, 1), 169 170 [V_038004_FMT_GB_GR] = { 2, 1, 4, 0 }, 171 [V_038004_FMT_BG_RG] = { 2, 1, 4, 0 }, 172 173 /* block compressed formats */ 174 [V_038004_FMT_BC1] = { 4, 4, 8, 0 }, 175 [V_038004_FMT_BC2] = { 4, 4, 16, 0 }, 176 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 177 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 178 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 179 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 180 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */ 181 182 /* The other Evergreen formats */ 183 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR}, 184 }; 185 186 bool r600_fmt_is_valid_color(u32 format) 187 { 188 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 189 return false; 190 191 if (color_formats_table[format].valid_color) 192 return true; 193 194 return false; 195 } 196 197 bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family) 198 { 199 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 200 return false; 201 202 if (family < color_formats_table[format].min_family) 203 return false; 204 205 if (color_formats_table[format].blockwidth > 0) 206 return true; 
207 208 return false; 209 } 210 211 int r600_fmt_get_blocksize(u32 format) 212 { 213 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 214 return 0; 215 216 return color_formats_table[format].blocksize; 217 } 218 219 int r600_fmt_get_nblocksx(u32 format, u32 w) 220 { 221 unsigned bw; 222 223 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 224 return 0; 225 226 bw = color_formats_table[format].blockwidth; 227 if (bw == 0) 228 return 0; 229 230 return (w + bw - 1) / bw; 231 } 232 233 int r600_fmt_get_nblocksy(u32 format, u32 h) 234 { 235 unsigned bh; 236 237 if (format >= DRM_ARRAY_SIZE(color_formats_table)) 238 return 0; 239 240 bh = color_formats_table[format].blockheight; 241 if (bh == 0) 242 return 0; 243 244 return (h + bh - 1) / bh; 245 } 246 247 struct array_mode_checker { 248 int array_mode; 249 u32 group_size; 250 u32 nbanks; 251 u32 npipes; 252 u32 nsamples; 253 u32 blocksize; 254 }; 255 256 /* returns alignment in pixels for pitch/height/depth and bytes for base */ 257 static int r600_get_array_mode_alignment(struct array_mode_checker *values, 258 u32 *pitch_align, 259 u32 *height_align, 260 u32 *depth_align, 261 u64 *base_align) 262 { 263 u32 tile_width = 8; 264 u32 tile_height = 8; 265 u32 macro_tile_width = values->nbanks; 266 u32 macro_tile_height = values->npipes; 267 u32 tile_bytes = tile_width * tile_height * values->blocksize * values->nsamples; 268 u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; 269 270 switch (values->array_mode) { 271 case ARRAY_LINEAR_GENERAL: 272 /* technically tile_width/_height for pitch/height */ 273 *pitch_align = 1; /* tile_width */ 274 *height_align = 1; /* tile_height */ 275 *depth_align = 1; 276 *base_align = 1; 277 break; 278 case ARRAY_LINEAR_ALIGNED: 279 *pitch_align = max((u32)64, (u32)(values->group_size / values->blocksize)); 280 *height_align = 1; 281 *depth_align = 1; 282 *base_align = values->group_size; 283 break; 284 case ARRAY_1D_TILED_THIN1: 285 *pitch_align = max((u32)tile_width, 286 (u32)(values->group_size / 287 (tile_height * values->blocksize * values->nsamples))); 288 *height_align = tile_height; 289 *depth_align = 1; 290 *base_align = values->group_size; 291 break; 292 case ARRAY_2D_TILED_THIN1: 293 *pitch_align = max((u32)macro_tile_width * tile_width, 294 (u32)((values->group_size * values->nbanks) / 295 (values->blocksize * values->nsamples * tile_width))); 296 *height_align = macro_tile_height * tile_height; 297 *depth_align = 1; 298 *base_align = max(macro_tile_bytes, 299 (*pitch_align) * values->blocksize * (*height_align) * values->nsamples); 300 break; 301 default: 302 return -EINVAL; 303 } 304 305 return 0; 306 } 307 308 static void r600_cs_track_init(struct r600_cs_track *track) 309 { 310 int i; 311 312 /* assume DX9 mode */ 313 track->sq_config = DX9_CONSTS; 314 for (i = 0; i < 8; i++) { 315 track->cb_color_base_last[i] = 0; 316 track->cb_color_size[i] = 0; 317 track->cb_color_size_idx[i] = 0; 318 track->cb_color_info[i] = 0; 319 track->cb_color_view[i] = 0xFFFFFFFF; 320 track->cb_color_bo[i] = NULL; 321 track->cb_color_bo_offset[i] = 0xFFFFFFFF; 322 track->cb_color_bo_mc[i] = 0xFFFFFFFF; 323 track->cb_color_frag_bo[i] = NULL; 324 track->cb_color_frag_offset[i] = 0xFFFFFFFF; 325 track->cb_color_tile_bo[i] = NULL; 326 track->cb_color_tile_offset[i] = 0xFFFFFFFF; 327 track->cb_color_mask[i] = 0xFFFFFFFF; 328 } 329 track->is_resolve = false; 330 track->nsamples = 16; 331 track->log_nsamples = 4; 332 track->cb_target_mask = 0xFFFFFFFF; 333 track->cb_shader_mask = 0xFFFFFFFF; 334 
track->cb_dirty = true; 335 track->db_bo = NULL; 336 track->db_bo_mc = 0xFFFFFFFF; 337 /* assume the biggest format and that htile is enabled */ 338 track->db_depth_info = 7 | (1 << 25); 339 track->db_depth_view = 0xFFFFC000; 340 track->db_depth_size = 0xFFFFFFFF; 341 track->db_depth_size_idx = 0; 342 track->db_depth_control = 0xFFFFFFFF; 343 track->db_dirty = true; 344 track->htile_bo = NULL; 345 track->htile_offset = 0xFFFFFFFF; 346 track->htile_surface = 0; 347 348 for (i = 0; i < 4; i++) { 349 track->vgt_strmout_size[i] = 0; 350 track->vgt_strmout_bo[i] = NULL; 351 track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF; 352 track->vgt_strmout_bo_mc[i] = 0xFFFFFFFF; 353 } 354 track->streamout_dirty = true; 355 track->sx_misc_kill_all_prims = false; 356 } 357 358 static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) 359 { 360 struct r600_cs_track *track = p->track; 361 u32 slice_tile_max, size, tmp; 362 u32 height, height_align, pitch, pitch_align, depth_align; 363 u64 base_offset, base_align; 364 struct array_mode_checker array_check; 365 volatile u32 *ib = p->ib.ptr; 366 unsigned array_mode; 367 u32 format; 368 /* When resolve is used, the second colorbuffer has always 1 sample. */ 369 unsigned nsamples = track->is_resolve && i == 1 ? 1 : track->nsamples; 370 371 size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; 372 format = G_0280A0_FORMAT(track->cb_color_info[i]); 373 if (!r600_fmt_is_valid_color(format)) { 374 dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", 375 __func__, __LINE__, format, 376 i, track->cb_color_info[i]); 377 return -EINVAL; 378 } 379 /* pitch in pixels */ 380 pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; 381 slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; 382 slice_tile_max *= 64; 383 height = slice_tile_max / pitch; 384 if (height > 8192) 385 height = 8192; 386 array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); 387 388 base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; 389 array_check.array_mode = array_mode; 390 array_check.group_size = track->group_size; 391 array_check.nbanks = track->nbanks; 392 array_check.npipes = track->npipes; 393 array_check.nsamples = nsamples; 394 array_check.blocksize = r600_fmt_get_blocksize(format); 395 if (r600_get_array_mode_alignment(&array_check, 396 &pitch_align, &height_align, &depth_align, &base_align)) { 397 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 398 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 399 track->cb_color_info[i]); 400 return -EINVAL; 401 } 402 switch (array_mode) { 403 case V_0280A0_ARRAY_LINEAR_GENERAL: 404 break; 405 case V_0280A0_ARRAY_LINEAR_ALIGNED: 406 break; 407 case V_0280A0_ARRAY_1D_TILED_THIN1: 408 /* avoid breaking userspace */ 409 if (height > 7) 410 height &= ~0x7; 411 break; 412 case V_0280A0_ARRAY_2D_TILED_THIN1: 413 break; 414 default: 415 dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, 416 G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, 417 track->cb_color_info[i]); 418 return -EINVAL; 419 } 420 421 if (!IS_ALIGNED(pitch, pitch_align)) { 422 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n", 423 __func__, __LINE__, pitch, pitch_align, array_mode); 424 return -EINVAL; 425 } 426 if (!IS_ALIGNED(height, height_align)) { 427 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n", 428 __func__, __LINE__, height, height_align, array_mode); 429 return -EINVAL; 430 } 431 if (!IS_ALIGNED(base_offset, base_align)) { 
432 dev_warn(p->dev, "%s offset[%d] 0x%jx 0x%jx, %d not aligned\n", __func__, i, 433 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 434 return -EINVAL; 435 } 436 437 /* check offset */ 438 tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * 439 r600_fmt_get_blocksize(format) * nsamples; 440 switch (array_mode) { 441 default: 442 case V_0280A0_ARRAY_LINEAR_GENERAL: 443 case V_0280A0_ARRAY_LINEAR_ALIGNED: 444 tmp += track->cb_color_view[i] & 0xFF; 445 break; 446 case V_0280A0_ARRAY_1D_TILED_THIN1: 447 case V_0280A0_ARRAY_2D_TILED_THIN1: 448 tmp += G_028080_SLICE_MAX(track->cb_color_view[i]) * tmp; 449 break; 450 } 451 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 452 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 453 /* the initial DDX does bad things with the CB size occasionally */ 454 /* it rounds up height too far for slice tile max but the BO is smaller */ 455 /* r600c,g also seem to flush at bad times in some apps resulting in 456 * bogus values here. So for linear just allow anything to avoid breaking 457 * broken userspace. 458 */ 459 } else { 460 dev_warn(p->dev, "%s offset[%d] %d %ju %d %lu too big (%d %d) (%d %d %d)\n", 461 __func__, i, array_mode, 462 (uintmax_t)track->cb_color_bo_offset[i], tmp, 463 radeon_bo_size(track->cb_color_bo[i]), 464 pitch, height, r600_fmt_get_nblocksx(format, pitch), 465 r600_fmt_get_nblocksy(format, height), 466 r600_fmt_get_blocksize(format)); 467 return -EINVAL; 468 } 469 } 470 /* limit max tile */ 471 tmp = (height * pitch) >> 6; 472 if (tmp < slice_tile_max) 473 slice_tile_max = tmp; 474 tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | 475 S_028060_SLICE_TILE_MAX(slice_tile_max - 1); 476 ib[track->cb_color_size_idx[i]] = tmp; 477 478 /* FMASK/CMASK */ 479 switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { 480 case V_0280A0_TILE_DISABLE: 481 break; 482 case V_0280A0_FRAG_ENABLE: 483 if (track->nsamples > 1) { 484 uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); 485 /* the tile size is 8x8, but the size is in units of bits. 486 * for bytes, do just * 8. */ 487 uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); 488 489 if (bytes + track->cb_color_frag_offset[i] > 490 radeon_bo_size(track->cb_color_frag_bo[i])) { 491 dev_warn(p->dev, "%s FMASK_TILE_MAX too large " 492 "(tile_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 493 __func__, tile_max, bytes, 494 (uintmax_t)track->cb_color_frag_offset[i], 495 radeon_bo_size(track->cb_color_frag_bo[i])); 496 return -EINVAL; 497 } 498 } 499 /* fall through */ 500 case V_0280A0_CLEAR_ENABLE: 501 { 502 uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); 503 /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. 504 * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ 505 uint32_t bytes = (block_max + 1) * 128; 506 507 if (bytes + track->cb_color_tile_offset[i] > 508 radeon_bo_size(track->cb_color_tile_bo[i])) { 509 dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " 510 "(block_max=%u, bytes=%u, offset=%ju, bo_size=%lu)\n", 511 __func__, block_max, bytes, 512 (uintmax_t)track->cb_color_tile_offset[i], 513 radeon_bo_size(track->cb_color_tile_bo[i])); 514 return -EINVAL; 515 } 516 break; 517 } 518 default: 519 dev_warn(p->dev, "%s invalid tile mode\n", __func__); 520 return -EINVAL; 521 } 522 return 0; 523 } 524 525 static int r600_cs_track_validate_db(struct radeon_cs_parser *p) 526 { 527 struct r600_cs_track *track = p->track; 528 u32 nviews, bpe, ntiles, size, slice_tile_max, tmp; 529 u32 height_align, pitch_align, depth_align; 530 u32 pitch = 8192; 531 u32 height = 8192; 532 u64 base_offset, base_align; 533 struct array_mode_checker array_check; 534 int array_mode; 535 volatile u32 *ib = p->ib.ptr; 536 537 538 if (track->db_bo == NULL) { 539 dev_warn(p->dev, "z/stencil with no depth buffer\n"); 540 return -EINVAL; 541 } 542 switch (G_028010_FORMAT(track->db_depth_info)) { 543 case V_028010_DEPTH_16: 544 bpe = 2; 545 break; 546 case V_028010_DEPTH_X8_24: 547 case V_028010_DEPTH_8_24: 548 case V_028010_DEPTH_X8_24_FLOAT: 549 case V_028010_DEPTH_8_24_FLOAT: 550 case V_028010_DEPTH_32_FLOAT: 551 bpe = 4; 552 break; 553 case V_028010_DEPTH_X24_8_32_FLOAT: 554 bpe = 8; 555 break; 556 default: 557 dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); 558 return -EINVAL; 559 } 560 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 561 if (!track->db_depth_size_idx) { 562 dev_warn(p->dev, "z/stencil buffer size not set\n"); 563 return -EINVAL; 564 } 565 tmp = radeon_bo_size(track->db_bo) - track->db_offset; 566 tmp = (tmp / bpe) >> 6; 567 if (!tmp) { 568 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", 569 track->db_depth_size, bpe, track->db_offset, 570 radeon_bo_size(track->db_bo)); 571 return -EINVAL; 572 } 573 ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); 574 } else { 575 size = radeon_bo_size(track->db_bo); 576 /* pitch in pixels */ 577 pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; 578 slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 579 slice_tile_max *= 64; 580 height = slice_tile_max / pitch; 581 if (height > 8192) 582 height = 8192; 583 base_offset = track->db_bo_mc + track->db_offset; 584 array_mode = G_028010_ARRAY_MODE(track->db_depth_info); 585 array_check.array_mode = array_mode; 586 array_check.group_size = track->group_size; 587 array_check.nbanks = track->nbanks; 588 array_check.npipes = track->npipes; 589 array_check.nsamples = track->nsamples; 590 array_check.blocksize = bpe; 591 if (r600_get_array_mode_alignment(&array_check, 592 &pitch_align, &height_align, &depth_align, &base_align)) { 593 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 594 G_028010_ARRAY_MODE(track->db_depth_info), 595 track->db_depth_info); 596 return -EINVAL; 597 } 598 switch (array_mode) { 599 case V_028010_ARRAY_1D_TILED_THIN1: 600 /* don't break userspace */ 601 height &= ~0x7; 602 break; 603 case V_028010_ARRAY_2D_TILED_THIN1: 604 break; 605 default: 606 dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, 607 G_028010_ARRAY_MODE(track->db_depth_info), 608 track->db_depth_info); 609 return -EINVAL; 610 } 611 612 if (!IS_ALIGNED(pitch, pitch_align)) { 613 dev_warn(p->dev, "%s:%d db pitch 
(%d, 0x%x, %d) invalid\n", 614 __func__, __LINE__, pitch, pitch_align, array_mode); 615 return -EINVAL; 616 } 617 if (!IS_ALIGNED(height, height_align)) { 618 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n", 619 __func__, __LINE__, height, height_align, array_mode); 620 return -EINVAL; 621 } 622 if (!IS_ALIGNED(base_offset, base_align)) { 623 dev_warn(p->dev, "%s offset 0x%jx, 0x%jx, %d not aligned\n", __func__, 624 (uintmax_t)base_offset, (uintmax_t)base_align, array_mode); 625 return -EINVAL; 626 } 627 628 ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; 629 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 630 tmp = ntiles * bpe * 64 * nviews * track->nsamples; 631 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 632 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", 633 array_mode, 634 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 635 radeon_bo_size(track->db_bo)); 636 return -EINVAL; 637 } 638 } 639 640 /* hyperz */ 641 if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { 642 unsigned long size; 643 unsigned nbx, nby; 644 645 if (track->htile_bo == NULL) { 646 dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n", 647 __func__, __LINE__, track->db_depth_info); 648 return -EINVAL; 649 } 650 if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { 651 dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n", 652 __func__, __LINE__, track->db_depth_size); 653 return -EINVAL; 654 } 655 656 nbx = pitch; 657 nby = height; 658 if (G_028D24_LINEAR(track->htile_surface)) { 659 /* nbx must be 16 htiles aligned == 16 * 8 pixel aligned */ 660 nbx = roundup2(nbx, 16 * 8); 661 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 662 nby = roundup(nby, track->npipes * 8); 663 } else { 664 /* always assume 8x8 htile */ 665 /* align is htile align * 8, htile align vary according to 666 * number of pipe and tile width and nby 667 */ 668 switch (track->npipes) { 669 case 8: 670 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 671 nbx = roundup2(nbx, 64 * 8); 672 nby = roundup2(nby, 64 * 8); 673 break; 674 case 4: 675 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 676 nbx = roundup2(nbx, 64 * 8); 677 nby = roundup2(nby, 32 * 8); 678 break; 679 case 2: 680 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 681 nbx = roundup2(nbx, 32 * 8); 682 nby = roundup2(nby, 32 * 8); 683 break; 684 case 1: 685 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 686 nbx = roundup2(nbx, 32 * 8); 687 nby = roundup2(nby, 16 * 8); 688 break; 689 default: 690 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 691 __func__, __LINE__, track->npipes); 692 return -EINVAL; 693 } 694 } 695 /* compute number of htile */ 696 nbx = nbx >> 3; 697 nby = nby >> 3; 698 /* size must be aligned on npipes * 2K boundary */ 699 size = roundup(nbx * nby * 4, track->npipes * (2 << 10)); 700 size += track->htile_offset; 701 702 if (size > radeon_bo_size(track->htile_bo)) { 703 dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n", 704 __func__, __LINE__, radeon_bo_size(track->htile_bo), 705 size, nbx, nby); 706 return -EINVAL; 707 } 708 } 709 710 track->db_dirty = false; 711 return 0; 712 } 713 714 static int r600_cs_track_check(struct radeon_cs_parser *p) 715 { 716 struct r600_cs_track *track = p->track; 717 u32 tmp; 718 int r, i; 719 720 /* on legacy kernel we don't perform advanced check */ 721 if (p->rdev == NULL) 722 return 0; 723 724 /* check streamout */ 725 if (track->streamout_dirty && 
track->vgt_strmout_en) { 726 for (i = 0; i < 4; i++) { 727 if (track->vgt_strmout_buffer_en & (1 << i)) { 728 if (track->vgt_strmout_bo[i]) { 729 u64 offset = (u64)track->vgt_strmout_bo_offset[i] + 730 (u64)track->vgt_strmout_size[i]; 731 if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { 732 DRM_ERROR("streamout %d bo too small: 0x%jx, 0x%lx\n", 733 i, (uintmax_t)offset, 734 radeon_bo_size(track->vgt_strmout_bo[i])); 735 return -EINVAL; 736 } 737 } else { 738 dev_warn(p->dev, "No buffer for streamout %d\n", i); 739 return -EINVAL; 740 } 741 } 742 } 743 track->streamout_dirty = false; 744 } 745 746 if (track->sx_misc_kill_all_prims) 747 return 0; 748 749 /* check that we have a cb for each enabled target, we don't check 750 * shader_mask because it seems mesa isn't always setting it :( 751 */ 752 if (track->cb_dirty) { 753 tmp = track->cb_target_mask; 754 755 /* We must check both colorbuffers for RESOLVE. */ 756 if (track->is_resolve) { 757 tmp |= 0xff; 758 } 759 760 for (i = 0; i < 8; i++) { 761 if ((tmp >> (i * 4)) & 0xF) { 762 /* at least one component is enabled */ 763 if (track->cb_color_bo[i] == NULL) { 764 dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", 765 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); 766 return -EINVAL; 767 } 768 /* perform rewrite of CB_COLOR[0-7]_SIZE */ 769 r = r600_cs_track_validate_cb(p, i); 770 if (r) 771 return r; 772 } 773 } 774 track->cb_dirty = false; 775 } 776 777 /* Check depth buffer */ 778 if (track->db_dirty && 779 G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && 780 (G_028800_STENCIL_ENABLE(track->db_depth_control) || 781 G_028800_Z_ENABLE(track->db_depth_control))) { 782 r = r600_cs_track_validate_db(p); 783 if (r) 784 return r; 785 } 786 787 return 0; 788 } 789 790 /** 791 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 792 * @parser: parser structure holding parsing context. 793 * @pkt: where to store packet informations 794 * 795 * Assume that chunk_ib_index is properly set. Will return -EINVAL 796 * if packet is bigger than remaining ib size. or if packets is unknown. 797 **/ 798 static int r600_cs_packet_parse(struct radeon_cs_parser *p, 799 struct radeon_cs_packet *pkt, 800 unsigned idx) 801 { 802 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 803 uint32_t header; 804 805 if (idx >= ib_chunk->length_dw) { 806 DRM_ERROR("Can not parse packet at %d after CS end %d !\n", 807 idx, ib_chunk->length_dw); 808 return -EINVAL; 809 } 810 header = radeon_get_ib_value(p, idx); 811 pkt->idx = idx; 812 pkt->type = CP_PACKET_GET_TYPE(header); 813 pkt->count = CP_PACKET_GET_COUNT(header); 814 pkt->one_reg_wr = 0; 815 switch (pkt->type) { 816 case PACKET_TYPE0: 817 pkt->reg = CP_PACKET0_GET_REG(header); 818 break; 819 case PACKET_TYPE3: 820 pkt->opcode = CP_PACKET3_GET_OPCODE(header); 821 break; 822 case PACKET_TYPE2: 823 pkt->count = -1; 824 break; 825 default: 826 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); 827 return -EINVAL; 828 } 829 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { 830 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", 831 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); 832 return -EINVAL; 833 } 834 return 0; 835 } 836 837 /** 838 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3 839 * @parser: parser structure holding parsing context. 
840 * @data: pointer to relocation data 841 * @offset_start: starting offset 842 * @offset_mask: offset mask (to align start offset on) 843 * @reloc: reloc informations 844 * 845 * Check next packet is relocation packet3, do bo validation and compute 846 * GPU offset using the provided start. 847 **/ 848 static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 849 struct radeon_cs_reloc **cs_reloc) 850 { 851 struct radeon_cs_chunk *relocs_chunk; 852 struct radeon_cs_packet p3reloc; 853 unsigned idx; 854 int r; 855 856 if (p->chunk_relocs_idx == -1) { 857 DRM_ERROR("No relocation chunk !\n"); 858 return -EINVAL; 859 } 860 *cs_reloc = NULL; 861 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 862 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 863 if (r) { 864 return r; 865 } 866 p->idx += p3reloc.count + 2; 867 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 868 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 869 p3reloc.idx); 870 return -EINVAL; 871 } 872 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 873 if (idx >= relocs_chunk->length_dw) { 874 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 875 idx, relocs_chunk->length_dw); 876 return -EINVAL; 877 } 878 /* FIXME: we assume reloc size is 4 dwords */ 879 *cs_reloc = p->relocs_ptr[(idx / 4)]; 880 return 0; 881 } 882 883 /** 884 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3 885 * @parser: parser structure holding parsing context. 886 * @data: pointer to relocation data 887 * @offset_start: starting offset 888 * @offset_mask: offset mask (to align start offset on) 889 * @reloc: reloc informations 890 * 891 * Check next packet is relocation packet3, do bo validation and compute 892 * GPU offset using the provided start. 893 **/ 894 static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 895 struct radeon_cs_reloc **cs_reloc) 896 { 897 struct radeon_cs_chunk *relocs_chunk; 898 struct radeon_cs_packet p3reloc; 899 unsigned idx; 900 int r; 901 902 if (p->chunk_relocs_idx == -1) { 903 DRM_ERROR("No relocation chunk !\n"); 904 return -EINVAL; 905 } 906 *cs_reloc = NULL; 907 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 908 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 909 if (r) { 910 return r; 911 } 912 p->idx += p3reloc.count + 2; 913 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 914 DRM_ERROR("No packet3 for relocation for packet at %d.\n", 915 p3reloc.idx); 916 return -EINVAL; 917 } 918 idx = radeon_get_ib_value(p, p3reloc.idx + 1); 919 if (idx >= relocs_chunk->length_dw) { 920 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 921 idx, relocs_chunk->length_dw); 922 return -EINVAL; 923 } 924 *cs_reloc = p->relocs; 925 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; 926 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; 927 return 0; 928 } 929 930 /** 931 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc 932 * @parser: parser structure holding parsing context. 933 * 934 * Check next packet is relocation packet3, do bo validation and compute 935 * GPU offset using the provided start. 
936 **/ 937 static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p) 938 { 939 struct radeon_cs_packet p3reloc; 940 int r; 941 942 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 943 if (r) { 944 return 0; 945 } 946 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) { 947 return 0; 948 } 949 return 1; 950 } 951 952 /** 953 * r600_cs_packet_next_vline() - parse userspace VLINE packet 954 * @parser: parser structure holding parsing context. 955 * 956 * Userspace sends a special sequence for VLINE waits. 957 * PACKET0 - VLINE_START_END + value 958 * PACKET3 - WAIT_REG_MEM poll vline status reg 959 * RELOC (P3) - crtc_id in reloc. 960 * 961 * This function parses this and relocates the VLINE START END 962 * and WAIT_REG_MEM packets to the correct crtc. 963 * It also detects a switched off crtc and nulls out the 964 * wait in that case. 965 */ 966 static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 967 { 968 struct drm_mode_object *obj; 969 struct drm_crtc *crtc; 970 struct radeon_crtc *radeon_crtc; 971 struct radeon_cs_packet p3reloc, wait_reg_mem; 972 int crtc_id; 973 int r; 974 uint32_t header, h_idx, reg, wait_reg_mem_info; 975 volatile uint32_t *ib; 976 977 ib = p->ib.ptr; 978 979 /* parse the WAIT_REG_MEM */ 980 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 981 if (r) 982 return r; 983 984 /* check its a WAIT_REG_MEM */ 985 if (wait_reg_mem.type != PACKET_TYPE3 || 986 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 987 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 988 return -EINVAL; 989 } 990 991 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 992 /* bit 4 is reg (0) or mem (1) */ 993 if (wait_reg_mem_info & 0x10) { 994 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 995 return -EINVAL; 996 } 997 /* waiting for value to be equal */ 998 if ((wait_reg_mem_info & 0x7) != 0x3) { 999 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 1000 return -EINVAL; 1001 } 1002 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 1003 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 1004 return -EINVAL; 1005 } 1006 1007 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 1008 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 1009 return -EINVAL; 1010 } 1011 1012 /* jump over the NOP */ 1013 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 1014 if (r) 1015 return r; 1016 1017 h_idx = p->idx - 2; 1018 p->idx += wait_reg_mem.count + 2; 1019 p->idx += p3reloc.count + 2; 1020 1021 header = radeon_get_ib_value(p, h_idx); 1022 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 1023 reg = CP_PACKET0_GET_REG(header); 1024 1025 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1026 if (!obj) { 1027 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1028 return -EINVAL; 1029 } 1030 crtc = obj_to_crtc(obj); 1031 radeon_crtc = to_radeon_crtc(crtc); 1032 crtc_id = radeon_crtc->crtc_id; 1033 1034 if (!crtc->enabled) { 1035 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 1036 ib[h_idx + 2] = PACKET2(0); 1037 ib[h_idx + 3] = PACKET2(0); 1038 ib[h_idx + 4] = PACKET2(0); 1039 ib[h_idx + 5] = PACKET2(0); 1040 ib[h_idx + 6] = PACKET2(0); 1041 ib[h_idx + 7] = PACKET2(0); 1042 ib[h_idx + 8] = PACKET2(0); 1043 } else if (crtc_id == 1) { 1044 switch (reg) { 1045 case AVIVO_D1MODE_VLINE_START_END: 1046 header &= ~R600_CP_PACKET0_REG_MASK; 1047 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1048 break; 1049 default: 1050 
DRM_ERROR("unknown crtc reloc\n"); 1051 return -EINVAL; 1052 } 1053 ib[h_idx] = header; 1054 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 1055 } 1056 1057 return 0; 1058 } 1059 1060 static int r600_packet0_check(struct radeon_cs_parser *p, 1061 struct radeon_cs_packet *pkt, 1062 unsigned idx, unsigned reg) 1063 { 1064 int r; 1065 1066 switch (reg) { 1067 case AVIVO_D1MODE_VLINE_START_END: 1068 r = r600_cs_packet_parse_vline(p); 1069 if (r) { 1070 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1071 idx, reg); 1072 return r; 1073 } 1074 break; 1075 default: 1076 DRM_ERROR("Forbidden register 0x%04X in cs at %d\n", 1077 reg, idx); 1078 return -EINVAL; 1079 } 1080 return 0; 1081 } 1082 1083 static int r600_cs_parse_packet0(struct radeon_cs_parser *p, 1084 struct radeon_cs_packet *pkt) 1085 { 1086 unsigned reg, i; 1087 unsigned idx; 1088 int r; 1089 1090 idx = pkt->idx + 1; 1091 reg = pkt->reg; 1092 for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { 1093 r = r600_packet0_check(p, pkt, idx, reg); 1094 if (r) { 1095 return r; 1096 } 1097 } 1098 return 0; 1099 } 1100 1101 /** 1102 * r600_cs_check_reg() - check if register is authorized or not 1103 * @parser: parser structure holding parsing context 1104 * @reg: register we are testing 1105 * @idx: index into the cs buffer 1106 * 1107 * This function will test against r600_reg_safe_bm and return 0 1108 * if register is safe. If register is not flag as safe this function 1109 * will test it against a list of register needind special handling. 1110 */ 1111 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1112 { 1113 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 1114 struct radeon_cs_reloc *reloc; 1115 u32 m, i, tmp, *ib; 1116 int r; 1117 1118 i = (reg >> 7); 1119 if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) { 1120 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1121 return -EINVAL; 1122 } 1123 m = 1 << ((reg >> 2) & 31); 1124 if (!(r600_reg_safe_bm[i] & m)) 1125 return 0; 1126 ib = p->ib.ptr; 1127 switch (reg) { 1128 /* force following reg to 0 in an attempt to disable out buffer 1129 * which will need us to better understand how it works to perform 1130 * security check on it (Jerome) 1131 */ 1132 case R_0288A8_SQ_ESGS_RING_ITEMSIZE: 1133 case R_008C44_SQ_ESGS_RING_SIZE: 1134 case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: 1135 case R_008C54_SQ_ESTMP_RING_SIZE: 1136 case R_0288C0_SQ_FBUF_RING_ITEMSIZE: 1137 case R_008C74_SQ_FBUF_RING_SIZE: 1138 case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: 1139 case R_008C5C_SQ_GSTMP_RING_SIZE: 1140 case R_0288AC_SQ_GSVS_RING_ITEMSIZE: 1141 case R_008C4C_SQ_GSVS_RING_SIZE: 1142 case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: 1143 case R_008C6C_SQ_PSTMP_RING_SIZE: 1144 case R_0288C4_SQ_REDUC_RING_ITEMSIZE: 1145 case R_008C7C_SQ_REDUC_RING_SIZE: 1146 case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: 1147 case R_008C64_SQ_VSTMP_RING_SIZE: 1148 case R_0288C8_SQ_GS_VERT_ITEMSIZE: 1149 /* get value to populate the IB don't remove */ 1150 tmp =radeon_get_ib_value(p, idx); 1151 ib[idx] = 0; 1152 break; 1153 case SQ_CONFIG: 1154 track->sq_config = radeon_get_ib_value(p, idx); 1155 break; 1156 case R_028800_DB_DEPTH_CONTROL: 1157 track->db_depth_control = radeon_get_ib_value(p, idx); 1158 track->db_dirty = true; 1159 break; 1160 case R_028010_DB_DEPTH_INFO: 1161 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1162 r600_cs_packet_next_is_pkt3_nop(p)) { 1163 r = r600_cs_packet_next_reloc(p, &reloc); 1164 if (r) { 1165 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1166 "0x%04X\n", reg); 1167 return -EINVAL; 1168 
} 1169 track->db_depth_info = radeon_get_ib_value(p, idx); 1170 ib[idx] &= C_028010_ARRAY_MODE; 1171 track->db_depth_info &= C_028010_ARRAY_MODE; 1172 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1173 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1174 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); 1175 } else { 1176 ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1177 track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); 1178 } 1179 } else { 1180 track->db_depth_info = radeon_get_ib_value(p, idx); 1181 } 1182 track->db_dirty = true; 1183 break; 1184 case R_028004_DB_DEPTH_VIEW: 1185 track->db_depth_view = radeon_get_ib_value(p, idx); 1186 track->db_dirty = true; 1187 break; 1188 case R_028000_DB_DEPTH_SIZE: 1189 track->db_depth_size = radeon_get_ib_value(p, idx); 1190 track->db_depth_size_idx = idx; 1191 track->db_dirty = true; 1192 break; 1193 case R_028AB0_VGT_STRMOUT_EN: 1194 track->vgt_strmout_en = radeon_get_ib_value(p, idx); 1195 track->streamout_dirty = true; 1196 break; 1197 case R_028B20_VGT_STRMOUT_BUFFER_EN: 1198 track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); 1199 track->streamout_dirty = true; 1200 break; 1201 case VGT_STRMOUT_BUFFER_BASE_0: 1202 case VGT_STRMOUT_BUFFER_BASE_1: 1203 case VGT_STRMOUT_BUFFER_BASE_2: 1204 case VGT_STRMOUT_BUFFER_BASE_3: 1205 r = r600_cs_packet_next_reloc(p, &reloc); 1206 if (r) { 1207 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1208 "0x%04X\n", reg); 1209 return -EINVAL; 1210 } 1211 tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; 1212 track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1213 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1214 track->vgt_strmout_bo[tmp] = reloc->robj; 1215 track->vgt_strmout_bo_mc[tmp] = reloc->lobj.gpu_offset; 1216 track->streamout_dirty = true; 1217 break; 1218 case VGT_STRMOUT_BUFFER_SIZE_0: 1219 case VGT_STRMOUT_BUFFER_SIZE_1: 1220 case VGT_STRMOUT_BUFFER_SIZE_2: 1221 case VGT_STRMOUT_BUFFER_SIZE_3: 1222 tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16; 1223 /* size in register is DWs, convert to bytes */ 1224 track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4; 1225 track->streamout_dirty = true; 1226 break; 1227 case CP_COHER_BASE: 1228 r = r600_cs_packet_next_reloc(p, &reloc); 1229 if (r) { 1230 dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1231 "0x%04X\n", reg); 1232 return -EINVAL; 1233 } 1234 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1235 break; 1236 case R_028238_CB_TARGET_MASK: 1237 track->cb_target_mask = radeon_get_ib_value(p, idx); 1238 track->cb_dirty = true; 1239 break; 1240 case R_02823C_CB_SHADER_MASK: 1241 track->cb_shader_mask = radeon_get_ib_value(p, idx); 1242 break; 1243 case R_028C04_PA_SC_AA_CONFIG: 1244 tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); 1245 track->log_nsamples = tmp; 1246 track->nsamples = 1 << tmp; 1247 track->cb_dirty = true; 1248 break; 1249 case R_028808_CB_COLOR_CONTROL: 1250 tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); 1251 track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; 1252 track->cb_dirty = true; 1253 break; 1254 case R_0280A0_CB_COLOR0_INFO: 1255 case R_0280A4_CB_COLOR1_INFO: 1256 case R_0280A8_CB_COLOR2_INFO: 1257 case R_0280AC_CB_COLOR3_INFO: 1258 case R_0280B0_CB_COLOR4_INFO: 1259 case R_0280B4_CB_COLOR5_INFO: 1260 case R_0280B8_CB_COLOR6_INFO: 1261 case R_0280BC_CB_COLOR7_INFO: 1262 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1263 
r600_cs_packet_next_is_pkt3_nop(p)) { 1264 r = r600_cs_packet_next_reloc(p, &reloc); 1265 if (r) { 1266 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1267 return -EINVAL; 1268 } 1269 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1270 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1271 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 1272 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1273 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); 1274 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1275 ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1276 track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); 1277 } 1278 } else { 1279 tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; 1280 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 1281 } 1282 track->cb_dirty = true; 1283 break; 1284 case R_028080_CB_COLOR0_VIEW: 1285 case R_028084_CB_COLOR1_VIEW: 1286 case R_028088_CB_COLOR2_VIEW: 1287 case R_02808C_CB_COLOR3_VIEW: 1288 case R_028090_CB_COLOR4_VIEW: 1289 case R_028094_CB_COLOR5_VIEW: 1290 case R_028098_CB_COLOR6_VIEW: 1291 case R_02809C_CB_COLOR7_VIEW: 1292 tmp = (reg - R_028080_CB_COLOR0_VIEW) / 4; 1293 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx); 1294 track->cb_dirty = true; 1295 break; 1296 case R_028060_CB_COLOR0_SIZE: 1297 case R_028064_CB_COLOR1_SIZE: 1298 case R_028068_CB_COLOR2_SIZE: 1299 case R_02806C_CB_COLOR3_SIZE: 1300 case R_028070_CB_COLOR4_SIZE: 1301 case R_028074_CB_COLOR5_SIZE: 1302 case R_028078_CB_COLOR6_SIZE: 1303 case R_02807C_CB_COLOR7_SIZE: 1304 tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; 1305 track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); 1306 track->cb_color_size_idx[tmp] = idx; 1307 track->cb_dirty = true; 1308 break; 1309 /* This register were added late, there is userspace 1310 * which does provide relocation for those but set 1311 * 0 offset. In order to avoid breaking old userspace 1312 * we detect this and set address to point to last 1313 * CB_COLOR0_BASE, note that if userspace doesn't set 1314 * CB_COLOR0_BASE before this register we will report 1315 * error. Old userspace always set CB_COLOR0_BASE 1316 * before any of this. 1317 */ 1318 case R_0280E0_CB_COLOR0_FRAG: 1319 case R_0280E4_CB_COLOR1_FRAG: 1320 case R_0280E8_CB_COLOR2_FRAG: 1321 case R_0280EC_CB_COLOR3_FRAG: 1322 case R_0280F0_CB_COLOR4_FRAG: 1323 case R_0280F4_CB_COLOR5_FRAG: 1324 case R_0280F8_CB_COLOR6_FRAG: 1325 case R_0280FC_CB_COLOR7_FRAG: 1326 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1327 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1328 if (!track->cb_color_base_last[tmp]) { 1329 dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1330 return -EINVAL; 1331 } 1332 track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; 1333 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; 1334 ib[idx] = track->cb_color_base_last[tmp]; 1335 } else { 1336 r = r600_cs_packet_next_reloc(p, &reloc); 1337 if (r) { 1338 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1339 return -EINVAL; 1340 } 1341 track->cb_color_frag_bo[tmp] = reloc->robj; 1342 track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; 1343 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1344 } 1345 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1346 track->cb_dirty = true; 1347 } 1348 break; 1349 case R_0280C0_CB_COLOR0_TILE: 1350 case R_0280C4_CB_COLOR1_TILE: 1351 case R_0280C8_CB_COLOR2_TILE: 1352 case R_0280CC_CB_COLOR3_TILE: 1353 case R_0280D0_CB_COLOR4_TILE: 1354 case R_0280D4_CB_COLOR5_TILE: 1355 case R_0280D8_CB_COLOR6_TILE: 1356 case R_0280DC_CB_COLOR7_TILE: 1357 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1358 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1359 if (!track->cb_color_base_last[tmp]) { 1360 dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); 1361 return -EINVAL; 1362 } 1363 track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; 1364 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; 1365 ib[idx] = track->cb_color_base_last[tmp]; 1366 } else { 1367 r = r600_cs_packet_next_reloc(p, &reloc); 1368 if (r) { 1369 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1370 return -EINVAL; 1371 } 1372 track->cb_color_tile_bo[tmp] = reloc->robj; 1373 track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; 1374 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1375 } 1376 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1377 track->cb_dirty = true; 1378 } 1379 break; 1380 case R_028100_CB_COLOR0_MASK: 1381 case R_028104_CB_COLOR1_MASK: 1382 case R_028108_CB_COLOR2_MASK: 1383 case R_02810C_CB_COLOR3_MASK: 1384 case R_028110_CB_COLOR4_MASK: 1385 case R_028114_CB_COLOR5_MASK: 1386 case R_028118_CB_COLOR6_MASK: 1387 case R_02811C_CB_COLOR7_MASK: 1388 tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; 1389 track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); 1390 if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { 1391 track->cb_dirty = true; 1392 } 1393 break; 1394 case CB_COLOR0_BASE: 1395 case CB_COLOR1_BASE: 1396 case CB_COLOR2_BASE: 1397 case CB_COLOR3_BASE: 1398 case CB_COLOR4_BASE: 1399 case CB_COLOR5_BASE: 1400 case CB_COLOR6_BASE: 1401 case CB_COLOR7_BASE: 1402 r = r600_cs_packet_next_reloc(p, &reloc); 1403 if (r) { 1404 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1405 "0x%04X\n", reg); 1406 return -EINVAL; 1407 } 1408 tmp = (reg - CB_COLOR0_BASE) / 4; 1409 track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; 1410 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1411 track->cb_color_base_last[tmp] = ib[idx]; 1412 track->cb_color_bo[tmp] = reloc->robj; 1413 track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; 1414 track->cb_dirty = true; 1415 break; 1416 case DB_DEPTH_BASE: 1417 r = r600_cs_packet_next_reloc(p, &reloc); 1418 if (r) { 1419 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1420 "0x%04X\n", reg); 1421 return -EINVAL; 1422 } 1423 track->db_offset = radeon_get_ib_value(p, idx) << 8; 1424 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1425 track->db_bo = reloc->robj; 1426 track->db_bo_mc = reloc->lobj.gpu_offset; 1427 track->db_dirty = 
true; 1428 break; 1429 case DB_HTILE_DATA_BASE: 1430 r = r600_cs_packet_next_reloc(p, &reloc); 1431 if (r) { 1432 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1433 "0x%04X\n", reg); 1434 return -EINVAL; 1435 } 1436 track->htile_offset = radeon_get_ib_value(p, idx) << 8; 1437 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1438 track->htile_bo = reloc->robj; 1439 track->db_dirty = true; 1440 break; 1441 case DB_HTILE_SURFACE: 1442 track->htile_surface = radeon_get_ib_value(p, idx); 1443 /* force 8x8 htile width and height */ 1444 ib[idx] |= 3; 1445 track->db_dirty = true; 1446 break; 1447 case SQ_PGM_START_FS: 1448 case SQ_PGM_START_ES: 1449 case SQ_PGM_START_VS: 1450 case SQ_PGM_START_GS: 1451 case SQ_PGM_START_PS: 1452 case SQ_ALU_CONST_CACHE_GS_0: 1453 case SQ_ALU_CONST_CACHE_GS_1: 1454 case SQ_ALU_CONST_CACHE_GS_2: 1455 case SQ_ALU_CONST_CACHE_GS_3: 1456 case SQ_ALU_CONST_CACHE_GS_4: 1457 case SQ_ALU_CONST_CACHE_GS_5: 1458 case SQ_ALU_CONST_CACHE_GS_6: 1459 case SQ_ALU_CONST_CACHE_GS_7: 1460 case SQ_ALU_CONST_CACHE_GS_8: 1461 case SQ_ALU_CONST_CACHE_GS_9: 1462 case SQ_ALU_CONST_CACHE_GS_10: 1463 case SQ_ALU_CONST_CACHE_GS_11: 1464 case SQ_ALU_CONST_CACHE_GS_12: 1465 case SQ_ALU_CONST_CACHE_GS_13: 1466 case SQ_ALU_CONST_CACHE_GS_14: 1467 case SQ_ALU_CONST_CACHE_GS_15: 1468 case SQ_ALU_CONST_CACHE_PS_0: 1469 case SQ_ALU_CONST_CACHE_PS_1: 1470 case SQ_ALU_CONST_CACHE_PS_2: 1471 case SQ_ALU_CONST_CACHE_PS_3: 1472 case SQ_ALU_CONST_CACHE_PS_4: 1473 case SQ_ALU_CONST_CACHE_PS_5: 1474 case SQ_ALU_CONST_CACHE_PS_6: 1475 case SQ_ALU_CONST_CACHE_PS_7: 1476 case SQ_ALU_CONST_CACHE_PS_8: 1477 case SQ_ALU_CONST_CACHE_PS_9: 1478 case SQ_ALU_CONST_CACHE_PS_10: 1479 case SQ_ALU_CONST_CACHE_PS_11: 1480 case SQ_ALU_CONST_CACHE_PS_12: 1481 case SQ_ALU_CONST_CACHE_PS_13: 1482 case SQ_ALU_CONST_CACHE_PS_14: 1483 case SQ_ALU_CONST_CACHE_PS_15: 1484 case SQ_ALU_CONST_CACHE_VS_0: 1485 case SQ_ALU_CONST_CACHE_VS_1: 1486 case SQ_ALU_CONST_CACHE_VS_2: 1487 case SQ_ALU_CONST_CACHE_VS_3: 1488 case SQ_ALU_CONST_CACHE_VS_4: 1489 case SQ_ALU_CONST_CACHE_VS_5: 1490 case SQ_ALU_CONST_CACHE_VS_6: 1491 case SQ_ALU_CONST_CACHE_VS_7: 1492 case SQ_ALU_CONST_CACHE_VS_8: 1493 case SQ_ALU_CONST_CACHE_VS_9: 1494 case SQ_ALU_CONST_CACHE_VS_10: 1495 case SQ_ALU_CONST_CACHE_VS_11: 1496 case SQ_ALU_CONST_CACHE_VS_12: 1497 case SQ_ALU_CONST_CACHE_VS_13: 1498 case SQ_ALU_CONST_CACHE_VS_14: 1499 case SQ_ALU_CONST_CACHE_VS_15: 1500 r = r600_cs_packet_next_reloc(p, &reloc); 1501 if (r) { 1502 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1503 "0x%04X\n", reg); 1504 return -EINVAL; 1505 } 1506 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1507 break; 1508 case SX_MEMORY_EXPORT_BASE: 1509 r = r600_cs_packet_next_reloc(p, &reloc); 1510 if (r) { 1511 dev_warn(p->dev, "bad SET_CONFIG_REG " 1512 "0x%04X\n", reg); 1513 return -EINVAL; 1514 } 1515 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1516 break; 1517 case SX_MISC: 1518 track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; 1519 break; 1520 default: 1521 dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); 1522 return -EINVAL; 1523 } 1524 return 0; 1525 } 1526 1527 unsigned r600_mip_minify(unsigned size, unsigned level) 1528 { 1529 unsigned val; 1530 1531 val = max(1U, size >> level); 1532 if (level > 0) 1533 val = roundup_pow_of_two(val); 1534 return val; 1535 } 1536 1537 static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, 1538 unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned 
			      format,
			      unsigned block_align, unsigned height_align, unsigned base_align,
			      unsigned *l0_size, unsigned *mipmap_size)
{
	unsigned offset, i, level;
	unsigned width, height, depth, size;
	unsigned blocksize;
	unsigned nbx, nby;
	unsigned nlevels = llevel - blevel + 1;

	*l0_size = -1;
	blocksize = r600_fmt_get_blocksize(format);

	w0 = r600_mip_minify(w0, 0);
	h0 = r600_mip_minify(h0, 0);
	d0 = r600_mip_minify(d0, 0);
	for (i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
		width = r600_mip_minify(w0, i);
		nbx = r600_fmt_get_nblocksx(format, width);

		nbx = roundup(nbx, block_align);

		height = r600_mip_minify(h0, i);
		nby = r600_fmt_get_nblocksy(format, height);
		nby = roundup(nby, height_align);

		depth = r600_mip_minify(d0, i);

		size = nbx * nby * blocksize * nsamples;
		if (nfaces)
			size *= nfaces;
		else
			size *= depth;

		if (i == 0)
			*l0_size = size;

		if (i == 0 || i == 1)
			offset = roundup(offset, base_align);

		offset += size;
	}
	*mipmap_size = offset;
	if (llevel == 0)
		*mipmap_size = *l0_size;
	if (!blevel)
		*mipmap_size -= *l0_size;
}

/**
 * r600_check_texture_resource() - check that a texture resource is valid
 * @p: parser structure holding parsing context
 * @idx: index into the cs buffer
 * @texture: texture's bo structure
 * @mipmap: mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					      struct radeon_bo *texture,
					      struct radeon_bo *mipmap,
					      u64 base_offset,
					      u64 mip_offset,
					      u32 tiling_flags)
{
	struct r600_cs_track *track = p->track;
	u32 dim, nfaces, llevel, blevel, w0, h0, d0;
	u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5;
	u32 height_align, pitch, pitch_align, depth_align;
	u32 barray, larray;
	u64 base_align;
	struct array_mode_checker array_check;
	u32 format;
	bool is_array;

	/* on legacy kernels we don't perform the advanced checks */
	if (p->rdev == NULL)
		return 0;

	/* convert to bytes */
	base_offset <<= 8;
	mip_offset <<= 8;

	word0 = radeon_get_ib_value(p, idx + 0);
	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
		if (tiling_flags & RADEON_TILING_MACRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		else if (tiling_flags & RADEON_TILING_MICRO)
			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	}
	word1 = radeon_get_ib_value(p, idx + 1);
	word2 = radeon_get_ib_value(p, idx + 2) << 8;
	word3 = radeon_get_ib_value(p, idx + 3) << 8;
	word4 = radeon_get_ib_value(p, idx + 4);
	word5 = radeon_get_ib_value(p, idx + 5);
	dim = G_038000_DIM(word0);
	w0 = G_038000_TEX_WIDTH(word0) + 1;
	pitch = (G_038000_PITCH(word0) + 1) * 8;
	h0 = G_038004_TEX_HEIGHT(word1) + 1;
	d0 = G_038004_TEX_DEPTH(word1);
	format = G_038004_DATA_FORMAT(word1);
	blevel = G_038010_BASE_LEVEL(word4);
	llevel = G_038014_LAST_LEVEL(word5);
	/* pitch in texels */
	array_check.array_mode = G_038000_TILE_MODE(word0);
	array_check.group_size = track->group_size;
	array_check.nbanks = track->nbanks;
	array_check.npipes = track->npipes;
	array_check.nsamples = 1;
	array_check.blocksize = r600_fmt_get_blocksize(format);
	nfaces = 1;
	is_array = false;
	switch (dim) {
	case V_038000_SQ_TEX_DIM_1D:
	case V_038000_SQ_TEX_DIM_2D:
	case V_038000_SQ_TEX_DIM_3D:
		break;
	case V_038000_SQ_TEX_DIM_CUBEMAP:
		if (p->family >= CHIP_RV770)
			nfaces = 8;
		else
			nfaces = 6;
		break;
	case V_038000_SQ_TEX_DIM_1D_ARRAY:
	case V_038000_SQ_TEX_DIM_2D_ARRAY:
		is_array = true;
		break;
	case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		is_array = true;
		/* fall through */
	case V_038000_SQ_TEX_DIM_2D_MSAA:
		array_check.nsamples = 1 << llevel;
		llevel = 0;
		break;
	default:
		dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
		return -EINVAL;
	}
	if (!r600_fmt_is_valid_texture(format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, format);
		return -EINVAL;
	}

	if (r600_get_array_mode_alignment(&array_check,
					  &pitch_align, &height_align, &depth_align, &base_align)) {
		dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
			 __func__, __LINE__, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	/* XXX check height as well... */

	if (!IS_ALIGNED(pitch, pitch_align)) {
		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(base_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex base offset (0x%jx, 0x%jx, %d) invalid\n",
			 __func__, __LINE__, (uintmax_t)base_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}
	if (!IS_ALIGNED(mip_offset, base_align)) {
		dev_warn(p->dev, "%s:%d tex mip offset (0x%jx, 0x%jx, %d) invalid\n",
			 __func__, __LINE__, (uintmax_t)mip_offset, (uintmax_t)base_align, G_038000_TILE_MODE(word0));
		return -EINVAL;
	}

	if (blevel > llevel) {
		dev_warn(p->dev, "texture blevel %d > llevel %d\n",
			 blevel, llevel);
	}
	if (is_array) {
		barray = G_038014_BASE_ARRAY(word5);
		larray = G_038014_LAST_ARRAY(word5);

		nfaces = larray - barray + 1;
	}
	r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format,
			  pitch_align, height_align, base_align,
			  &l0_size, &mipmap_size);
	/* using get ib will give us the offset into the texture bo */
	if ((l0_size + word2) > radeon_bo_size(texture)) {
		dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
			 w0, h0, pitch_align, height_align,
			 array_check.array_mode, format, word2,
			 l0_size, radeon_bo_size(texture));
		dev_warn(p->dev, "alignments %d %d %d %jd\n", pitch, pitch_align, height_align, (uintmax_t)base_align);
		return -EINVAL;
	}
	/* using get ib will give us the offset into the mipmap bo */
	if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
		/*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
			  w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
	}
	return 0;
}

static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 m, i;

	i = (reg >> 7);
	if (i >= DRM_ARRAY_SIZE(r600_reg_safe_bm)) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (!(r600_reg_safe_bm[i] & m))
		return true;
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct r600_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct r600_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;

	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = r600_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff0) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			DRM_ERROR("CP DMA SAS not supported\n");
			return -EINVAL;
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA SRC\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx) +
				((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA src buffer too small (%ju %lu)\n",
					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx] = offset;
			ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			DRM_ERROR("CP DMA DAS not supported\n");
			return -EINVAL;
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad CP DMA DST\n");
				return -EINVAL;
			}

			tmp = radeon_get_ib_value(p, idx+2) +
				((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

			offset = reloc->lobj.gpu_offset + tmp;

			if ((tmp + size) > radeon_bo_size(reloc->robj)) {
				dev_warn(p->dev, "CP DMA dst buffer too small (%ju %lu)\n",
					 (uintmax_t)tmp + size, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}

			ib[idx+2] = offset;
			ib[idx+3] = upper_32_bits(offset) & 0xff;
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
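			/* the patched address keeps its 8-byte alignment;
			 * address bits 39:32 are carried in the following dword
			 */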
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = r600_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset, base_offset, mip_offset;

			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
					else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
						ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
				}
				texture = reloc->robj;
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = r600_check_texture_resource(p, idx+(i*7)+1,
								texture, mipmap,
								base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2),
								mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3),
								reloc->lobj.tiling_flags);
				if (r)
					return r;
				ib[idx+1+(i*7)+2] += base_offset;
				ib[idx+1+(i*7)+3] += mip_offset;
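				/* dwords 2/3 of this resource record now hold the
				 * relocated texture and mipmap base addresses, in
				 * 256-byte units
				 */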
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
				size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
						 size + offset, radeon_bo_size(reloc->robj));
					ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		if (track->sq_config & DX9_CONSTS) {
			start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
			end_reg = 4 * pkt->count + start_reg - 4;
			if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
			    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
			    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
				DRM_ERROR("bad SET_ALU_CONST\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BASE_UPDATE:
		/* RS780 and RS880 also need this */
		if (p->family < CHIP_RS780) {
			DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
			return -EINVAL;
		}
		if (pkt->count != 1) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
			return -EINVAL;
		}
		if (idx_value > 3) {
			DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
			return -EINVAL;
		}
		{
			u64 offset;

			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
				return -EINVAL;
			}

			if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
				return -EINVAL;
			}

			offset = radeon_get_ib_value(p, idx+1) << 8;
			if (offset != track->vgt_strmout_bo_offset[idx_value]) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%jx, 0x%x\n",
					  (uintmax_t)offset, track->vgt_strmout_bo_offset[idx_value]);
				return -EINVAL;
			}

			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS.
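		 * The source select is carried in bits 1-2 of the control
		 * word; a value of 2 means the data is read from memory and
		 * needs a reloc.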
		 */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%jx, 0x%lx\n",
				  (uintmax_t)offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!r600_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%jx, 0x%lx\n",
					  (uintmax_t)offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg.
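			 * Only registers that pass the safe-register check
			 * below may be written.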
			 */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!r600_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r600_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kmalloc(sizeof(*track), DRM_MEM_DRIVER,
				M_ZERO | M_WAITOK);
		if (track == NULL)
			return -ENOMEM;
		r600_cs_track_init(track);
		if (p->rdev->family < CHIP_RV770) {
			track->npipes = p->rdev->config.r600.tiling_npipes;
			track->nbanks = p->rdev->config.r600.tiling_nbanks;
			track->group_size = p->rdev->config.r600.tiling_group_size;
		} else if (p->rdev->family <= CHIP_RV740) {
			track->npipes = p->rdev->config.rv770.tiling_npipes;
			track->nbanks = p->rdev->config.rv770.tiling_nbanks;
			track->group_size = p->rdev->config.rv770.tiling_group_size;
		}
		p->track = track;
	}
	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			drm_free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			drm_free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			drm_free(p->track, DRM_MEM_DRIVER);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
		DRM_MDELAY(1);
	}
#endif
	drm_free(p->track, DRM_MEM_DRIVER);
	p->track = NULL;
	return 0;
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kmalloc(sizeof(struct radeon_cs_reloc), DRM_MEM_DRIVER,
			    M_ZERO | M_WAITOK);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}

/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
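 * In this legacy path the track state attached to the parser by
 * r600_cs_legacy() is freed here as well.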
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	drm_free(parser->relocs, DRM_MEM_DRIVER);
	for (i = 0; i < parser->nchunks; i++) {
		drm_free(parser->chunks[i].kdata, DRM_MEM_DRIVER);
		if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
			drm_free(parser->chunks[i].kpage[0], DRM_MEM_DRIVER);
			drm_free(parser->chunks[i].kpage[1], DRM_MEM_DRIVER);
		}
	}
	drm_free(parser->chunks, DRM_MEM_DRIVER);
	drm_free(parser->chunks_array, DRM_MEM_DRIVER);
	drm_free(parser->track, DRM_MEM_DRIVER);
}

int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct r600_cs_track *track;
	int r;

	/* initialize tracker */
	track = kmalloc(sizeof(*track), DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
	if (track == NULL)
		return -ENOMEM;
	r600_cs_track_init(track);
	r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.dev = dev->device;
	parser.rdev = NULL;
	parser.family = family;
	parser.track = track;
	parser.ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib.length_dw = ib_chunk->length_dw;
	*l = parser.ib.length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}

void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}

/*
 * DMA
 */
/**
 * r600_dma_cs_next_reloc() - parse next reloc
 * @p: parser structure holding parsing context.
 * @cs_reloc: reloc information
 *
 * Return the next reloc, do bo validation and compute
 * GPU offset using the provided start.
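 * Relocs for the DMA ring are consumed in submission order via
 * p->dma_reloc_idx.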
 **/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	unsigned idx;

	*cs_reloc = NULL;
	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	idx = p->dma_reloc_idx;
	if (idx >= p->nrelocs) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, p->nrelocs);
		return -EINVAL;
	}
	*cs_reloc = p->relocs_ptr[idx];
	p->dma_reloc_idx++;
	return 0;
}

#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h)	((h) & 0x0000ffff)
#define GET_DMA_T(h)		(((h) & 0x00800000) >> 23)

/**
 * r600_dma_cs_parse() - parse the DMA IB
 * @p: parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (R6xx-R7xx)
 * Returns 0 for success and an error on failure.
 **/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc;
	u32 header, cmd, count, tiled;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 5;
			} else {
				dst_offset = radeon_get_ib_value(p, idx+1);
				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				/* detile bit */
				if (idx_value & (1 << 31)) {
					/* tiled src, linear dst */
					src_offset = radeon_get_ib_value(p, idx+1);
					src_offset <<= 8;
					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

					dst_offset = radeon_get_ib_value(p, idx+5);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				} else {
					/* linear src, tiled dst */
					src_offset = radeon_get_ib_value(p, idx+5);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
					ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset <<= 8;
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				}
				p->idx += 7;
			} else {
				if (p->family >= CHIP_RV770) {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				} else {
					src_offset = radeon_get_ib_value(p, idx+2);
					src_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
					dst_offset = radeon_get_ib_value(p, idx+1);
					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff0000)) << 16;

					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
					p->idx += 4;
				}
			}
			if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
				dev_warn(p->dev, "DMA copy src buffer too small (%ju %lu)\n",
					 (uintmax_t)src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
				return -EINVAL;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA copy dst buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			if (p->family < CHIP_RV770) {
				DRM_ERROR("Constant Fill is 7xx only !\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
			dst_offset = radeon_get_ib_value(p, idx+1);
			dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%ju %lu)\n",
					 (uintmax_t)dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		DRM_INFO("%05d 0x%08X\n", r, p->ib.ptr[r]);
		DRM_MDELAY(1);
	}
#endif
	return 0;
}