1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23 #include "r600_pipe.h"
24 #include "r600_public.h"
25 #include "r600_isa.h"
26 #include "evergreen_compute.h"
27 #include "r600d.h"
28
29 #include "sb/sb_public.h"
30
31 #include <errno.h>
32 #include "pipe/p_shader_tokens.h"
33 #include "util/u_debug.h"
34 #include "util/u_memory.h"
35 #include "util/u_screen.h"
36 #include "util/u_simple_shaders.h"
37 #include "util/u_upload_mgr.h"
38 #include "util/u_math.h"
39 #include "vl/vl_decoder.h"
40 #include "vl/vl_video_buffer.h"
41 #include "radeon_video.h"
42 #include "radeon_uvd.h"
43 #include "util/os_time.h"
44
/* Flags accepted by the R600_DEBUG environment variable; parsed with
 * debug_get_flags_option() in r600_screen_create().  Additional common
 * flags are defined by the shared radeon code. */
static const struct debug_named_value r600_debug_options[] = {
	/* features */
	{ "nocpdma", DBG_NO_CP_DMA, "Disable CP DMA" },

	/* shader backend */
	{ "nosb", DBG_NO_SB, "Disable sb backend for graphics shaders" },
	{ "sbcl", DBG_SB_CS, "Enable sb backend for compute shaders" },
	{ "sbdry", DBG_SB_DRY_RUN, "Don't use optimized bytecode (just print the dumps)" },
	{ "sbstat", DBG_SB_STAT, "Print optimization statistics for shaders" },
	{ "sbdump", DBG_SB_DUMP, "Print IR dumps after some optimization passes" },
	{ "sbnofallback", DBG_SB_NO_FALLBACK, "Abort on errors instead of fallback" },
	{ "sbdisasm", DBG_SB_DISASM, "Use sb disassembler for shader dumps" },
	{ "sbsafemath", DBG_SB_SAFEMATH, "Disable unsafe math optimizations" },
	{ "nirsb", DBG_NIR_SB, "Enable NIR with SB optimizer"},

	DEBUG_NAMED_VALUE_END /* must be last */
};
62
63 /*
64 * pipe_context
65 */
66
/* pipe_context::destroy — release every context-owned GPU resource and
 * CSO state object, then free the context.  Also called on the failure
 * path of r600_create_context(), so all members must tolerate being
 * NULL / not yet initialized. */
static void r600_destroy_context(struct pipe_context *context)
{
	struct r600_context *rctx = (struct r600_context *)context;
	unsigned sh, i;

	r600_isa_destroy(rctx->isa);

	r600_sb_context_destroy(rctx->sb_context);

	/* Pre-evergreen chips expose fewer hardware shader stages. */
	for (sh = 0; sh < (rctx->b.chip_class < EVERGREEN ? R600_NUM_HW_STAGES : EG_NUM_HW_STAGES); sh++) {
		r600_resource_reference(&rctx->scratch_buffers[sh].buffer, NULL);
	}
	r600_resource_reference(&rctx->dummy_cmask, NULL);
	r600_resource_reference(&rctx->dummy_fmask, NULL);

	if (rctx->append_fence)
		pipe_resource_reference((struct pipe_resource**)&rctx->append_fence, NULL);
	/* Unbind the driver-internal constant buffer and free the CPU-side
	 * copy of the driver constants for each shader stage. */
	for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
		rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, NULL);
		free(rctx->driver_consts[sh].constants);
	}

	if (rctx->fixed_func_tcs_shader)
		rctx->b.b.delete_tcs_state(&rctx->b.b, rctx->fixed_func_tcs_shader);

	if (rctx->dummy_pixel_shader) {
		rctx->b.b.delete_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);
	}
	/* Internal CSOs created for blits/decompression in r600_create_context(). */
	if (rctx->custom_dsa_flush) {
		rctx->b.b.delete_depth_stencil_alpha_state(&rctx->b.b, rctx->custom_dsa_flush);
	}
	if (rctx->custom_blend_resolve) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_resolve);
	}
	if (rctx->custom_blend_decompress) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_decompress);
	}
	if (rctx->custom_blend_fastclear) {
		rctx->b.b.delete_blend_state(&rctx->b.b, rctx->custom_blend_fastclear);
	}
	util_unreference_framebuffer_state(&rctx->framebuffer.state);

	if (rctx->gs_rings.gsvs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.gsvs_ring.buffer, NULL);

	if (rctx->gs_rings.esgs_ring.buffer)
		pipe_resource_reference(&rctx->gs_rings.esgs_ring.buffer, NULL);

	/* Drop all remaining user constant-buffer bindings. */
	for (sh = 0; sh < PIPE_SHADER_TYPES; ++sh)
		for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; ++i)
			rctx->b.b.set_constant_buffer(context, sh, i, false, NULL);

	if (rctx->blitter) {
		util_blitter_destroy(rctx->blitter);
	}
	u_suballocator_destroy(&rctx->allocator_fetch_shader);

	r600_release_command_buffer(&rctx->start_cs_cmd);

	FREE(rctx->start_compute_cs_cmd.buf);

	/* Common teardown last: it destroys the winsys context the code
	 * above may still have referenced. */
	r600_common_context_cleanup(&rctx->b);

	r600_resource_reference(&rctx->trace_buf, NULL);
	r600_resource_reference(&rctx->last_trace_buf, NULL);
	radeon_clear_saved_cs(&rctx->last_gfx);

	FREE(rctx);
}
136
/* pipe_screen::context_create — build an r600g rendering context.
 * On any failure the partially-initialized context is torn down via
 * r600_destroy_context() and NULL is returned. */
static struct pipe_context *r600_create_context(struct pipe_screen *screen,
                                                void *priv, unsigned flags)
{
	struct r600_context *rctx = CALLOC_STRUCT(r600_context);
	struct r600_screen* rscreen = (struct r600_screen *)screen;
	struct radeon_winsys *ws = rscreen->b.ws;

	if (!rctx)
		return NULL;

	rctx->b.b.screen = screen;
	assert(!priv);
	rctx->b.b.priv = NULL; /* for threaded_context_unwrap_sync */
	rctx->b.b.destroy = r600_destroy_context;
	rctx->b.set_atom_dirty = (void *)r600_set_atom_dirty;

	if (!r600_common_context_init(&rctx->b, &rscreen->b, flags))
		goto fail;

	rctx->screen = rscreen;
	list_inithead(&rctx->texture_buffers);

	r600_init_blit_functions(rctx);

	/* Use UVD for video decode when the hardware has it; otherwise fall
	 * back to the shader-based vl decoder. */
	if (rscreen->b.info.has_video_hw.uvd_decode) {
		rctx->b.b.create_video_codec = r600_uvd_create_decoder;
		rctx->b.b.create_video_buffer = r600_video_buffer_create;
	} else {
		rctx->b.b.create_video_codec = vl_create_decoder;
		rctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	if (getenv("R600_TRACE"))
		rctx->is_debug = true;
	r600_init_common_state_functions(rctx);

	/* Generation-specific state setup and internal CSOs. */
	switch (rctx->b.chip_class) {
	case R600:
	case R700:
		r600_init_state_functions(rctx);
		r600_init_atom_start_cs(rctx);
		rctx->custom_dsa_flush = r600_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = rctx->b.chip_class == R700 ? r700_create_resolve_blend(rctx)
								      : r600_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = r600_create_decompress_blend(rctx);
		/* These families lack a vertex cache. */
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_RV610 ||
					   rctx->b.family == CHIP_RV620 ||
					   rctx->b.family == CHIP_RS780 ||
					   rctx->b.family == CHIP_RS880 ||
					   rctx->b.family == CHIP_RV710);
		break;
	case EVERGREEN:
	case CAYMAN:
		evergreen_init_state_functions(rctx);
		evergreen_init_atom_start_cs(rctx);
		evergreen_init_atom_start_compute_cs(rctx);
		rctx->custom_dsa_flush = evergreen_create_db_flush_dsa(rctx);
		rctx->custom_blend_resolve = evergreen_create_resolve_blend(rctx);
		rctx->custom_blend_decompress = evergreen_create_decompress_blend(rctx);
		rctx->custom_blend_fastclear = evergreen_create_fastclear_blend(rctx);
		rctx->has_vertex_cache = !(rctx->b.family == CHIP_CEDAR ||
					   rctx->b.family == CHIP_PALM ||
					   rctx->b.family == CHIP_SUMO ||
					   rctx->b.family == CHIP_SUMO2 ||
					   rctx->b.family == CHIP_CAICOS ||
					   rctx->b.family == CHIP_CAYMAN ||
					   rctx->b.family == CHIP_ARUBA);

		rctx->append_fence = pipe_buffer_create(rctx->b.b.screen, PIPE_BIND_CUSTOM,
							PIPE_USAGE_DEFAULT, 32);
		break;
	default:
		R600_ERR("Unsupported chip class %d.\n", rctx->b.chip_class);
		goto fail;
	}

	ws->cs_create(&rctx->b.gfx.cs, rctx->b.ctx, RING_GFX,
		      r600_context_gfx_flush, rctx, false);
	rctx->b.gfx.flush = r600_context_gfx_flush;

	u_suballocator_init(&rctx->allocator_fetch_shader, &rctx->b.b, 64 * 1024,
			    0, PIPE_USAGE_DEFAULT, 0, FALSE);

	rctx->isa = calloc(1, sizeof(struct r600_isa));
	if (!rctx->isa || r600_isa_init(rctx, rctx->isa))
		goto fail;

	if (rscreen->b.debug_flags & DBG_FORCE_DMA)
		rctx->b.b.resource_copy_region = rctx->b.dma_copy;

	rctx->blitter = util_blitter_create(&rctx->b.b);
	if (rctx->blitter == NULL)
		goto fail;
	util_blitter_set_texture_multisample(rctx->blitter, rscreen->has_msaa);
	rctx->blitter->draw_rectangle = r600_draw_rectangle;

	r600_begin_new_cs(rctx);

	/* Bind a trivial passthrough FS so there is always a valid pixel
	 * shader bound (e.g. for clears/blits before the state tracker
	 * binds one). */
	rctx->dummy_pixel_shader =
		util_make_fragment_cloneinput_shader(&rctx->b.b, 0,
						     TGSI_SEMANTIC_GENERIC,
						     TGSI_INTERPOLATE_CONSTANT);
	rctx->b.b.bind_fs_state(&rctx->b.b, rctx->dummy_pixel_shader);

	return &rctx->b.b;

fail:
	r600_destroy_context(&rctx->b.b);
	return NULL;
}
247
is_nir_enabled(struct r600_common_screen * screen)248 static bool is_nir_enabled(struct r600_common_screen *screen) {
249 return ((screen->debug_flags & DBG_NIR_PREFERRED) &&
250 screen->family >= CHIP_CEDAR);
251 }
252
253 /*
254 * pipe_screen
255 */
256
r600_get_param(struct pipe_screen * pscreen,enum pipe_cap param)257 static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param)
258 {
259 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
260 enum radeon_family family = rscreen->b.family;
261
262 switch (param) {
263 /* Supported features (boolean caps). */
264 case PIPE_CAP_NPOT_TEXTURES:
265 case PIPE_CAP_MIXED_FRAMEBUFFER_SIZES:
266 case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
267 case PIPE_CAP_ANISOTROPIC_FILTER:
268 case PIPE_CAP_POINT_SPRITE:
269 case PIPE_CAP_OCCLUSION_QUERY:
270 case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
271 case PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE:
272 case PIPE_CAP_BLEND_EQUATION_SEPARATE:
273 case PIPE_CAP_TEXTURE_SWIZZLE:
274 case PIPE_CAP_DEPTH_CLIP_DISABLE:
275 case PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE:
276 case PIPE_CAP_SHADER_STENCIL_EXPORT:
277 case PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR:
278 case PIPE_CAP_MIXED_COLORBUFFER_FORMATS:
279 case PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT:
280 case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER:
281 case PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD:
282 case PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES:
283 case PIPE_CAP_VERTEX_SHADER_SATURATE:
284 case PIPE_CAP_SEAMLESS_CUBE_MAP:
285 case PIPE_CAP_PRIMITIVE_RESTART:
286 case PIPE_CAP_PRIMITIVE_RESTART_FIXED_INDEX:
287 case PIPE_CAP_CONDITIONAL_RENDER:
288 case PIPE_CAP_TEXTURE_BARRIER:
289 case PIPE_CAP_VERTEX_COLOR_UNCLAMPED:
290 case PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION:
291 case PIPE_CAP_TGSI_INSTANCEID:
292 case PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY:
293 case PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY:
294 case PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY:
295 case PIPE_CAP_START_INSTANCE:
296 case PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS:
297 case PIPE_CAP_TEXTURE_BUFFER_OBJECTS:
298 case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
299 case PIPE_CAP_QUERY_PIPELINE_STATISTICS:
300 case PIPE_CAP_TEXTURE_MULTISAMPLE:
301 case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
302 case PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION:
303 case PIPE_CAP_TGSI_VS_LAYER_VIEWPORT:
304 case PIPE_CAP_SAMPLE_SHADING:
305 case PIPE_CAP_CLIP_HALFZ:
306 case PIPE_CAP_POLYGON_OFFSET_CLAMP:
307 case PIPE_CAP_CONDITIONAL_RENDER_INVERTED:
308 case PIPE_CAP_TEXTURE_FLOAT_LINEAR:
309 case PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR:
310 case PIPE_CAP_TGSI_TXQS:
311 case PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS:
312 case PIPE_CAP_INVALIDATE_BUFFER:
313 case PIPE_CAP_SURFACE_REINTERPRET_BLOCKS:
314 case PIPE_CAP_QUERY_MEMORY_INFO:
315 case PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT:
316 case PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED:
317 case PIPE_CAP_CLEAR_TEXTURE:
318 case PIPE_CAP_TGSI_MUL_ZERO_WINS:
319 case PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX:
320 case PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION:
321 case PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR:
322 case PIPE_CAP_NIR_ATOMICS_AS_DEREF:
323 return 1;
324
325 case PIPE_CAP_SHAREABLE_SHADERS:
326 return 0;
327
328 case PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET:
329 /* Optimal number for good TexSubImage performance on Polaris10. */
330 return 64 * 1024 * 1024;
331
332 case PIPE_CAP_DEVICE_RESET_STATUS_QUERY:
333 return rscreen->b.info.drm_minor >= 43;
334
335 case PIPE_CAP_RESOURCE_FROM_USER_MEMORY:
336 return !R600_BIG_ENDIAN && rscreen->b.info.has_userptr;
337
338 case PIPE_CAP_COMPUTE:
339 return rscreen->b.chip_class > R700;
340
341 case PIPE_CAP_TGSI_TEXCOORD:
342 return 1;
343
344 case PIPE_CAP_NIR_IMAGES_AS_DEREF:
345 case PIPE_CAP_FAKE_SW_MSAA:
346 return 0;
347
348 case PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE:
349 return MIN2(rscreen->b.info.max_alloc_size, INT_MAX);
350
351 case PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT:
352 return R600_MAP_BUFFER_ALIGNMENT;
353
354 case PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT:
355 return 256;
356
357 case PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT:
358 return 4;
359
360 case PIPE_CAP_GLSL_FEATURE_LEVEL:
361 if (family >= CHIP_CEDAR)
362 return is_nir_enabled(&rscreen->b) ? 450 : 430;
363 /* pre-evergreen geom shaders need newer kernel */
364 if (rscreen->b.info.drm_minor >= 37)
365 return 330;
366 return 140;
367
368 case PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY:
369 return 140;
370
371 /* Supported except the original R600. */
372 case PIPE_CAP_INDEP_BLEND_ENABLE:
373 case PIPE_CAP_INDEP_BLEND_FUNC:
374 /* R600 doesn't support per-MRT blends */
375 return family == CHIP_R600 ? 0 : 1;
376
377 /* Supported on Evergreen. */
378 case PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE:
379 case PIPE_CAP_CUBE_MAP_ARRAY:
380 case PIPE_CAP_TEXTURE_GATHER_SM5:
381 case PIPE_CAP_TEXTURE_QUERY_LOD:
382 case PIPE_CAP_TGSI_FS_FINE_DERIVATIVE:
383 case PIPE_CAP_SAMPLER_VIEW_TARGET:
384 case PIPE_CAP_TGSI_PACK_HALF_FLOAT:
385 case PIPE_CAP_TGSI_CLOCK:
386 case PIPE_CAP_TGSI_ARRAY_COMPONENTS:
387 case PIPE_CAP_QUERY_BUFFER_OBJECT:
388 return family >= CHIP_CEDAR ? 1 : 0;
389 case PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS:
390 return family >= CHIP_CEDAR ? 4 : 0;
391 case PIPE_CAP_DRAW_INDIRECT:
392 /* kernel command checker support is also required */
393 return family >= CHIP_CEDAR && rscreen->b.info.drm_minor >= 41;
394
395 case PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY:
396 return family >= CHIP_CEDAR ? 0 : 1;
397
398 case PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES:
399 return 8;
400
401 case PIPE_CAP_MAX_GS_INVOCATIONS:
402 return 32;
403
404 /* shader buffer objects */
405 case PIPE_CAP_MAX_SHADER_BUFFER_SIZE:
406 return 1 << 27;
407 case PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS:
408 return 8;
409
410 case PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY:
411 return 0;
412
413 case PIPE_CAP_INT64:
414 case PIPE_CAP_DOUBLES:
415 if (rscreen->b.family == CHIP_ARUBA ||
416 rscreen->b.family == CHIP_CAYMAN ||
417 rscreen->b.family == CHIP_CYPRESS ||
418 rscreen->b.family == CHIP_HEMLOCK)
419 return 1;
420 if (is_nir_enabled(&rscreen->b))
421 return 1;
422 return 0;
423 case PIPE_CAP_INT64_DIVMOD:
424 /* it is actually not supported, but the nir lowering hdanles this corectly wheras
425 * the glsl lowering path seems to not initialize the buildins correctly.
426 */
427 return is_nir_enabled(&rscreen->b);
428 case PIPE_CAP_CULL_DISTANCE:
429 return 1;
430
431 case PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT:
432 if (family >= CHIP_CEDAR)
433 return 256;
434 return 0;
435
436 case PIPE_CAP_MAX_SHADER_PATCH_VARYINGS:
437 if (family >= CHIP_CEDAR)
438 return 30;
439 else
440 return 0;
441 /* Stream output. */
442 case PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS:
443 return rscreen->b.has_streamout ? 4 : 0;
444 case PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME:
445 case PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS:
446 return rscreen->b.has_streamout ? 1 : 0;
447 case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
448 case PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS:
449 return 32*4;
450
451 /* Geometry shader output. */
452 case PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES:
453 return 1024;
454 case PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS:
455 return 16384;
456 case PIPE_CAP_MAX_VERTEX_STREAMS:
457 return family >= CHIP_CEDAR ? 4 : 1;
458
459 case PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE:
460 /* Should be 2047, but 2048 is a requirement for GL 4.4 */
461 return 2048;
462
463 /* Texturing. */
464 case PIPE_CAP_MAX_TEXTURE_2D_SIZE:
465 if (family >= CHIP_CEDAR)
466 return 16384;
467 else
468 return 8192;
469 case PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS:
470 if (family >= CHIP_CEDAR)
471 return 15;
472 else
473 return 14;
474 case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
475 /* textures support 8192, but layered rendering supports 2048 */
476 return 12;
477 case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
478 /* textures support 8192, but layered rendering supports 2048 */
479 return 2048;
480
481 /* Render targets. */
482 case PIPE_CAP_MAX_RENDER_TARGETS:
483 /* XXX some r6xx are buggy and can only do 4 */
484 return 8;
485
486 case PIPE_CAP_MAX_VIEWPORTS:
487 return R600_MAX_VIEWPORTS;
488 case PIPE_CAP_VIEWPORT_SUBPIXEL_BITS:
489 case PIPE_CAP_RASTERIZER_SUBPIXEL_BITS:
490 return 8;
491
492 /* Timer queries, present when the clock frequency is non zero. */
493 case PIPE_CAP_QUERY_TIME_ELAPSED:
494 return rscreen->b.info.clock_crystal_freq != 0;
495 case PIPE_CAP_QUERY_TIMESTAMP:
496 return rscreen->b.info.drm_minor >= 20 &&
497 rscreen->b.info.clock_crystal_freq != 0;
498
499 case PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET:
500 case PIPE_CAP_MIN_TEXEL_OFFSET:
501 return -8;
502
503 case PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET:
504 case PIPE_CAP_MAX_TEXEL_OFFSET:
505 return 7;
506
507 case PIPE_CAP_MAX_VARYINGS:
508 return 32;
509
510 case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
511 return PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600;
512 case PIPE_CAP_ENDIANNESS:
513 return PIPE_ENDIAN_LITTLE;
514
515 case PIPE_CAP_VENDOR_ID:
516 return ATI_VENDOR_ID;
517 case PIPE_CAP_DEVICE_ID:
518 return rscreen->b.info.pci_id;
519 case PIPE_CAP_ACCELERATED:
520 return 1;
521 case PIPE_CAP_VIDEO_MEMORY:
522 return rscreen->b.info.vram_size >> 20;
523 case PIPE_CAP_UMA:
524 return 0;
525 case PIPE_CAP_MULTISAMPLE_Z_RESOLVE:
526 return rscreen->b.chip_class >= R700;
527 case PIPE_CAP_PCI_GROUP:
528 return rscreen->b.info.pci_domain;
529 case PIPE_CAP_PCI_BUS:
530 return rscreen->b.info.pci_bus;
531 case PIPE_CAP_PCI_DEVICE:
532 return rscreen->b.info.pci_dev;
533 case PIPE_CAP_PCI_FUNCTION:
534 return rscreen->b.info.pci_func;
535
536 case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS:
537 if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
538 return 8;
539 return 0;
540 case PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS:
541 if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
542 return EG_MAX_ATOMIC_BUFFERS;
543 return 0;
544
545 default:
546 return u_pipe_screen_get_param_defaults(pscreen, param);
547 }
548 }
549
r600_get_shader_param(struct pipe_screen * pscreen,enum pipe_shader_type shader,enum pipe_shader_cap param)550 static int r600_get_shader_param(struct pipe_screen* pscreen,
551 enum pipe_shader_type shader,
552 enum pipe_shader_cap param)
553 {
554 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
555
556 switch(shader)
557 {
558 case PIPE_SHADER_FRAGMENT:
559 case PIPE_SHADER_VERTEX:
560 break;
561 case PIPE_SHADER_GEOMETRY:
562 if (rscreen->b.family >= CHIP_CEDAR)
563 break;
564 /* pre-evergreen geom shaders need newer kernel */
565 if (rscreen->b.info.drm_minor >= 37)
566 break;
567 return 0;
568 case PIPE_SHADER_TESS_CTRL:
569 case PIPE_SHADER_TESS_EVAL:
570 case PIPE_SHADER_COMPUTE:
571 if (rscreen->b.family >= CHIP_CEDAR)
572 break;
573 FALLTHROUGH;
574 default:
575 return 0;
576 }
577
578 switch (param) {
579 case PIPE_SHADER_CAP_MAX_INSTRUCTIONS:
580 case PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS:
581 case PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS:
582 case PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS:
583 return 16384;
584 case PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH:
585 return 32;
586 case PIPE_SHADER_CAP_MAX_INPUTS:
587 return shader == PIPE_SHADER_VERTEX ? 16 : 32;
588 case PIPE_SHADER_CAP_MAX_OUTPUTS:
589 return shader == PIPE_SHADER_FRAGMENT ? 8 : 32;
590 case PIPE_SHADER_CAP_MAX_TEMPS:
591 return 256; /* Max native temporaries. */
592 case PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE:
593 if (shader == PIPE_SHADER_COMPUTE) {
594 uint64_t max_const_buffer_size;
595 enum pipe_shader_ir ir_type = is_nir_enabled(&rscreen->b) ?
596 PIPE_SHADER_IR_NIR: PIPE_SHADER_IR_TGSI;
597 pscreen->get_compute_param(pscreen, ir_type,
598 PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
599 &max_const_buffer_size);
600 return MIN2(max_const_buffer_size, INT_MAX);
601
602 } else {
603 return R600_MAX_CONST_BUFFER_SIZE;
604 }
605 case PIPE_SHADER_CAP_MAX_CONST_BUFFERS:
606 return R600_MAX_USER_CONST_BUFFERS;
607 case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED:
608 return 1;
609 case PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED:
610 return 1;
611 case PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR:
612 case PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR:
613 case PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR:
614 case PIPE_SHADER_CAP_INDIRECT_CONST_ADDR:
615 return 1;
616 case PIPE_SHADER_CAP_SUBROUTINES:
617 case PIPE_SHADER_CAP_INT64_ATOMICS:
618 case PIPE_SHADER_CAP_FP16:
619 case PIPE_SHADER_CAP_FP16_DERIVATIVES:
620 case PIPE_SHADER_CAP_FP16_CONST_BUFFERS:
621 case PIPE_SHADER_CAP_INT16:
622 case PIPE_SHADER_CAP_GLSL_16BIT_CONSTS:
623 return 0;
624 case PIPE_SHADER_CAP_INTEGERS:
625 case PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE:
626 return 1;
627 case PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS:
628 case PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS:
629 return 16;
630 case PIPE_SHADER_CAP_PREFERRED_IR:
631 if (is_nir_enabled(&rscreen->b))
632 return PIPE_SHADER_IR_NIR;
633 return PIPE_SHADER_IR_TGSI;
634 case PIPE_SHADER_CAP_SUPPORTED_IRS: {
635 int ir = 0;
636 if (shader == PIPE_SHADER_COMPUTE)
637 ir = 1 << PIPE_SHADER_IR_NATIVE;
638 if (rscreen->b.family >= CHIP_CEDAR) {
639 ir |= 1 << PIPE_SHADER_IR_TGSI;
640 if (is_nir_enabled(&rscreen->b))
641 ir |= 1 << PIPE_SHADER_IR_NIR;
642 }
643 return ir;
644 }
645 case PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED:
646 if (rscreen->b.family == CHIP_ARUBA ||
647 rscreen->b.family == CHIP_CAYMAN ||
648 rscreen->b.family == CHIP_CYPRESS ||
649 rscreen->b.family == CHIP_HEMLOCK)
650 return 1;
651 return 0;
652 case PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED:
653 case PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED:
654 case PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED:
655 case PIPE_SHADER_CAP_LOWER_IF_THRESHOLD:
656 case PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS:
657 return 0;
658 case PIPE_SHADER_CAP_MAX_SHADER_BUFFERS:
659 case PIPE_SHADER_CAP_MAX_SHADER_IMAGES:
660 if (rscreen->b.family >= CHIP_CEDAR &&
661 (shader == PIPE_SHADER_FRAGMENT || shader == PIPE_SHADER_COMPUTE))
662 return 8;
663 return 0;
664 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS:
665 if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics)
666 return 8;
667 return 0;
668 case PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS:
669 /* having to allocate the atomics out amongst shaders stages is messy,
670 so give compute 8 buffers and all the others one */
671 if (rscreen->b.family >= CHIP_CEDAR && rscreen->has_atomics) {
672 return EG_MAX_ATOMIC_BUFFERS;
673 }
674 return 0;
675 case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
676 /* due to a bug in the shader compiler, some loops hang
677 * if they are not unrolled, see:
678 * https://bugs.freedesktop.org/show_bug.cgi?id=86720
679 */
680 return 255;
681 }
682 return 0;
683 }
684
r600_destroy_screen(struct pipe_screen * pscreen)685 static void r600_destroy_screen(struct pipe_screen* pscreen)
686 {
687 struct r600_screen *rscreen = (struct r600_screen *)pscreen;
688
689 if (!rscreen)
690 return;
691
692 if (!rscreen->b.ws->unref(rscreen->b.ws))
693 return;
694
695 if (rscreen->global_pool) {
696 compute_memory_pool_delete(rscreen->global_pool);
697 }
698
699 r600_destroy_common_screen(&rscreen->b);
700 }
701
r600_resource_create(struct pipe_screen * screen,const struct pipe_resource * templ)702 static struct pipe_resource *r600_resource_create(struct pipe_screen *screen,
703 const struct pipe_resource *templ)
704 {
705 if (templ->target == PIPE_BUFFER &&
706 (templ->bind & PIPE_BIND_GLOBAL))
707 return r600_compute_global_buffer_create(screen, templ);
708
709 return r600_resource_create_common(screen, templ);
710 }
711
/* Create the r600g pipe_screen on top of a radeon winsys.  Detects the
 * chip family, probes kernel (DRM minor) feature support, and creates
 * the auxiliary context last.  Returns NULL on failure. */
struct pipe_screen *r600_screen_create(struct radeon_winsys *ws,
				       const struct pipe_screen_config *config)
{
	struct r600_screen *rscreen = CALLOC_STRUCT(r600_screen);

	if (!rscreen) {
		return NULL;
	}

	/* Set functions first. */
	rscreen->b.b.context_create = r600_create_context;
	rscreen->b.b.destroy = r600_destroy_screen;
	rscreen->b.b.get_param = r600_get_param;
	rscreen->b.b.get_shader_param = r600_get_shader_param;
	rscreen->b.b.resource_create = r600_resource_create;

	if (!r600_common_screen_init(&rscreen->b, ws)) {
		FREE(rscreen);
		return NULL;
	}

	if (rscreen->b.info.chip_class >= EVERGREEN) {
		rscreen->b.b.is_format_supported = evergreen_is_format_supported;
	} else {
		rscreen->b.b.is_format_supported = r600_is_format_supported;
	}

	/* Merge driver-specific debug flags from the environment. */
	rscreen->b.debug_flags |= debug_get_flags_option("R600_DEBUG", r600_debug_options, 0);
	if (debug_get_bool_option("R600_DEBUG_COMPUTE", FALSE))
		rscreen->b.debug_flags |= DBG_COMPUTE;
	if (debug_get_bool_option("R600_DUMP_SHADERS", FALSE))
		rscreen->b.debug_flags |= DBG_ALL_SHADERS | DBG_FS;
	if (!debug_get_bool_option("R600_HYPERZ", TRUE))
		rscreen->b.debug_flags |= DBG_NO_HYPERZ;

	if (rscreen->b.family == CHIP_UNKNOWN) {
		fprintf(stderr, "r600: Unknown chipset 0x%04X\n", rscreen->b.info.pci_id);
		FREE(rscreen);
		return NULL;
	}

	/* Figure out streamout kernel support. */
	switch (rscreen->b.chip_class) {
	case R600:
		if (rscreen->b.family < CHIP_RS780) {
			rscreen->b.has_streamout = rscreen->b.info.drm_minor >= 14;
		} else {
			rscreen->b.has_streamout = rscreen->b.info.drm_minor >= 23;
		}
		break;
	case R700:
		rscreen->b.has_streamout = rscreen->b.info.drm_minor >= 17;
		break;
	case EVERGREEN:
	case CAYMAN:
		rscreen->b.has_streamout = rscreen->b.info.drm_minor >= 14;
		break;
	default:
		rscreen->b.has_streamout = FALSE;
		break;
	}

	/* MSAA support. */
	switch (rscreen->b.chip_class) {
	case R600:
	case R700:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 22;
		rscreen->has_compressed_msaa_texturing = false;
		break;
	case EVERGREEN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = rscreen->b.info.drm_minor >= 24;
		break;
	case CAYMAN:
		rscreen->has_msaa = rscreen->b.info.drm_minor >= 19;
		rscreen->has_compressed_msaa_texturing = true;
		break;
	default:
		rscreen->has_msaa = FALSE;
		rscreen->has_compressed_msaa_texturing = false;
	}

	rscreen->b.has_cp_dma = rscreen->b.info.drm_minor >= 27 &&
				!(rscreen->b.debug_flags & DBG_NO_CP_DMA);

	rscreen->b.barrier_flags.cp_to_L2 =
		R600_CONTEXT_INV_VERTEX_CACHE |
		R600_CONTEXT_INV_TEX_CACHE |
		R600_CONTEXT_INV_CONST_CACHE;
	rscreen->b.barrier_flags.compute_to_L2 = R600_CONTEXT_CS_PARTIAL_FLUSH | R600_CONTEXT_FLUSH_AND_INV;

	/* Shared pool backing OpenCL global buffers (see r600_resource_create). */
	rscreen->global_pool = compute_memory_pool_new(rscreen);

	/* Create the auxiliary context. This must be done last. */
	rscreen->b.aux_context = rscreen->b.b.context_create(&rscreen->b.b, NULL, 0);

	rscreen->has_atomics = rscreen->b.info.drm_minor >= 44;
#if 0 /* This is for testing whether aux_context and buffer clearing work correctly. */
	struct pipe_resource templ = {};

	templ.width0 = 4;
	templ.height0 = 2048;
	templ.depth0 = 1;
	templ.array_size = 1;
	templ.target = PIPE_TEXTURE_2D;
	templ.format = PIPE_FORMAT_R8G8B8A8_UNORM;
	templ.usage = PIPE_USAGE_DEFAULT;

	struct r600_resource *res = r600_resource(rscreen->screen.resource_create(&rscreen->screen, &templ));
	unsigned char *map = ws->buffer_map(res->buf, NULL, PIPE_MAP_WRITE);

	memset(map, 0, 256);

	r600_screen_clear_buffer(rscreen, &res->b.b, 4, 4, 0xCC);
	r600_screen_clear_buffer(rscreen, &res->b.b, 8, 4, 0xDD);
	r600_screen_clear_buffer(rscreen, &res->b.b, 12, 4, 0xEE);
	r600_screen_clear_buffer(rscreen, &res->b.b, 20, 4, 0xFF);
	r600_screen_clear_buffer(rscreen, &res->b.b, 32, 20, 0x87);

	ws->buffer_wait(res->buf, RADEON_USAGE_WRITE);

	int i;
	for (i = 0; i < 256; i++) {
		printf("%02X", map[i]);
		if (i % 16 == 15)
			printf("\n");
	}
#endif

	if (rscreen->b.debug_flags & DBG_TEST_DMA)
		r600_test_dma(&rscreen->b);

	r600_query_fix_enabled_rb_mask(&rscreen->b);
	return &rscreen->b.b;
}
847