1 /**************************************************************************
2 
3 Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
4 
5 The Weather Channel (TM) funded Tungsten Graphics to develop the
6 initial release of the Radeon 8500 driver under the XFree86 license.
7 This notice must be preserved.
8 
9 Permission is hereby granted, free of charge, to any person obtaining
10 a copy of this software and associated documentation files (the
11 "Software"), to deal in the Software without restriction, including
12 without limitation the rights to use, copy, modify, merge, publish,
13 distribute, sublicense, and/or sell copies of the Software, and to
14 permit persons to whom the Software is furnished to do so, subject to
15 the following conditions:
16 
17 The above copyright notice and this permission notice (including the
18 next paragraph) shall be included in all copies or substantial
19 portions of the Software.
20 
21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28 
29 **************************************************************************/
30 
31 /*
32  * Authors:
33  *   Keith Whitwell <keithw@vmware.com>
34  */
35 
36 /*
37    - Scissor implementation
38    - buffer swap/copy ioctls
39    - finish/flush
40    - state emission
41    - cmdbuffer management
42 */
43 
44 #include <errno.h>
45 #include "main/glheader.h"
46 #include "main/context.h"
47 #include "main/enums.h"
48 #include "main/fbobject.h"
49 #include "main/framebuffer.h"
50 #include "main/renderbuffer.h"
51 #include "drivers/common/meta.h"
52 
53 #include "radeon_common.h"
54 #include "radeon_drm.h"
55 #include "radeon_queryobj.h"
56 
57 /**
58  * Enable verbose debug output for emit code.
59  * 0 no output
60  * 1 most output
 * 2 also print state values
62  */
63 #define RADEON_CMDBUF         0
64 
65 /* =============================================================
66  * Scissoring
67  */
68 
69 /**
70  * Update cliprects and scissors.
71  */
radeonSetCliprects(radeonContextPtr radeon)72 void radeonSetCliprects(radeonContextPtr radeon)
73 {
74 	__DRIdrawable *const drawable = radeon_get_drawable(radeon);
75 	__DRIdrawable *const readable = radeon_get_readable(radeon);
76 
77 	if(drawable == NULL && readable == NULL)
78 		return;
79 
80 	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
81 	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
82 
83 	if ((draw_rfb->base.Width != drawable->w) ||
84 	    (draw_rfb->base.Height != drawable->h)) {
85 		_mesa_resize_framebuffer(&radeon->glCtx, &draw_rfb->base,
86 					 drawable->w, drawable->h);
87 	}
88 
89 	if (drawable != readable) {
90 		if ((read_rfb->base.Width != readable->w) ||
91 		    (read_rfb->base.Height != readable->h)) {
92 			_mesa_resize_framebuffer(&radeon->glCtx, &read_rfb->base,
93 						 readable->w, readable->h);
94 		}
95 	}
96 
97 	if (radeon->state.scissor.enabled)
98 		radeonUpdateScissor(&radeon->glCtx);
99 
100 }
101 
102 
103 
radeonUpdateScissor(struct gl_context * ctx)104 void radeonUpdateScissor( struct gl_context *ctx )
105 {
106 	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
107 	GLint x = ctx->Scissor.ScissorArray[0].X, y = ctx->Scissor.ScissorArray[0].Y;
108 	GLsizei w = ctx->Scissor.ScissorArray[0].Width, h = ctx->Scissor.ScissorArray[0].Height;
109 	int x1, y1, x2, y2;
110 	int min_x, min_y, max_x, max_y;
111 
112 	if (!ctx->DrawBuffer)
113 	    return;
114 	min_x = min_y = 0;
115 	max_x = ctx->DrawBuffer->Width - 1;
116 	max_y = ctx->DrawBuffer->Height - 1;
117 
118 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
119 		x1 = x;
120 		y1 = ctx->DrawBuffer->Height - (y + h);
121 		x2 = x + w - 1;
122 		y2 = y1 + h - 1;
123 	} else {
124 		x1 = x;
125 		y1 = y;
126 		x2 = x + w - 1;
127 		y2 = y + h - 1;
128 
129 	}
130 
131 	rmesa->state.scissor.rect.x1 = CLAMP(x1,  min_x, max_x);
132 	rmesa->state.scissor.rect.y1 = CLAMP(y1,  min_y, max_y);
133 	rmesa->state.scissor.rect.x2 = CLAMP(x2,  min_x, max_x);
134 	rmesa->state.scissor.rect.y2 = CLAMP(y2,  min_y, max_y);
135 
136 	if (rmesa->vtbl.update_scissor)
137 	   rmesa->vtbl.update_scissor(ctx);
138 }
139 
140 /* =============================================================
141  * Scissoring
142  */
143 
radeonScissor(struct gl_context * ctx)144 void radeonScissor(struct gl_context *ctx)
145 {
146 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
147 	if (ctx->Scissor.EnableFlags) {
148 		/* We don't pipeline cliprect changes */
149 		radeon_firevertices(radeon);
150 		radeonUpdateScissor(ctx);
151 	}
152 }
153 
154 /* ================================================================
155  * SwapBuffers with client-side throttling
156  */
157 
radeonGetAge(radeonContextPtr radeon)158 uint32_t radeonGetAge(radeonContextPtr radeon)
159 {
160 	drm_radeon_getparam_t gp;
161 	int ret;
162 	uint32_t age;
163 
164 	gp.param = RADEON_PARAM_LAST_CLEAR;
165 	gp.value = (int *)&age;
166 	ret = drmCommandWriteRead(radeon->radeonScreen->driScreen->fd, DRM_RADEON_GETPARAM,
167 				  &gp, sizeof(gp));
168 	if (ret) {
169 		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
170 			ret);
171 		exit(1);
172 	}
173 
174 	return age;
175 }
176 
radeon_draw_buffer(struct gl_context * ctx,struct gl_framebuffer * fb)177 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
178 {
179 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
180 	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
181 		*rrbColor = NULL;
182 	uint32_t offset = 0;
183 
184 
185 	if (!fb) {
186 		/* this can happen during the initial context initialization */
187 		return;
188 	}
189 
190 	/* radeons only handle 1 color draw so far */
191 	if (fb->_NumColorDrawBuffers != 1) {
192 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
193 		return;
194 	}
195 
196 	/* Do this here, note core Mesa, since this function is called from
197 	 * many places within the driver.
198 	 */
199 	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
200 		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
201 		_mesa_update_framebuffer(ctx, ctx->ReadBuffer, ctx->DrawBuffer);
202 		/* this updates the DrawBuffer's Width/Height if it's a FBO */
203 		_mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);
204 	}
205 
206 	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
207 		/* this may occur when we're called by glBindFrameBuffer() during
208 		 * the process of someone setting up renderbuffers, etc.
209 		 */
210 		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
211 		return;
212 	}
213 
214 	if (fb->Name) {
215 		;/* do something depthy/stencily TODO */
216         }
217 
218 		/* none */
219 	if (fb->Name == 0) {
220 		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
221 			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
222 			radeon->front_cliprects = GL_TRUE;
223 		} else {
224 			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
225 			radeon->front_cliprects = GL_FALSE;
226 		}
227 	} else {
228 		/* user FBO in theory */
229 		struct radeon_renderbuffer *rrb;
230 		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
231 		if (rrb) {
232 			offset = rrb->draw_offset;
233 			rrbColor = rrb;
234 		}
235 	}
236 
237 	if (rrbColor == NULL)
238 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
239 	else
240 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
241 
242 
243 	if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
244 		rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
245 		if (rrbDepth && rrbDepth->bo) {
246 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
247 		} else {
248 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
249 		}
250 	} else {
251 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
252 		rrbDepth = NULL;
253 	}
254 
255 	if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
256 		rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
257 		if (rrbStencil && rrbStencil->bo) {
258 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
259 			/* need to re-compute stencil hw state */
260 			if (!rrbDepth)
261 				rrbDepth = rrbStencil;
262 		} else {
263 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
264 		}
265 	} else {
266 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
267 		if (ctx->Driver.Enable != NULL)
268 			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
269 		else
270 			ctx->NewState |= _NEW_STENCIL;
271 	}
272 
273 	/* Update culling direction which changes depending on the
274 	 * orientation of the buffer:
275 	 */
276 	if (ctx->Driver.FrontFace)
277 		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
278 	else
279 		ctx->NewState |= _NEW_POLYGON;
280 
281 	/*
282 	 * Update depth test state
283 	 */
284 	if (ctx->Driver.Enable) {
285 		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
286 				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
287 		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
288 				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
289 	} else {
290 		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
291 	}
292 
293 	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
294 	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
295 	radeon->state.color.draw_offset = offset;
296 
297 	ctx->NewState |= _NEW_VIEWPORT;
298 
299 	/* Set state we know depends on drawable parameters:
300 	 */
301 	radeonUpdateScissor(ctx);
302 	radeon->NewGLState |= _NEW_SCISSOR;
303 
304 	if (ctx->Driver.DepthRange)
305 		ctx->Driver.DepthRange(ctx);
306 
307 	/* Update culling direction which changes depending on the
308 	 * orientation of the buffer:
309 	 */
310 	if (ctx->Driver.FrontFace)
311 		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
312 	else
313 		ctx->NewState |= _NEW_POLYGON;
314 }
315 
316 /**
317  * Called via glDrawBuffer.
318  */
radeonDrawBuffer(struct gl_context * ctx)319 void radeonDrawBuffer(struct gl_context *ctx)
320 {
321 	if (RADEON_DEBUG & RADEON_DRI)
322 		fprintf(stderr, "%s\n", __func__);
323 
324 	if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
325 		radeonContextPtr radeon = RADEON_CONTEXT(ctx);
326 
327 		/* If we might be front-buffer rendering on this buffer for
328 		 * the first time, invalidate our DRI drawable so we'll ask
329 		 * for new buffers (including the fake front) before we start
330 		 * rendering again.
331 		 */
332 		radeon_update_renderbuffers(radeon->driContext,
333 					    radeon->driContext->driDrawablePriv,
334 					    GL_FALSE);
335 	}
336 
337 	radeon_draw_buffer(ctx, ctx->DrawBuffer);
338 }
339 
/**
 * Called via glReadBuffer.  Refreshes the readable's renderbuffers when
 * front-buffer reading, then revalidates the draw buffer so FBO
 * completeness is recomputed.
 */
void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
{
	struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);

	if (_mesa_is_front_buffer_reading(ctx->ReadBuffer))
		radeon_update_renderbuffers(rmesa->driContext,
					    rmesa->driContext->driReadablePriv,
					    GL_FALSE);

	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
	if (ctx->ReadBuffer == ctx->DrawBuffer) {
		/* This will update FBO completeness status.
		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
		 * refers to a missing renderbuffer.  Calling glReadBuffer can set
		 * that straight and can make the drawing buffer complete.
		 */
		radeon_draw_buffer(ctx, ctx->DrawBuffer);
	}
}
357 
/**
 * Drawable moved or resized: re-sync cliprects (and, transitively, the
 * scissor state) before any further rendering takes place.
 */
void radeon_window_moved(radeonContextPtr radeon)
{
	/* Cliprects have to be updated before doing anything else */
	radeonSetCliprects(radeon);
}
363 
radeon_viewport(struct gl_context * ctx)364 void radeon_viewport(struct gl_context *ctx)
365 {
366 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
367 	__DRIcontext *driContext = radeon->driContext;
368 	void (*old_viewport)(struct gl_context *ctx);
369 
370 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
371 		if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer)) {
372 			ctx->Driver.Flush(ctx, 0);
373 		}
374 		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
375 		if (driContext->driDrawablePriv != driContext->driReadablePriv)
376 			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
377 	}
378 
379 	old_viewport = ctx->Driver.Viewport;
380 	ctx->Driver.Viewport = NULL;
381 	radeon_window_moved(radeon);
382 	radeon_draw_buffer(ctx, radeon->glCtx.DrawBuffer);
383 	ctx->Driver.Viewport = old_viewport;
384 }
385 
/**
 * Debug helper: print one state atom's name and size; at trace level also
 * decode its raw command table as type-0 register-write packets.
 */
static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
{
	int idx, k, reg, count;
	int dwords;
	uint32_t packet0;

	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE))
		return;

	dwords = state->check(&radeon->glCtx, state);

	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);

	if (!state->cmd || !radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE))
		return;

	/* Never decode past the stored command table. */
	if (dwords > state->cmd_size)
		dwords = state->cmd_size;

	idx = 0;
	while (idx < dwords) {
		/* cmdpacket0 header: low 13 bits = register index (dword
		 * units), bits 16..29 = payload count minus one.
		 */
		packet0 = state->cmd[idx];
		reg = (packet0 & 0x1FFF) << 2;
		count = ((packet0 & 0x3FFF0000) >> 16) + 1;
		fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
				state->name, idx, reg, count);
		++idx;
		for (k = 0; k < count && idx < dwords; k++) {
			fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
					state->name, idx, reg, state->cmd[idx]);
			reg += 4;
			++idx;
		}
	}
}
417 
418 /**
419  * Count total size for next state emit.
420  **/
radeonCountStateEmitSize(radeonContextPtr radeon)421 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
422 {
423 	struct radeon_state_atom *atom;
424 	GLuint dwords = 0;
425 	/* check if we are going to emit full state */
426 
427 	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
428 		if (!radeon->hw.is_dirty)
429 			goto out;
430 		foreach(atom, &radeon->hw.atomlist) {
431 			if (atom->dirty) {
432 				const GLuint atom_size = atom->check(&radeon->glCtx, atom);
433 				dwords += atom_size;
434 				if (RADEON_CMDBUF && atom_size) {
435 					radeon_print_state_atom(radeon, atom);
436 				}
437 			}
438 		}
439 	} else {
440 		foreach(atom, &radeon->hw.atomlist) {
441 			const GLuint atom_size = atom->check(&radeon->glCtx, atom);
442 			dwords += atom_size;
443 			if (RADEON_CMDBUF && atom_size) {
444 				radeon_print_state_atom(radeon, atom);
445 			}
446 
447 		}
448 	}
449 out:
450 	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
451 	return dwords;
452 }
453 
/**
 * Emit a single state atom into the command stream.
 *
 * Atoms whose check() callback reports zero dwords are skipped (with a
 * verbose-level log).  Otherwise the atom's own emit() hook runs if set;
 * if not, the atom's raw command table is copied into the batch.  The
 * atom's dirty flag is cleared on successful emission.
 */
static inline void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* check() returns the number of dwords this atom will occupy. */
	dwords = atom->check(&radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			atom->emit(&radeon->glCtx, atom);
		} else {
			/* No custom emitter: stream the stored command table. */
			BEGIN_BATCH(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}

}
478 
/**
 * Walk the atom list and emit either every atom (full re-emit) or only
 * the dirty ones, then commit the batch.
 */
static inline void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
{
	struct radeon_state_atom *atom;
	const GLboolean full = radeon->hw.all_dirty || emitAll;

	/* Emit actual atoms */
	foreach(atom, &radeon->hw.atomlist) {
		if (full || atom->dirty)
			radeon_emit_atom(radeon, atom);
	}

	COMMIT_BATCH();
}
496 
/**
 * Emit pending state to the command stream.  A fresh (empty) stream gets
 * a full re-emit of every atom; otherwise only dirty atoms are emitted.
 * Clears the context-wide dirty flags afterwards.
 */
void radeonEmitState(radeonContextPtr radeon)
{
	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);

	if (radeon->vtbl.pre_emit_state)
		radeon->vtbl.pre_emit_state(radeon);

	/* this code used to return here but now it emits zbs */
	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
		return;

	/* An empty command stream forces a full state re-emit. */
	const GLboolean full_reemit = (radeon->cmdbuf.cs->cdw == 0);

	if (RADEON_DEBUG & RADEON_STATE)
		fprintf(stderr, full_reemit ? "Begin reemit state\n"
					    : "Begin dirty state\n");

	radeonEmitAtoms(radeon, full_reemit ? GL_TRUE : GL_FALSE);

	radeon->hw.is_dirty = GL_FALSE;
	radeon->hw.all_dirty = GL_FALSE;
}
524 
525 
radeonFlush(struct gl_context * ctx,unsigned gallium_flush_flags)526 void radeonFlush(struct gl_context *ctx, unsigned gallium_flush_flags)
527 {
528 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
529 	if (RADEON_DEBUG & RADEON_IOCTL)
530 		fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);
531 
532 	/* okay if we have no cmds in the buffer &&
533 	   we have no DMA flush &&
534 	   we have no DMA buffer allocated.
535 	   then no point flushing anything at all.
536 	*/
537 	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
538 		goto flush_front;
539 
540 	if (radeon->dma.flush)
541 		radeon->dma.flush( ctx );
542 
543 	if (radeon->cmdbuf.cs->cdw)
544 		rcommonFlushCmdBuf(radeon, __func__);
545 
546 flush_front:
547 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
548 		__DRIscreen *const screen = radeon->radeonScreen->driScreen;
549 
550 		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
551 			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
552 			__DRIdrawable * drawable = radeon_get_drawable(radeon);
553 
554 			/* We set the dirty bit in radeon_prepare_render() if we're
555 			 * front buffer rendering once we get there.
556 			 */
557 			radeon->front_buffer_dirty = GL_FALSE;
558 
559 			screen->dri2.loader->flushFrontBuffer(drawable, drawable->loaderPrivate);
560 		}
561 	}
562 }
563 
564 /* Make sure all commands have been sent to the hardware and have
565  * completed processing.
566  */
radeonFinish(struct gl_context * ctx)567 void radeonFinish(struct gl_context * ctx)
568 {
569 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
570 	struct gl_framebuffer *fb = ctx->DrawBuffer;
571 	struct radeon_renderbuffer *rrb;
572 	int i;
573 
574 	if (ctx->Driver.Flush)
575 		ctx->Driver.Flush(ctx, 0); /* +r6/r7 */
576 
577 	for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
578 		struct radeon_renderbuffer *rrb;
579 		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
580 		if (rrb && rrb->bo)
581 			radeon_bo_wait(rrb->bo);
582 	}
583 	rrb = radeon_get_depthbuffer(radeon);
584 	if (rrb && rrb->bo)
585 		radeon_bo_wait(rrb->bo);
586 }
587 
588 /* cmdbuffer */
589 /**
590  * Send the current command buffer via ioctl to the hardware.
591  */
rcommonFlushCmdBufLocked(radeonContextPtr rmesa,const char * caller)592 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
593 {
594 	int ret = 0;
595 
596 	if (rmesa->cmdbuf.flushing) {
597 		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
598 		exit(-1);
599 	}
600 	rmesa->cmdbuf.flushing = 1;
601 
602 	if (RADEON_DEBUG & RADEON_IOCTL) {
603 		fprintf(stderr, "%s from %s\n", __func__, caller);
604 	}
605 
606 	radeonEmitQueryEnd(&rmesa->glCtx);
607 
608 	if (rmesa->cmdbuf.cs->cdw) {
609 		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
610 		rmesa->hw.all_dirty = GL_TRUE;
611 	}
612 	radeon_cs_erase(rmesa->cmdbuf.cs);
613 	rmesa->cmdbuf.flushing = 0;
614 
615 	if (!rmesa->vtbl.revalidate_all_buffers(&rmesa->glCtx))
616 		fprintf(stderr,"failed to revalidate buffers\n");
617 
618 	return ret;
619 }
620 
/**
 * Release DMA regions and flush the command buffer.  A kernel rejection
 * of the command stream is fatal: the error is reported and the process
 * exits.
 */
int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
{
	radeonReleaseDmaRegions(rmesa);

	const int ret = rcommonFlushCmdBufLocked(rmesa, caller);

	if (ret) {
		fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
				"parse or rejected command stream. See dmesg "
				"for more info.\n", ret);
		exit(ret);
	}

	return ret;
}
638 
639 /**
640  * Make sure that enough space is available in the command buffer
641  * by flushing if necessary.
642  *
643  * \param dwords The number of dwords we need to be free on the command buffer
644  */
rcommonEnsureCmdBufSpace(radeonContextPtr rmesa,int dwords,const char * caller)645 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
646 {
647    if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
648 	 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
649       /* If we try to flush empty buffer there is too big rendering operation. */
650       assert(rmesa->cmdbuf.cs->cdw);
651       rcommonFlushCmdBuf(rmesa, caller);
652       return GL_TRUE;
653    }
654    return GL_FALSE;
655 }
656 
/**
 * Allocate and configure the GEM command-stream manager and command buffer
 * for this context, and install the flush callback and VRAM/GTT limits.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };
	int fd = rmesa->radeonScreen->driScreen->fd;

	/* Initialize command buffer: start from the user option, grow it to
	 * hold at least two full state emits, then clamp to the hard upper
	 * bound of 64 * 256 dwords.  NOTE(review): the clamp can undo the
	 * 2*max_state_size growth above; behavior preserved as-is.
	 */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	if (size > 64 * 256)
		size = 64 * 256;

	/* %zu, not %zd: sizeof yields size_t, which is unsigned. */
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zu\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zu\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Let the CS layer flush pending GL commands when it runs out of
	 * buffer space.
	 */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx.Driver.Flush, &rmesa->glCtx);

	/* Cap VRAM/GTT usage to what the kernel reports as available. */
	if (!drmCommandWriteRead(fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
701 
/**
 * Destroy the command buffer: release the command stream first, then its
 * owning GEM manager.
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
710 
/**
 * Begin a batch of n dwords in the command stream (backs the BEGIN_BATCH
 * macro).  file/function/line identify the call site for CS bookkeeping
 * and debug output.
 */
void rcommonBeginBatch(radeonContextPtr rmesa, int n,
		       const char *file,
		       const char *function,
		       int line)
{
	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);

    radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
                        n, rmesa->cmdbuf.cs->cdw, function, line);

}
722 
/* Clear via the shared meta (draw-based) path rather than a hardware
 * clear ioctl.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}
727