/**************************************************************************
 *
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "main/glheader.h"
#include "main/context.h"
#include "main/extensions.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/points.h"
#include "main/renderbuffer.h"

#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "tnl/tnl.h"
#include "drivers/common/driverfuncs.h"
#include "drivers/common/meta.h"

#include "intel_chipset.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_batchbuffer.h"
#include "intel_clear.h"
#include "intel_extensions.h"
#include "intel_pixel.h"
#include "intel_regions.h"
#include "intel_buffer_objects.h"
#include "intel_fbo.h"
#include "intel_bufmgr.h"
#include "intel_screen.h"
#include "intel_mipmap_tree.h"

#include "utils.h"
#include "util/debug.h"
#include "util/ralloc.h"
#include "util/u_memory.h"

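/* Bitmask of DEBUG_* categories; filled in from the INTEL_DEBUG environment
 * variable at context creation (see debug_control[] and the
 * parse_debug_string() call in intelInitContext() below).
 */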
int INTEL_DEBUG = (0);

const char *const i915_vendor_string = "Intel Open Source Technology Center";

const char *
i915_get_renderer_string(unsigned deviceID)
{
   const char *chipset;
   static char buffer[128];

   switch (deviceID) {
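   /* X-macro expansion: each CHIPSET(id, symbol, str) entry in the PCI ID
    * headers included below becomes a "case id: chipset = str; break;" arm
    * of this switch.
    */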
#undef CHIPSET
#define CHIPSET(id, symbol, str) case id: chipset = str; break;
#include "pci_ids/i830_pci_ids.h"
#include "pci_ids/i915_pci_ids.h"
   default:
      chipset = "Unknown Intel Chipset";
      break;
   }

   (void) driGetRendererString(buffer, chipset, 0);
   return buffer;
}

static const GLubyte *
intelGetString(struct gl_context * ctx, GLenum name)
{
   const struct intel_context *const intel = intel_context(ctx);

   switch (name) {
   case GL_VENDOR:
      return (GLubyte *) i915_vendor_string;

   case GL_RENDERER:
      return
         (GLubyte *) i915_get_renderer_string(intel->intelScreen->deviceID);

   default:
      return NULL;
   }
}

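/* Picks the front-buffer flush hook from whichever loader interface the
 * screen was set up with: the image loader when present, otherwise the
 * DRI2 loader.
 */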
#define flushFront(screen)      ((screen)->image.loader ? (screen)->image.loader->flushFrontBuffer : (screen)->dri2.loader->flushFrontBuffer)

static void
intel_flush_front(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);
    __DRIcontext *driContext = intel->driContext;
    __DRIdrawable *driDrawable = driContext->driDrawablePriv;
    __DRIscreen *const screen = intel->intelScreen->driScrnPriv;

    if (intel->front_buffer_dirty && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      if (flushFront(screen) &&
          driDrawable &&
          driDrawable->loaderPrivate) {
         flushFront(screen)(driDrawable, driDrawable->loaderPrivate);

	 /* We set the dirty bit in intel_prepare_render() if we're
	  * front buffer rendering once we get there.
	  */
	 intel->front_buffer_dirty = false;
      }
   }
}

static void
intel_update_image_buffers(struct intel_context *intel, __DRIdrawable *drawable);

static unsigned
intel_bits_per_pixel(const struct intel_renderbuffer *rb)
{
   return _mesa_get_format_bytes(intel_rb_format(rb)) * 8;
}

static void
intel_query_dri2_buffers(struct intel_context *intel,
			 __DRIdrawable *drawable,
			 __DRIbuffer **buffers,
			 int *count);

static void
intel_process_dri2_buffer(struct intel_context *intel,
			  __DRIdrawable *drawable,
			  __DRIbuffer *buffer,
			  struct intel_renderbuffer *rb,
			  const char *buffer_name);

static void
intel_update_dri2_buffers(struct intel_context *intel, __DRIdrawable *drawable)
{
   __DRIbuffer *buffers = NULL;
   int i, count;
   const char *region_name;
   struct intel_renderbuffer *rb;
   struct gl_framebuffer *fb = drawable->driverPrivate;

   intel_query_dri2_buffers(intel, drawable, &buffers, &count);

   if (buffers == NULL)
      return;

   for (i = 0; i < count; i++) {
      switch (buffers[i].attachment) {
      case __DRI_BUFFER_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 front buffer";
         break;

      case __DRI_BUFFER_FAKE_FRONT_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
         region_name = "dri2 fake front buffer";
         break;

      case __DRI_BUFFER_BACK_LEFT:
         rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
         region_name = "dri2 back buffer";
         break;

      case __DRI_BUFFER_DEPTH:
      case __DRI_BUFFER_HIZ:
      case __DRI_BUFFER_DEPTH_STENCIL:
      case __DRI_BUFFER_STENCIL:
      case __DRI_BUFFER_ACCUM:
      default:
         fprintf(stderr,
                 "unhandled buffer attach event, attachment type %d\n",
                 buffers[i].attachment);
         return;
      }

      intel_process_dri2_buffer(intel, drawable, &buffers[i], rb, region_name);
   }
}

void
intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
{
   struct intel_context *intel = context->driverPrivate;
   __DRIscreen *screen = intel->intelScreen->driScrnPriv;

   /* Set this up front, so that in case our buffers get invalidated
    * while we're getting new buffers, we don't clobber the stamp and
    * thus ignore the invalidate. */
   drawable->lastStamp = drawable->dri2.stamp;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI))
      fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);

   if (screen->image.loader)
      intel_update_image_buffers(intel, drawable);
   else
      intel_update_dri2_buffers(intel, drawable);

   driUpdateFramebufferSize(&intel->ctx, drawable);
}

/**
 * intel_prepare_render should be called anywhere that current read/drawbuffer
 * state is required.
 */
void
intel_prepare_render(struct intel_context *intel)
{
   __DRIcontext *driContext = intel->driContext;
   __DRIdrawable *drawable;

   drawable = driContext->driDrawablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.draw_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
	 intel_update_renderbuffers(driContext, drawable);
      intel_draw_buffer(&intel->ctx);
      driContext->dri2.draw_stamp = drawable->dri2.stamp;
   }

   drawable = driContext->driReadablePriv;
   if (drawable && drawable->dri2.stamp != driContext->dri2.read_stamp) {
      if (drawable->lastStamp != drawable->dri2.stamp)
	 intel_update_renderbuffers(driContext, drawable);
      driContext->dri2.read_stamp = drawable->dri2.stamp;
   }

   /* If we're currently rendering to the front buffer, the rendering
    * that will happen next will probably dirty the front buffer.  So
    * mark it as dirty here.
    */
   if (_mesa_is_front_buffer_drawing(intel->ctx.DrawBuffer))
      intel->front_buffer_dirty = true;

   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  By this point the
    * round trips for swap/copy and getting new buffers are done, so
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
      if (!intel->disable_throttling)
         drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;
      intel->need_throttle = false;
   }
}

static void
intel_noninvalidate_viewport(struct gl_context *ctx)
{
    struct intel_context *intel = intel_context(ctx);
    __DRIcontext *driContext = intel->driContext;

    intelCalcViewport(ctx);

    if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
       dri2InvalidateDrawable(driContext->driDrawablePriv);
       dri2InvalidateDrawable(driContext->driReadablePriv);
    }
}

static void
intel_viewport(struct gl_context *ctx)
{
    intelCalcViewport(ctx);
}

static const struct debug_control debug_control[] = {
   { "tex",   DEBUG_TEXTURE},
   { "state", DEBUG_STATE},
   { "blit",  DEBUG_BLIT},
   { "mip",   DEBUG_MIPTREE},
   { "fall",  DEBUG_PERF},
   { "perf",  DEBUG_PERF},
   { "bat",   DEBUG_BATCH},
   { "pix",   DEBUG_PIXEL},
   { "buf",   DEBUG_BUFMGR},
   { "reg",   DEBUG_REGION},
   { "fbo",   DEBUG_FBO},
   { "fs",    DEBUG_WM },
   { "sync",  DEBUG_SYNC},
   { "dri",   DEBUG_DRI },
   { "stats", DEBUG_STATS },
   { "wm",    DEBUG_WM },
   { "aub",   DEBUG_AUB },
   { NULL,    0 }
};
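
/* Example: several categories can be enabled at once by listing them
 * comma-separated in the environment, e.g.
 *
 *    INTEL_DEBUG=batch,bufmgr,perf ./app
 *
 * which sets the corresponding DEBUG_* bits via the parse_debug_string()
 * call in intelInitContext() below.
 */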


static void
intelInvalidateState(struct gl_context * ctx)
{
   GLuint new_state = ctx->NewState;
    struct intel_context *intel = intel_context(ctx);

    if (ctx->swrast_context)
       _swrast_InvalidateState(ctx, new_state);

   intel->NewGLState |= new_state;

   if (new_state & (_NEW_SCISSOR | _NEW_BUFFERS | _NEW_VIEWPORT))
      _mesa_update_draw_buffer_bounds(ctx, ctx->DrawBuffer);

   if (intel->vtbl.invalidate_state)
      intel->vtbl.invalidate_state( intel, new_state );
}

void
intel_flush_rendering_to_batch(struct gl_context *ctx)
{
   struct intel_context *intel = intel_context(ctx);

   if (intel->Fallback)
      _swrast_flush(ctx);

   INTEL_FIREVERTICES(intel);
}

void
_intel_flush(struct gl_context *ctx, const char *file, int line)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush_rendering_to_batch(ctx);

   if (intel->batch.used)
      _intel_batchbuffer_flush(intel, file, line);
}

static void
intel_glFlush(struct gl_context *ctx, unsigned gallium_flush_flags)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);
   if (_mesa_is_front_buffer_drawing(ctx->DrawBuffer))
      intel->need_throttle = true;
}

void
intelFinish(struct gl_context * ctx)
{
   struct intel_context *intel = intel_context(ctx);

   intel_flush(ctx);
   intel_flush_front(ctx);

   if (intel->batch.last_bo)
      drm_intel_bo_wait_rendering(intel->batch.last_bo);
}

void
intelInitDriverFunctions(struct dd_function_table *functions)
{
   _mesa_init_driver_functions(functions);
   _tnl_init_driver_draw_function(functions);

   functions->Flush = intel_glFlush;
   functions->Finish = intelFinish;
   functions->GetString = intelGetString;
   functions->UpdateState = intelInvalidateState;

   intelInitTextureFuncs(functions);
   intelInitTextureImageFuncs(functions);
   intelInitTextureSubImageFuncs(functions);
   intelInitTextureCopyImageFuncs(functions);
   intelInitClearFuncs(functions);
   intelInitBufferFuncs(functions);
   intelInitPixelFuncs(functions);
   intelInitBufferObjectFuncs(functions);
   intel_init_syncobj_functions(functions);
}

bool
intelInitContext(struct intel_context *intel,
                 int api,
                 unsigned major_version,
                 unsigned minor_version,
                 uint32_t flags,
                 const struct gl_config * mesaVis,
                 __DRIcontext * driContextPriv,
                 void *sharedContextPrivate,
                 struct dd_function_table *functions,
                 unsigned *dri_ctx_error)
{
   struct gl_context *ctx = &intel->ctx;
   struct gl_context *shareCtx = (struct gl_context *) sharedContextPrivate;
   __DRIscreen *sPriv = driContextPriv->driScreenPriv;
   struct intel_screen *intelScreen = sPriv->driverPrivate;
   int bo_reuse_mode;

   /* Can't rely on invalidate events, fall back to glViewport hack */
   if (!driContextPriv->driScreenPriv->dri2.useInvalidate)
      functions->Viewport = intel_noninvalidate_viewport;
   else
      functions->Viewport = intel_viewport;

   intel->intelScreen = intelScreen;

   if (!_mesa_initialize_context(&intel->ctx, api, mesaVis, shareCtx,
                                 functions)) {
      *dri_ctx_error = __DRI_CTX_ERROR_NO_MEMORY;
      printf("%s: failed to init mesa context\n", __func__);
      return false;
   }

   driContextSetFlags(&intel->ctx, flags);

   driContextPriv->driverPrivate = intel;
   intel->driContext = driContextPriv;

   intel->gen = intelScreen->gen;

   const int devID = intelScreen->deviceID;

   intel->is_945 = IS_945(devID);

   memset(&ctx->TextureFormatSupported,
	  0, sizeof(ctx->TextureFormatSupported));

   driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
                       sPriv->myNum, "i915", NULL, NULL, NULL, 0, NULL, 0);
   intel->maxBatchSize = 4096;

   /* Estimate the size of the mappable aperture into the GTT.  There's an
    * ioctl to get the whole GTT size, but not one to get the mappable subset.
    * It turns out it's basically always 256MB, though some ancient hardware
    * was smaller.
    */
   uint32_t gtt_size = 256 * 1024 * 1024;
   if (intel->gen == 2)
      gtt_size = 128 * 1024 * 1024;

   /* We don't want to map two objects such that a memcpy between them would
    * just fault one mapping in and then the other over and over forever.  So
    * we would need to divide the GTT size by 2.  Additionally, some GTT is
    * taken up by things like the framebuffer and the ringbuffer and such, so
    * be more conservative.
    */
   intel->max_gtt_map_object_size = gtt_size / 4;
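   /* With the sizes above this works out to 256 MiB / 4 = 64 MiB on gen3
    * and later, or 128 MiB / 4 = 32 MiB on gen2.
    */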

   intel->bufmgr = intelScreen->bufmgr;

   bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
   switch (bo_reuse_mode) {
   case DRI_CONF_BO_REUSE_DISABLED:
      break;
   case DRI_CONF_BO_REUSE_ALL:
      intel_bufmgr_gem_enable_reuse(intel->bufmgr);
      break;
   }

   ctx->Const.MinLineWidth = 1.0;
   ctx->Const.MinLineWidthAA = 1.0;
   ctx->Const.MaxLineWidth = 7.0;
   ctx->Const.MaxLineWidthAA = 7.0;
   ctx->Const.LineWidthGranularity = 0.5;

   ctx->Const.MinPointSize = 1.0;
   ctx->Const.MinPointSizeAA = 1.0;
   ctx->Const.MaxPointSize = 255.0;
   ctx->Const.MaxPointSizeAA = 3.0;
   ctx->Const.PointSizeGranularity = 1.0;

   ctx->Const.StripTextureBorder = GL_TRUE;

   /* Reinitialize the context point state.  It depends on the constants
    * in __struct gl_contextRec::Const.
    */
   _mesa_init_point(ctx);

   ctx->Const.MaxRenderbufferSize = 2048;

   _swrast_CreateContext(ctx);
   _vbo_CreateContext(ctx, false);
   if (ctx->swrast_context) {
      _tnl_CreateContext(ctx);
      _swsetup_CreateContext(ctx);

      /* Configure swrast to match hardware characteristics: */
      _swrast_allow_pixel_fog(ctx, false);
      _swrast_allow_vertex_fog(ctx, true);
   }

   _mesa_meta_init(ctx);

   intel->hw_stipple = 1;

   intel->RenderIndex = ~0;

   intelInitExtensions(ctx);

   INTEL_DEBUG = parse_debug_string(getenv("INTEL_DEBUG"), debug_control);
   if (INTEL_DEBUG & DEBUG_BUFMGR)
      dri_bufmgr_set_debug(intel->bufmgr, true);
   if (INTEL_DEBUG & DEBUG_PERF)
      intel->perf_debug = true;

   if (INTEL_DEBUG & DEBUG_AUB)
      drm_intel_bufmgr_gem_set_aub_dump(intel->bufmgr, true);

   intel_batchbuffer_init(intel);

   intel_fbo_init(intel);

   intel->prim.primitive = ~0;

   /* Force all software fallbacks */
   if (getenv("INTEL_NO_RAST")) {
      fprintf(stderr, "disabling 3D rasterization\n");
      intel->no_rast = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
      fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
      intel->always_flush_batch = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
      fprintf(stderr, "flushing GPU caches before/after each draw call\n");
      intel->always_flush_cache = 1;
   }

   if (driQueryOptionb(&intel->optionCache, "disable_throttling")) {
      fprintf(stderr, "disabling flush throttling\n");
      intel->disable_throttling = 1;
   }

   return true;
}

void
intelDestroyContext(__DRIcontext * driContextPriv)
{
   struct intel_context *intel =
      (struct intel_context *) driContextPriv->driverPrivate;
   struct gl_context *ctx = &intel->ctx;

   assert(intel);               /* should never be null */
   if (intel) {
      INTEL_FIREVERTICES(intel);

      /* Dump a final BMP in case the application doesn't call SwapBuffers */
      if (INTEL_DEBUG & DEBUG_AUB) {
         intel_batchbuffer_flush(intel);
	 aub_dump_bmp(&intel->ctx);
      }

      _mesa_meta_free(&intel->ctx);

      intel->vtbl.destroy(intel);

      if (ctx->swrast_context) {
         _swsetup_DestroyContext(&intel->ctx);
         _tnl_DestroyContext(&intel->ctx);
      }
      _vbo_DestroyContext(&intel->ctx);

      if (ctx->swrast_context)
         _swrast_DestroyContext(&intel->ctx);
      intel->Fallback = 0x0;      /* don't call _swrast_Flush later */

      intel_batchbuffer_free(intel);

      free(intel->prim.vb);
      intel->prim.vb = NULL;
      drm_intel_bo_unreference(intel->prim.vb_bo);
      intel->prim.vb_bo = NULL;
      drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
      intel->first_post_swapbuffers_batch = NULL;

      driDestroyOptionCache(&intel->optionCache);

      /* free the Mesa context */
      _mesa_free_context_data(&intel->ctx, true);

      align_free(intel);
      driContextPriv->driverPrivate = NULL;
   }
}

GLboolean
intelUnbindContext(__DRIcontext * driContextPriv)
{
   /* Unset current context and dispatch table */
   _mesa_make_current(NULL, NULL, NULL);

   return true;
}

GLboolean
intelMakeCurrent(__DRIcontext * driContextPriv,
                 __DRIdrawable * driDrawPriv,
                 __DRIdrawable * driReadPriv)
{
   struct intel_context *intel;

   if (driContextPriv)
      intel = (struct intel_context *) driContextPriv->driverPrivate;
   else
      intel = NULL;

   if (driContextPriv) {
      struct gl_context *ctx = &intel->ctx;
      struct gl_framebuffer *fb, *readFb;

      if (driDrawPriv == NULL && driReadPriv == NULL) {
	 fb = _mesa_get_incomplete_framebuffer();
	 readFb = _mesa_get_incomplete_framebuffer();
      } else {
	 fb = driDrawPriv->driverPrivate;
	 readFb = driReadPriv->driverPrivate;
	 driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
	 driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
      }

      intel_prepare_render(intel);
      _mesa_make_current(ctx, fb, readFb);

      /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
       * is NULL at that point.  We can't call _mesa_make_current()
       * first, since we need the buffer size for the initial
       * viewport.  So just call intel_draw_buffer() again here. */
      intel_draw_buffer(ctx);
   }
   else {
      _mesa_make_current(NULL, NULL, NULL);
   }

   return true;
}

/**
 * \brief Query DRI2 to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * DRI2GetBuffers() or DRI2GetBuffersWithFormat().
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by DRI2 query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 * \see DRI2GetBuffers()
 * \see DRI2GetBuffersWithFormat()
 */
static void
intel_query_dri2_buffers(struct intel_context *intel,
			 __DRIdrawable *drawable,
			 __DRIbuffer **buffers,
			 int *buffer_count)
{
   __DRIscreen *screen = intel->intelScreen->driScrnPriv;
   struct gl_framebuffer *fb = drawable->driverPrivate;
   int i = 0;
   unsigned attachments[__DRI_BUFFER_COUNT];
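   /* attachments[] is filled with (attachment token, bits-per-pixel) pairs
    * for getBuffersWithFormat(), which is why the request count passed to
    * the loader below is i / 2.
    */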

   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   memset(attachments, 0, sizeof(attachments));
   if ((_mesa_is_front_buffer_drawing(fb) ||
        _mesa_is_front_buffer_reading(fb) ||
	!back_rb) && front_rb) {
      /* If a fake front buffer is in use, then querying for
       * __DRI_BUFFER_FRONT_LEFT will cause the server to copy the image from
       * the real front buffer to the fake front buffer.  So before doing the
       * query, we need to make sure all the pending drawing has landed in the
       * real front buffer.
       */
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);

      attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
      attachments[i++] = intel_bits_per_pixel(front_rb);
   } else if (front_rb && intel->front_buffer_dirty) {
      /* We have pending front buffer rendering, but we aren't querying for a
       * front buffer.  If the front buffer we have is a fake front buffer,
       * the X server is going to throw it away when it processes the query.
       * So before doing the query, make sure all the pending drawing has
       * landed in the real front buffer.
       */
      intel_flush(&intel->ctx);
      intel_flush_front(&intel->ctx);
   }

   if (back_rb) {
      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = intel_bits_per_pixel(back_rb);
   }

   assert(i <= ARRAY_SIZE(attachments));

   *buffers = screen->dri2.loader->getBuffersWithFormat(drawable,
							&drawable->w,
							&drawable->h,
							attachments, i / 2,
							buffer_count,
							drawable->loaderPrivate);
}

/**
 * \brief Assign a DRI buffer's DRM region to a renderbuffer.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \par Note:
 *    DRI buffers whose attachment point is DRI2BufferStencil or
 *    DRI2BufferDepthStencil are handled as special cases.
 *
 * \param buffer_name is a human readable name, such as "dri2 front buffer",
 *        that is passed to intel_region_alloc_for_handle().
 *
 * \see intel_update_renderbuffers()
 * \see intel_region_alloc_for_handle()
 */
static void
intel_process_dri2_buffer(struct intel_context *intel,
			  __DRIdrawable *drawable,
			  __DRIbuffer *buffer,
			  struct intel_renderbuffer *rb,
			  const char *buffer_name)
{
   struct intel_region *region = NULL;

   if (!rb)
      return;

   /* We try to avoid closing and reopening the same BO name, because the first
    * use of a mapping of the buffer involves a bunch of page faulting which is
    * moderately expensive.
    */
   if (rb->mt &&
       rb->mt->region &&
       rb->mt->region->name == buffer->name)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_DRI)) {
      fprintf(stderr,
	      "attaching buffer %d, at %d, cpp %d, pitch %d\n",
	      buffer->name, buffer->attachment,
	      buffer->cpp, buffer->pitch);
   }

   intel_miptree_release(&rb->mt);
   region = intel_region_alloc_for_handle(intel->intelScreen,
                                          buffer->cpp,
                                          drawable->w,
                                          drawable->h,
                                          buffer->pitch,
                                          buffer->name,
                                          buffer_name);
   if (!region)
      return;

   rb->mt = intel_miptree_create_for_dri2_buffer(intel,
                                                 buffer->attachment,
                                                 intel_rb_format(rb),
                                                 region);
   intel_region_release(&region);
}

/**
 * \brief Query the DRI image loader to obtain a DRIdrawable's buffers.
 *
 * To determine which DRI buffers to request, examine the renderbuffers
 * attached to the drawable's framebuffer. Then request the buffers with
 * the image loader's getBuffers() hook.
 *
 * This is called from intel_update_renderbuffers().
 *
 * \param drawable      Drawable whose buffers are queried.
 * \param buffers       [out] List of buffers returned by the loader query.
 * \param buffer_count  [out] Number of buffers returned.
 *
 * \see intel_update_renderbuffers()
 */

static void
intel_update_image_buffer(struct intel_context *intel,
                          __DRIdrawable *drawable,
                          struct intel_renderbuffer *rb,
                          __DRIimage *buffer,
                          enum __DRIimageBufferMask buffer_type)
{
   struct intel_region *region = buffer->region;

   if (!rb || !region)
      return;

   unsigned num_samples = rb->Base.Base.NumSamples;

   if (rb->mt &&
       rb->mt->region &&
       rb->mt->region == region)
      return;

   intel_miptree_release(&rb->mt);
   rb->mt = intel_miptree_create_for_image_buffer(intel,
                                                  buffer_type,
                                                  intel_rb_format(rb),
                                                  num_samples,
                                                  region);
}


static void
intel_update_image_buffers(struct intel_context *intel, __DRIdrawable *drawable)
{
   struct gl_framebuffer *fb = drawable->driverPrivate;
   __DRIscreen *screen = intel->intelScreen->driScrnPriv;
   struct intel_renderbuffer *front_rb;
   struct intel_renderbuffer *back_rb;
   struct __DRIimageList images;
   unsigned int format;
   uint32_t buffer_mask = 0;
   int ret;

   front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
   back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);

   if (back_rb)
      format = intel_rb_format(back_rb);
   else if (front_rb)
      format = intel_rb_format(front_rb);
   else
      return;

   if (front_rb && (_mesa_is_front_buffer_drawing(fb) ||
                    _mesa_is_front_buffer_reading(fb) || !back_rb)) {
      buffer_mask |= __DRI_IMAGE_BUFFER_FRONT;
   }

   if (back_rb)
      buffer_mask |= __DRI_IMAGE_BUFFER_BACK;

   ret = screen->image.loader->getBuffers(drawable,
                                          driGLFormatToImageFormat(format),
                                          &drawable->dri2.stamp,
                                          drawable->loaderPrivate,
                                          buffer_mask,
                                          &images);
   if (!ret)
      return;

   if (images.image_mask & __DRI_IMAGE_BUFFER_FRONT) {
      drawable->w = images.front->width;
      drawable->h = images.front->height;
      intel_update_image_buffer(intel,
                                drawable,
                                front_rb,
                                images.front,
                                __DRI_IMAGE_BUFFER_FRONT);
   }
   if (images.image_mask & __DRI_IMAGE_BUFFER_BACK) {
      drawable->w = images.back->width;
      drawable->h = images.back->height;
      intel_update_image_buffer(intel,
                                drawable,
                                back_rb,
                                images.back,
                                __DRI_IMAGE_BUFFER_BACK);
   }
}